// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2013,2014 Inktank Storage, Inc.
 * Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
 *
 * Author: Loic Dachary <loic@dachary.org>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef PGBACKEND_H
#define PGBACKEND_H

#include "osd_types.h"
#include "common/WorkQueue.h"
#include "include/Context.h"
#include "os/ObjectStore.h"
#include "common/LogClient.h"
#include <string>
#include "PGTransaction.h"
#include "common/ostream_temp.h"

namespace Scrub {
  class Store;
}
struct shard_info_wrapper;
struct inconsistent_obj_wrapper;

//forward declaration
class OSDMap;
class PGLog;
typedef std::shared_ptr<const OSDMap> OSDMapRef;

 /**
  * PGBackend
  *
  * PGBackend defines an interface for logic handling IO and
  * replication on RADOS objects.  The PGBackend implementation
  * is responsible for:
  *
  * 1) Handling client operations
  * 2) Handling object recovery
  * 3) Handling object access
  * 4) Handling scrub, deep-scrub, repair
  */
 class PGBackend {
 public:
   CephContext* cct;
 protected:
   ObjectStore *store;
   const coll_t coll;
   ObjectStore::CollectionHandle &ch;
 public:
   /**
    * Provides interfaces for PGBackend callbacks
    *
    * The intention is that the parent calls into the PGBackend
    * implementation holding a lock and that the callbacks are
    * called under the same locks.
    */
   class Listener {
   public:
     /// Debugging
     virtual DoutPrefixProvider *get_dpp() = 0;

     /// Recovery

     /**
      * Called with the transaction that will recover oid
      */
     virtual void on_local_recover(
       const hobject_t &oid,
       const ObjectRecoveryInfo &recovery_info,
       ObjectContextRef obc,
       bool is_delete,
       ObjectStore::Transaction *t
       ) = 0;

     /**
      * Called when the transaction recovering oid is durable and
      * has been applied on all replicas
      */
     virtual void on_global_recover(
       const hobject_t &oid,
       const object_stat_sum_t &stat_diff,
       bool is_delete
       ) = 0;

     /**
      * Called when peer is recovered
      */
     virtual void on_peer_recover(
       pg_shard_t peer,
       const hobject_t &oid,
       const ObjectRecoveryInfo &recovery_info
       ) = 0;

     virtual void begin_peer_recover(
       pg_shard_t peer,
       const hobject_t oid) = 0;

     virtual void apply_stats(
       const hobject_t &soid,
       const object_stat_sum_t &delta_stats) = 0;

     /**
      * Called when a read from a set of replicas/primary fails
      */
     virtual void on_failed_pull(
       const std::set<pg_shard_t> &from,
       const hobject_t &soid,
       const eversion_t &v
       ) = 0;

     /**
      * Called when a pull on soid cannot be completed due to
      * down peers
      */
     virtual void cancel_pull(
       const hobject_t &soid) = 0;

     /**
      * Called to remove an object.
      */
     virtual void remove_missing_object(
       const hobject_t &oid,
       eversion_t v,
       Context *on_complete) = 0;

     /**
      * Bless a context
      *
      * Wraps a context in whatever outer layers the parent usually
      * uses to call into the PGBackend
      */
     virtual Context *bless_context(Context *c) = 0;
     virtual GenContext<ThreadPool::TPHandle&> *bless_gencontext(
       GenContext<ThreadPool::TPHandle&> *c) = 0;
     virtual GenContext<ThreadPool::TPHandle&> *bless_unlocked_gencontext(
       GenContext<ThreadPool::TPHandle&> *c) = 0;
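
     /*
      * Conceptual sketch (illustrative, not the parent's actual code): a
      * blessed context is typically a wrapper that re-takes the PG lock and
      * drops the callback if the PG interval has changed before it runs.
      * The names below (BlessedContext, blessed_at) are hypothetical:
      *
      *   struct BlessedContext : public Context {   // hypothetical wrapper
      *     Listener *parent;
      *     Context *inner;
      *     epoch_t blessed_at;
      *     void finish(int r) override {
      *       // take the PG lock, check that blessed_at is still the current
      *       // interval, and only then run inner; otherwise discard it
      *     }
      *   };
      */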

     virtual void send_message(int to_osd, Message *m) = 0;
     virtual void queue_transaction(
       ObjectStore::Transaction&& t,
       OpRequestRef op = OpRequestRef()
       ) = 0;
     virtual void queue_transactions(
       std::vector<ObjectStore::Transaction>& tls,
       OpRequestRef op = OpRequestRef()
       ) = 0;
     virtual epoch_t get_interval_start_epoch() const = 0;
     virtual epoch_t get_last_peering_reset_epoch() const = 0;

     virtual const std::set<pg_shard_t> &get_acting_recovery_backfill_shards() const = 0;
     virtual const std::set<pg_shard_t> &get_acting_shards() const = 0;
     virtual const std::set<pg_shard_t> &get_backfill_shards() const = 0;

     virtual std::ostream& gen_dbg_prefix(std::ostream& out) const = 0;

     virtual const std::map<hobject_t, std::set<pg_shard_t>> &get_missing_loc_shards()
       const = 0;

     virtual const pg_missing_tracker_t &get_local_missing() const = 0;
     virtual void add_local_next_event(const pg_log_entry_t& e) = 0;
     virtual const std::map<pg_shard_t, pg_missing_t> &get_shard_missing()
       const = 0;
     virtual const pg_missing_const_i * maybe_get_shard_missing(
       pg_shard_t peer) const {
       if (peer == primary_shard()) {
	 return &get_local_missing();
       } else {
	 std::map<pg_shard_t, pg_missing_t>::const_iterator i =
	   get_shard_missing().find(peer);
	 if (i == get_shard_missing().end()) {
	   return nullptr;
	 } else {
	   return &(i->second);
	 }
       }
     }
     virtual const pg_missing_const_i &get_shard_missing(pg_shard_t peer) const {
       auto m = maybe_get_shard_missing(peer);
       ceph_assert(m);
       return *m;
     }

     virtual const std::map<pg_shard_t, pg_info_t> &get_shard_info() const = 0;
     virtual const pg_info_t &get_shard_info(pg_shard_t peer) const {
       if (peer == primary_shard()) {
	 return get_info();
       } else {
	 std::map<pg_shard_t, pg_info_t>::const_iterator i =
	   get_shard_info().find(peer);
	 ceph_assert(i != get_shard_info().end());
	 return i->second;
       }
     }

     virtual const PGLog &get_log() const = 0;
     virtual bool pgb_is_primary() const = 0;
     virtual const OSDMapRef& pgb_get_osdmap() const = 0;
     virtual epoch_t pgb_get_osdmap_epoch() const = 0;
     virtual const pg_info_t &get_info() const = 0;
     virtual const pg_pool_t &get_pool() const = 0;

     virtual ObjectContextRef get_obc(
       const hobject_t &hoid,
       const std::map<std::string, ceph::buffer::list> &attrs) = 0;

     virtual bool try_lock_for_read(
       const hobject_t &hoid,
       ObcLockManager &manager) = 0;

     virtual void release_locks(ObcLockManager &manager) = 0;

     virtual void op_applied(
       const eversion_t &applied_version) = 0;

     virtual bool should_send_op(
       pg_shard_t peer,
       const hobject_t &hoid) = 0;

     virtual bool pg_is_undersized() const = 0;
     virtual bool pg_is_repair() const = 0;

     virtual void log_operation(
       std::vector<pg_log_entry_t>&& logv,
       const std::optional<pg_hit_set_history_t> &hset_history,
       const eversion_t &trim_to,
       const eversion_t &roll_forward_to,
       const eversion_t &min_last_complete_ondisk,
       bool transaction_applied,
       ObjectStore::Transaction &t,
       bool async = false) = 0;

     virtual void pgb_set_object_snap_mapping(
       const hobject_t &soid,
       const std::set<snapid_t> &snaps,
       ObjectStore::Transaction *t) = 0;

     virtual void pgb_clear_object_snap_mapping(
       const hobject_t &soid,
       ObjectStore::Transaction *t) = 0;

     virtual void update_peer_last_complete_ondisk(
       pg_shard_t fromosd,
       eversion_t lcod) = 0;

     virtual void update_last_complete_ondisk(
       eversion_t lcod) = 0;

     virtual void update_stats(
       const pg_stat_t &stat) = 0;

     virtual void schedule_recovery_work(
       GenContext<ThreadPool::TPHandle&> *c) = 0;

     virtual pg_shard_t whoami_shard() const = 0;
     int whoami() const {
       return whoami_shard().osd;
     }
     spg_t whoami_spg_t() const {
       return get_info().pgid;
     }

     virtual spg_t primary_spg_t() const = 0;
     virtual pg_shard_t primary_shard() const = 0;
     virtual uint64_t min_peer_features() const = 0;
     virtual uint64_t min_upacting_features() const = 0;
     virtual hobject_t get_temp_recovery_object(const hobject_t& target,
						eversion_t version) = 0;

     virtual void send_message_osd_cluster(
       int peer, Message *m, epoch_t from_epoch) = 0;
     virtual void send_message_osd_cluster(
       std::vector<std::pair<int, Message*>>& messages, epoch_t from_epoch) = 0;
     virtual void send_message_osd_cluster(
       MessageRef, Connection *con) = 0;
     virtual void send_message_osd_cluster(
       Message *m, const ConnectionRef& con) = 0;
     virtual ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch) = 0;
     virtual entity_name_t get_cluster_msgr_name() = 0;

     virtual PerfCounters *get_logger() = 0;

     virtual ceph_tid_t get_tid() = 0;

     virtual OstreamTemp clog_error() = 0;
     virtual OstreamTemp clog_warn() = 0;

     virtual bool check_failsafe_full() = 0;

     virtual bool pg_is_repair() = 0;
     virtual void inc_osd_stat_repaired() = 0;
     virtual bool pg_is_remote_backfilling() = 0;
     virtual void pg_add_local_num_bytes(int64_t num_bytes) = 0;
     virtual void pg_sub_local_num_bytes(int64_t num_bytes) = 0;
     virtual void pg_add_num_bytes(int64_t num_bytes) = 0;
     virtual void pg_sub_num_bytes(int64_t num_bytes) = 0;
     virtual bool maybe_preempt_replica_scrub(const hobject_t& oid) = 0;
     virtual ~Listener() {}
   };
   Listener *parent;
   Listener *get_parent() const { return parent; }
   PGBackend(CephContext* cct, Listener *l, ObjectStore *store, const coll_t &coll,
	     ObjectStore::CollectionHandle &ch) :
     cct(cct),
     store(store),
     coll(coll),
     ch(ch),
     parent(l) {}
   bool is_primary() const { return get_parent()->pgb_is_primary(); }
   const OSDMapRef& get_osdmap() const { return get_parent()->pgb_get_osdmap(); }
   epoch_t get_osdmap_epoch() const { return get_parent()->pgb_get_osdmap_epoch(); }
   const pg_info_t &get_info() { return get_parent()->get_info(); }

   std::ostream& gen_prefix(std::ostream& out) const {
     return parent->gen_dbg_prefix(out);
   }

   /**
    * RecoveryHandle
    *
    * We may want to recover multiple objects in the same set of
    * messages.  RecoveryHandle is an interface for the opaque
    * object used by the implementation to store the details of
    * the pending recovery operations.
    */
   struct RecoveryHandle {
     bool cache_dont_need;
     std::map<pg_shard_t, std::vector<std::pair<hobject_t, eversion_t> > > deletes;

     RecoveryHandle(): cache_dont_need(false) {}
     virtual ~RecoveryHandle() {}
   };

   /// Get a fresh recovery operation
   virtual RecoveryHandle *open_recovery_op() = 0;

   /// run_recovery_op: finish the operation represented by h
   virtual void run_recovery_op(
     RecoveryHandle *h,     ///< [in] op to finish
     int priority           ///< [in] msg priority
     ) = 0;

   void recover_delete_object(const hobject_t &oid, eversion_t v,
			      RecoveryHandle *h);
   void send_recovery_deletes(int prio,
			      const std::map<pg_shard_t, std::vector<std::pair<hobject_t, eversion_t> > > &deletes);

   /**
    * recover_object
    *
    * Triggers a recovery operation on the specified hobject_t.
    * onreadable must be called before onwriteable.
    *
    * On each replica (primary included), get_parent()->on_not_missing()
    * must be called when the transaction finalizing the recovery
    * is queued.  Similarly, get_parent()->on_readable() must be called
    * when the transaction is applied in the backing store.
    *
    * get_parent()->on_not_degraded() should be called on the primary
    * when writes can resume on the object.
    *
    * obc may be NULL if the primary lacks the object.
    *
    * head may be NULL only if the head/snapdir is missing
    *
    * @param missing [in] set of (info, missing) pairs for queried nodes
    * @param overlaps [in] mapping of object to file offset overlaps
    */
   virtual int recover_object(
     const hobject_t &hoid, ///< [in] object to recover
     eversion_t v,          ///< [in] version to recover
     ObjectContextRef head,  ///< [in] context of the head/snapdir object
     ObjectContextRef obc,  ///< [in] context of the object
     RecoveryHandle *h      ///< [in,out] handle to attach recovery op to
     ) = 0;
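
   /*
    * Usage sketch (caller-side, variable names illustrative): the primary
    * typically batches recovery of one or more objects into a single
    * RecoveryHandle and then kicks the whole batch off:
    *
    *   RecoveryHandle *h = backend->open_recovery_op();
    *   backend->recover_object(soid, need_version, head_obc, obc, h);
    *   backend->run_recovery_op(h, priority);
    */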

   /**
    * true if PGBackend can handle this message while inactive
    *
    * If it returns true, handle_message *must* also return true
    */
   virtual bool can_handle_while_inactive(OpRequestRef op) = 0;

   /// gives PGBackend a crack at an incoming message
   bool handle_message(
     OpRequestRef op ///< [in] message received
     ); ///< @return true if the message was handled

   /// the variant of handle_message that is overridden by child classes
   virtual bool _handle_message(OpRequestRef op) = 0;
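
   /*
    * Caller-side sketch (illustrative): the owning PG usually offers each
    * incoming op to the backend first and only processes it itself when the
    * backend declines:
    *
    *   if (!backend->handle_message(op)) {
    *     // not a backend-specific message; handle it in the PG
    *   }
    */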

   virtual void check_recovery_sources(const OSDMapRef& osdmap) = 0;


   /**
    * clean up any temporary on-disk state due to a pg interval change
    */
   void on_change_cleanup(ObjectStore::Transaction *t);
   /**
    * The implementation should clear itself; contexts blessed prior to
    * on_change() won't be called after on_change()
    */
   virtual void on_change() = 0;
   virtual void clear_recovery_state() = 0;

   virtual IsPGRecoverablePredicate *get_is_recoverable_predicate() const = 0;
   virtual IsPGReadablePredicate *get_is_readable_predicate() const = 0;
   virtual int get_ec_data_chunk_count() const { return 0; }
   virtual int get_ec_stripe_chunk_size() const { return 0; }

   virtual void dump_recovery_info(ceph::Formatter *f) const = 0;

 private:
   std::set<hobject_t> temp_contents;
 public:
   // Track contents of temp collection, clear on reset
   void add_temp_obj(const hobject_t &oid) {
     temp_contents.insert(oid);
   }
   void add_temp_objs(const std::set<hobject_t> &oids) {
     temp_contents.insert(oids.begin(), oids.end());
   }
   void clear_temp_obj(const hobject_t &oid) {
     temp_contents.erase(oid);
   }
   void clear_temp_objs(const std::set<hobject_t> &oids) {
     for (std::set<hobject_t>::const_iterator i = oids.begin();
	  i != oids.end();
	  ++i) {
       temp_contents.erase(*i);
     }
   }

   virtual ~PGBackend() {}

   /// execute implementation-specific transaction
   virtual void submit_transaction(
     const hobject_t &hoid,               ///< [in] object
     const object_stat_sum_t &delta_stats,///< [in] stat change
     const eversion_t &at_version,        ///< [in] version
     PGTransactionUPtr &&t,               ///< [in] trans to execute (move)
     const eversion_t &trim_to,           ///< [in] trim log to here
     const eversion_t &min_last_complete_ondisk, ///< [in] lower bound on
                                                 ///  committed version
     std::vector<pg_log_entry_t>&& log_entries, ///< [in] log entries for t
     /// [in] hitset history (if updated with this transaction)
     std::optional<pg_hit_set_history_t> &hset_history,
     Context *on_all_commit,              ///< [in] called when all commit
     ceph_tid_t tid,                      ///< [in] tid
     osd_reqid_t reqid,                   ///< [in] reqid
     OpRequestRef op                      ///< [in] op
     ) = 0;
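
   /*
    * Caller-side sketch (illustrative; the mutation API lives in
    * PGTransaction.h): the primary builds a PGTransaction, generates the
    * matching log entries, and hands both off in one call:
    *
    *   PGTransactionUPtr txn = std::make_unique<PGTransaction>();
    *   // ... populate txn with this op's object mutations ...
    *   backend->submit_transaction(
    *     hoid, delta_stats, at_version, std::move(txn),
    *     trim_to, min_last_complete_ondisk, std::move(log_entries),
    *     hset_history, on_all_commit, tid, reqid, op);
    */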

   /// submit callback to be called in order with pending writes
   virtual void call_write_ordered(std::function<void(void)> &&cb) = 0;

   void try_stash(
     const hobject_t &hoid,
     version_t v,
     ObjectStore::Transaction *t);

   void rollback(
     const pg_log_entry_t &entry,
     ObjectStore::Transaction *t);

   friend class LRBTrimmer;
   void rollforward(
     const pg_log_entry_t &entry,
     ObjectStore::Transaction *t);

   void trim(
     const pg_log_entry_t &entry,
     ObjectStore::Transaction *t);

   void remove(
     const hobject_t &hoid,
     ObjectStore::Transaction *t);

 protected:

   void handle_recovery_delete(OpRequestRef op);
   void handle_recovery_delete_reply(OpRequestRef op);

   /// Reapply old attributes
   void rollback_setattrs(
     const hobject_t &hoid,
     std::map<std::string, std::optional<ceph::buffer::list> > &old_attrs,
     ObjectStore::Transaction *t);

   /// Truncate object to roll back an append
   virtual void rollback_append(
     const hobject_t &hoid,
     uint64_t old_size,
     ObjectStore::Transaction *t);

   /// Unstash object to roll back a stash
   void rollback_stash(
     const hobject_t &hoid,
     version_t old_version,
     ObjectStore::Transaction *t);

   /// Unstash object to roll back a stash
   void rollback_try_stash(
     const hobject_t &hoid,
     version_t old_version,
     ObjectStore::Transaction *t);

   /// Delete object to roll back a create
   void rollback_create(
     const hobject_t &hoid,
     ObjectStore::Transaction *t) {
     remove(hoid, t);
   }

   /// Clone the extents back into place
   void rollback_extents(
     version_t gen,
     const std::vector<std::pair<uint64_t, uint64_t> > &extents,
     const hobject_t &hoid,
     ObjectStore::Transaction *t);
 public:

   /// Trim object stashed at version
   void trim_rollback_object(
     const hobject_t &hoid,
     version_t gen,
     ObjectStore::Transaction *t);

   /// List objects in collection
   int objects_list_partial(
     const hobject_t &begin,
     int min,
     int max,
     std::vector<hobject_t> *ls,
     hobject_t *next);
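
   /*
    * Usage sketch (variable names illustrative): paging through the PG's
    * objects in bounded chunks, resuming from `next` on each call:
    *
    *   hobject_t pos, next;
    *   std::vector<hobject_t> ls;
    *   int r = backend->objects_list_partial(pos, 1, 64, &ls, &next);
    */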

   int objects_list_range(
     const hobject_t &start,
     const hobject_t &end,
     std::vector<hobject_t> *ls,
     std::vector<ghobject_t> *gen_obs=0);

   int objects_get_attr(
     const hobject_t &hoid,
     const std::string &attr,
     ceph::buffer::list *out);
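
   /*
    * Usage sketch: reading a single xattr, e.g. the object_info_t blob kept
    * under OI_ATTR (assuming OI_ATTR as defined in osd_types.h):
    *
    *   ceph::buffer::list bl;
    *   int r = backend->objects_get_attr(hoid, OI_ATTR, &bl);
    *   // r >= 0 on success; the attribute value is returned in bl
    */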

   virtual int objects_get_attrs(
     const hobject_t &hoid,
     std::map<std::string, ceph::buffer::list> *out);

   virtual int objects_read_sync(
     const hobject_t &hoid,
     uint64_t off,
     uint64_t len,
     uint32_t op_flags,
     ceph::buffer::list *bl) = 0;

   virtual int objects_readv_sync(
     const hobject_t &hoid,
     std::map<uint64_t, uint64_t>&& m,
     uint32_t op_flags,
     ceph::buffer::list *bl) {
     return -EOPNOTSUPP;
   }

   virtual void objects_read_async(
     const hobject_t &hoid,
     const std::list<std::pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
		std::pair<ceph::buffer::list*, Context*> > > &to_read,
     Context *on_complete, bool fast_read = false) = 0;
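
   /*
    * Usage sketch (illustrative): queueing one asynchronous read of
    * [off, off+len) into bl, with completion delivered via on_complete:
    *
    *   std::list<std::pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
    *             std::pair<ceph::buffer::list*, Context*>>> to_read;
    *   to_read.emplace_back(boost::make_tuple(off, len, op_flags),
    *                        std::make_pair(&bl, static_cast<Context*>(nullptr)));
    *   backend->objects_read_async(hoid, to_read, on_complete);
    */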

   virtual bool auto_repair_supported() const = 0;
   int be_scan_list(
     ScrubMap &map,
     ScrubMapBuilder &pos);
   bool be_compare_scrub_objects(
     pg_shard_t auth_shard,
     const ScrubMap::object &auth,
     const object_info_t& auth_oi,
     const ScrubMap::object &candidate,
     shard_info_wrapper& shard_error,
     inconsistent_obj_wrapper &result,
     std::ostream &errorstream,
     bool has_snapset);
   std::map<pg_shard_t, ScrubMap *>::const_iterator be_select_auth_object(
     const hobject_t &obj,
     const std::map<pg_shard_t,ScrubMap*> &maps,
     object_info_t *auth_oi,
     std::map<pg_shard_t, shard_info_wrapper> &shard_map,
     bool &digest_match,
     spg_t pgid,
     std::ostream &errorstream);
   void be_compare_scrubmaps(
     const std::map<pg_shard_t,ScrubMap*> &maps,
     const std::set<hobject_t> &master_set,
     bool repair,
     std::map<hobject_t, std::set<pg_shard_t>> &missing,
     std::map<hobject_t, std::set<pg_shard_t>> &inconsistent,
     std::map<hobject_t, std::list<pg_shard_t>> &authoritative,
     std::map<hobject_t, std::pair<std::optional<uint32_t>,
                         std::optional<uint32_t>>> &missing_digest,
     int &shallow_errors, int &deep_errors,
     Scrub::Store *store,
     const spg_t& pgid,
     const std::vector<int> &acting,
     std::ostream &errorstream);
   virtual uint64_t be_get_ondisk_size(
     uint64_t logical_size) = 0;
   virtual int be_deep_scrub(
     const hobject_t &oid,
     ScrubMap &map,
     ScrubMapBuilder &pos,
     ScrubMap::object &o) = 0;
   void be_omap_checks(
     const std::map<pg_shard_t,ScrubMap*> &maps,
     const std::set<hobject_t> &master_set,
     omap_stat_t& omap_stats,
     std::ostream &warnstream) const;

   static PGBackend *build_pg_backend(
     const pg_pool_t &pool,
     const std::map<std::string,std::string>& profile,
     Listener *l,
     coll_t coll,
     ObjectStore::CollectionHandle &ch,
     ObjectStore *store,
     CephContext *cct);
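
   /*
    * Construction sketch: the owning PG selects the concrete backend
    * (replicated or erasure-coded) through this factory based on the pool
    * type and erasure-code profile.  The arguments come from the owning
    * PG/OSD; the variable names here are illustrative:
    *
    *   PGBackend *backend = PGBackend::build_pg_backend(
    *     pool, profile, listener, coll, ch, store, cct);
    */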
};

#endif