1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_realm_reloader.h"
#include "rgw_auth_registry.h"
#include "rgw_bucket.h"
#include "rgw_log.h"
#include "rgw_rest.h"
#include "rgw_user.h"
#include "rgw_process_env.h"
#include "rgw_sal.h"
#include "rgw_sal_rados.h"
#include "services/svc_zone.h"
#include "common/errno.h"
#define dout_subsys ceph_subsys_rgw

#undef dout_prefix
#define dout_prefix (*_dout << "rgw realm reloader: ")

// Safe callbacks from SafeTimer are unnecessary: reload() can take a long
// time, so we don't want to hold the mutex and block handle_notify() for the
// duration.
static constexpr bool USE_SAFE_TIMER_CALLBACKS = false;
// Construct the reloader and start its timer thread.
//
// @param env               process-wide environment; env.driver is torn down
//                          and replaced in place by reload()
// @param implicit_tenants  auth setting forwarded when rebuilding the
//                          StrategyRegistry after a reload
// @param service_map_meta  metadata re-registered with the service map after
//                          each reload
// @param frontends         used to pause/resume request processing around a
//                          reload
//
// NOTE(review): the init list names 'timer' before 'mutex', but members are
// initialized in class declaration order, not list order; this assumes
// SafeTimer only stores the mutex reference at construction — confirm
// against the header's member declaration order.
RGWRealmReloader::RGWRealmReloader(RGWProcessEnv& env,
                                   const rgw::auth::ImplicitTenants& implicit_tenants,
                                   std::map<std::string, std::string>& service_map_meta,
                                   Pauser* frontends)
  : env(env),
    implicit_tenants(implicit_tenants),
    service_map_meta(service_map_meta),
    frontends(frontends),
    timer(env.driver->ctx(), mutex, USE_SAFE_TIMER_CALLBACKS),
    mutex(ceph::make_mutex("RGWRealmReloader")),
    reload_scheduled(nullptr)
{
  timer.init();
}
RGWRealmReloader::~RGWRealmReloader()
{
  // the timer was constructed with safe_callbacks=false
  // (USE_SAFE_TIMER_CALLBACKS), so shutdown() must be called with the
  // timer's mutex held
  std::lock_guard lock{mutex};
  timer.shutdown();
}
/// Timer completion that forwards to RGWRealmReloader::reload().
class RGWRealmReloader::C_Reload : public Context {
 public:
  explicit C_Reload(RGWRealmReloader* parent) : parent(parent) {}

  // fired once by SafeTimer; the completion code is unused
  void finish(int) override {
    parent->reload();
  }

 private:
  RGWRealmReloader* parent;
};
// Handle a realm-change notification: schedule a single reload() on the
// timer thread. Repeated notifications while one is pending are coalesced
// into the already-scheduled event.
void RGWRealmReloader::handle_notify(RGWRealmNotify type,
                                     bufferlist::const_iterator& p)
{
  if (!env.driver) {
    /* we're in the middle of reload */
    return;
  }
  CephContext *const cct = env.driver->ctx();

  std::lock_guard lock{mutex};
  if (reload_scheduled) {
    // an event is already queued; this notification folds into it
    ldout(cct, 4) << "Notification on realm, reconfiguration "
        "already scheduled" << dendl;
    return;
  }

  reload_scheduled = new C_Reload(this);
  cond.notify_one(); // wake reload() if it blocked on a bad configuration

  // schedule reload() without delay
  timer.add_event_after(0, reload_scheduled);

  ldout(cct, 4) << "Notification on realm, reconfiguration scheduled" << dendl;
}
// Rebuild the RGW driver after a realm configuration change.
//
// Runs on the SafeTimer thread (scheduled by handle_notify()). Pauses all
// frontends, destroys the current driver, then loops creating a new one:
// on failure it blocks on 'cond' until handle_notify() signals another
// configuration update and retries; if another notification arrives while
// a driver was successfully created, that driver is discarded and creation
// restarts so the newest configuration wins. Once a driver sticks, the
// service map registration and the REST/usage/auth/lua subsystems are
// reinitialized and the frontends are resumed.
//
// Fixes vs. previous revision:
//  - "Creating new driver" is now logged *before* DriverManager::get_storage
//    runs (it was previously logged after the call had already completed)
//  - the wake-up message now uses ldpp_dout(&dp, ...) like every other
//    message in this function, instead of a bare ldout(cct, ...)
void RGWRealmReloader::reload()
{
  CephContext *const cct = env.driver->ctx();
  const DoutPrefix dp(cct, dout_subsys, "rgw realm reloader: ");
  ldpp_dout(&dp, 1) << "Pausing frontends for realm update..." << dendl;

  frontends->pause();

  ldpp_dout(&dp, 1) << "Frontends paused" << dendl;

  // TODO: make RGWRados responsible for rgw_log_usage lifetime
  rgw_log_usage_finalize();

  // destroy the existing driver
  DriverManager::close_storage(env.driver);
  env.driver = nullptr;

  ldpp_dout(&dp, 1) << "driver closed" << dendl;
  {
    // allow a new notify to reschedule us. it's important that we do this
    // before we start loading the new realm, or we could miss some updates
    std::lock_guard lock{mutex};
    reload_scheduled = nullptr;
  }

  while (!env.driver) {
    // recreate and initialize a new driver
    DriverManager::Config cfg;
    cfg.store_name = "rados";
    cfg.filter_name = "none";

    ldpp_dout(&dp, 1) << "Creating new driver" << dendl;
    env.driver =
      DriverManager::get_storage(&dp, cct,
          cfg,
          cct->_conf->rgw_enable_gc_threads,
          cct->_conf->rgw_enable_lc_threads,
          cct->_conf->rgw_enable_quota_threads,
          cct->_conf->rgw_run_sync_thread,
          cct->_conf.get_val<bool>("rgw_dynamic_resharding"),
          cct->_conf->rgw_cache_enabled);

    rgw::sal::Driver* store_cleanup = nullptr;
    {
      std::unique_lock lock{mutex};

      // failure to recreate RGWRados is not a recoverable error, but we
      // don't want to assert or abort the entire cluster. instead, just
      // sleep until we get another notification, and retry until we get
      // a working configuration
      if (env.driver == nullptr) {
        ldpp_dout(&dp, -1) << "Failed to reinitialize RGWRados after a realm "
            "configuration update. Waiting for a new update." << dendl;

        // sleep until another event is scheduled (handle_notify() sets
        // reload_scheduled under the same mutex and notifies 'cond')
        cond.wait(lock, [this] { return reload_scheduled; });
        ldpp_dout(&dp, 1) << "Woke up with a new configuration, retrying "
            "RGWRados initialization." << dendl;
      }

      if (reload_scheduled) {
        // cancel the event; we'll handle it now
        timer.cancel_event(reload_scheduled);
        reload_scheduled = nullptr;

        // if we successfully created a driver, clean it up outside of the
        // lock, then continue to loop and recreate another
        std::swap(env.driver, store_cleanup);
      }
    }

    if (store_cleanup) {
      ldpp_dout(&dp, 4) << "Got another notification, restarting RGWRados "
          "initialization." << dendl;

      DriverManager::close_storage(store_cleanup);
    }
  }

  int r = env.driver->register_to_service_map(&dp, "rgw", service_map_meta);
  if (r < 0) {
    ldpp_dout(&dp, -1) << "ERROR: failed to register to service map: "
        << cpp_strerror(-r) << dendl;
    /* ignore error */
  }

  ldpp_dout(&dp, 1) << "Finishing initialization of new driver" << dendl;
  // finish initializing the new driver
  ldpp_dout(&dp, 1) << " - REST subsystem init" << dendl;
  rgw_rest_init(cct, env.driver->get_zone()->get_zonegroup());
  ldpp_dout(&dp, 1) << " - usage subsystem init" << dendl;
  rgw_log_usage_init(cct, env.driver);

  /* Initialize the registry of auth strategies which will coordinate
   * the dynamic reconfiguration. */
  env.auth_registry = rgw::auth::StrategyRegistry::create(
      cct, implicit_tenants, env.driver);
  env.lua.manager = env.driver->get_lua_manager();

  ldpp_dout(&dp, 1) << "Resuming frontends with new realm configuration." << dendl;

  frontends->resume(env.driver);
}
|