summaryrefslogtreecommitdiffstats
path: root/src/rgw/rgw_civetweb_frontend.cc
blob: 6c672db49b6b5b5840816456b28befe3d40c5dfe (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#include <memory>
#include <set>
#include <string>
#include <string_view>

#include "rgw_frontend.h"
#include "rgw_client_io_filters.h"
#include "rgw_dmclock_sync_scheduler.h"

#define dout_subsys ceph_subsys_rgw

namespace dmc = rgw::dmclock;

/* Construct the CivetWeb frontend. Depending on the configured dmclock
 * scheduler type, optionally build a synchronous dmclock scheduler that
 * process() will consult for each request; in "none"/"throttler" modes
 * `scheduler` is left null. */
RGWCivetWebFrontend::RGWCivetWebFrontend(RGWProcessEnv& env,
					 RGWFrontendConfig *conf,
					 dmc::SchedulerCtx& sched_ctx)
  : conf(conf),
    ctx(nullptr),
    env(env)
{
  auto sched_t = dmc::get_scheduler_t(cct());
  switch (sched_t) {
  case dmc::scheduler_t::none: [[fallthrough]];
  case dmc::scheduler_t::throttler:
    /* No frontend-side scheduler; scheduler stays nullptr. */
    break;
  case dmc::scheduler_t::dmclock: {
    /* Braces give the case its own scope so the locals below don't leak
     * into (or block the addition of) later cases. */
    // TODO: keep track of server ready state and use that here civetweb
    // internally tracks in the ctx the threads used and free, while it is
    // expected with the current implementation that the threads waiting on the
    // queue would still show up in the "used" queue, it might be a useful thing
    // to make decisions on in the future. Also while reconfiguring we should
    // probably set this to false
    auto server_ready_f = []() -> bool { return true; };

    scheduler = std::make_unique<dmc::SyncScheduler>(
        cct(),
        std::ref(sched_ctx.get_dmc_client_counters()),
        *sched_ctx.get_dmc_client_config(),
        server_ready_f,
        std::ref(dmc::SyncScheduler::handle_request_cb),
        dmc::AtLimit::Reject);
    break;
  }
  }
}

static int civetweb_callback(struct mg_connection* conn)
{
  const struct mg_request_info* const req_info = mg_get_request_info(conn);
  return static_cast<RGWCivetWebFrontend *>(req_info->user_data)->process(conn);
}

int RGWCivetWebFrontend::process(struct mg_connection*  const conn)
{
  /* Hold a read lock over access to env.store for reconfiguration. */
  std::shared_lock lock{env.mutex};

  RGWCivetWeb cw_client(conn);
  auto real_client_io = rgw::io::add_reordering(
                          rgw::io::add_buffering(dout_context,
                            rgw::io::add_chunking(
                              rgw::io::add_conlen_controlling(
                                &cw_client))));
  RGWRestfulIO client_io(dout_context, &real_client_io);

  RGWRequest req(env.store->getRados()->get_new_req_id());
  int http_ret = 0;
  ceph::coarse_real_clock::duration latency{};
  //assert (scheduler != nullptr);
  int ret = process_request(env.store, env.rest, &req, env.uri_prefix,
                            *env.auth_registry, &client_io, env.olog,
                            null_yield, scheduler.get(), nullptr, &latency, &http_ret);
  if (ret < 0) {
    /* We don't really care about return code. */
    dout(20) << "process_request() returned " << ret << dendl;
  }

  if (http_ret <= 0) {
    /* Mark as processed. */
    return 1;
  }

  return http_ret;
}

/* Start the CivetWeb server: seed option defaults, translate RGW-level
 * config (port=, run_as_user) into CivetWeb options, strip glue-layer
 * keys, and call mg_start(). Returns 0 on success, -EIO on failure. */
int RGWCivetWebFrontend::run()
{
  auto& conf_map = conf->get_config_map();

  /* Defaults only; explicit frontend configuration wins. */
  set_conf_default(conf_map, "num_threads",
                   std::to_string(g_conf()->rgw_thread_pool_size));
  set_conf_default(conf_map, "decode_url", "no");
  set_conf_default(conf_map, "enable_keep_alive", "yes");
  set_conf_default(conf_map, "validate_http_method", "no");
  set_conf_default(conf_map, "canonicalize_url_path", "no");
  set_conf_default(conf_map, "enable_auth_domain_check", "no");
  set_conf_default(conf_map, "allow_unicode_in_urls", "yes");
  set_conf_default(conf_map, "request_timeout_ms", "65000");

  /* Collapse every port= entry (each of which may itself hold several
   * '+'-separated values) into CivetWeb's single comma-separated
   * listening_ports option. */
  std::string ports_csv;
  const auto [ports_begin, ports_end] = conf_map.equal_range("port");
  for (auto it = ports_begin; it != ports_end; ++it) {
    std::string entry = it->second;
    std::replace(entry.begin(), entry.end(), '+', ',');
    if (! ports_csv.empty()) {
      ports_csv.push_back(',');
    }
    ports_csv.append(entry);
  }
  if (ports_csv.empty()) {
    ports_csv = "80";  /* historic default */
  }
  conf_map.emplace("listening_ports", std::move(ports_csv));

  /* Set run_as_user. This will cause civetweb to invoke setuid() and setgid()
   * based on pw_uid and pw_gid obtained from pw_name. */
  std::string uid_string = g_ceph_context->get_set_uid_string();
  if (! uid_string.empty()) {
    conf_map.emplace("run_as_user", std::move(uid_string));
  }

  /* Keys understood only by the RGW glue layer, not by CivetWeb; they
   * must be filtered out or CivetWeb would signal an error. */
  const std::set<std::string_view> rgw_opts = { "port", "prefix" };

  /* Flatten the map into CivetWeb's NULL-terminated key/value array.
   * The const char* entries point into conf_map, which outlives the
   * mg_start() call below. */
  std::vector<const char*> options;
  for (const auto& [key, value] : conf_map) {
    if (rgw_opts.count(key) == 0) {
      options.push_back(key.c_str());
      options.push_back(value.c_str());
      dout(20) << "civetweb config: " << key << ": " << value << dendl;
    }
  }
  options.push_back(nullptr);

  /* Hand control to CivetWeb; civetweb_callback routes requests back. */
  struct mg_callbacks cb;
  // FIPS zeroization audit 20191115: this memset is not security related.
  memset((void *)&cb, 0, sizeof(cb));
  cb.begin_request = civetweb_callback;
  cb.log_message = rgw_civetweb_log_callback;
  cb.log_access = rgw_civetweb_log_access_callback;
  ctx = mg_start(&cb, this, options.data());

  return ctx ? 0 : -EIO;
} /* RGWCivetWebFrontend::run */