From 8daa83a594a2e98f39d764422bfbdbc62c9efd44 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Fri, 19 Apr 2024 19:20:00 +0200
Subject: Adding upstream version 2:4.20.0+dfsg.

Signed-off-by: Daniel Baumann
---
 source3/torture/test_rpc_scale.c | 301 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 301 insertions(+)
 create mode 100644 source3/torture/test_rpc_scale.c

diff --git a/source3/torture/test_rpc_scale.c b/source3/torture/test_rpc_scale.c
new file mode 100644
index 0000000..6ef26f3
--- /dev/null
+++ b/source3/torture/test_rpc_scale.c
@@ -0,0 +1,301 @@
+/*
+ * Unix SMB/CIFS implementation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "source3/include/includes.h"
+#include "source3/torture/proto.h"
+#include "source3/libsmb/libsmb.h"
+#include "librpc/gen_ndr/ndr_spoolss_c.h"
+#include "lib/util/tevent_ntstatus.h"
+#include "source3/rpc_client/rpc_client.h"
+#include "source3/rpc_client/cli_pipe.h"
+#include "libcli/smb/smbXcli_base.h"
+
+extern int torture_nprocs;
+extern int torture_numops;
+
+struct rpc_scale_one_state {
+	struct tevent_context *ev;
+	struct cli_state *cli;
+	size_t num_iterations;
+	struct rpc_pipe_client *rpccli;
+	DATA_BLOB buffer;
+	uint32_t needed;
+	uint32_t num_printers;
+	union spoolss_PrinterInfo *printers;
+};
+
+static void rpc_scale_one_opened(struct tevent_req *subreq);
+static void rpc_scale_one_bound(struct tevent_req *subreq);
+static void rpc_scale_one_listed(struct tevent_req *subreq);
+
+static struct tevent_req *rpc_scale_one_send(
+	TALLOC_CTX *mem_ctx,
+	struct tevent_context *ev,
+	struct cli_state *cli,
+	size_t num_iterations)
+{
+	struct tevent_req *req = NULL, *subreq = NULL;
+	struct rpc_scale_one_state *state = NULL;
+
+	req = tevent_req_create(mem_ctx, &state, struct rpc_scale_one_state);
+	if (req == NULL) {
+		return NULL;
+	}
+	state->ev = ev;
+	state->cli = cli;
+	state->num_iterations = num_iterations;
+
+	subreq = rpc_pipe_open_np_send(
+		state, ev, cli, &ndr_table_spoolss);
+	if (tevent_req_nomem(subreq, req)) {
+		return tevent_req_post(req, ev);
+	}
+	tevent_req_set_callback(subreq, rpc_scale_one_opened, req);
+	return req;
+}
+
+static void rpc_scale_one_opened(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct rpc_scale_one_state *state = tevent_req_data(
+		req, struct rpc_scale_one_state);
+	struct pipe_auth_data *auth = NULL;
+	NTSTATUS status;
+
+	status = rpc_pipe_open_np_recv(subreq, state, &state->rpccli);
+	TALLOC_FREE(subreq);
+	if (tevent_req_nterror(req, status)) {
+		return;
+	}
+
+	status = rpccli_anon_bind_data(state, &auth);
+	if (tevent_req_nterror(req, status)) {
+		return;
+	}
+
+	subreq = rpc_pipe_bind_send(state, state->ev, state->rpccli, auth);
+	if (tevent_req_nomem(subreq, req)) {
+		return;
+	}
+	tevent_req_set_callback(subreq, rpc_scale_one_bound, req);
+}
+
+static void rpc_scale_one_bound(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct rpc_scale_one_state *state = tevent_req_data(
+		req, struct rpc_scale_one_state);
+	char *server = NULL;
+	NTSTATUS status;
+
+	status = rpc_pipe_bind_recv(subreq);
+	if (tevent_req_nterror(req, status)) {
+		return;
+	}
+
+	server = talloc_asprintf(
+		state,
+		"\\%s\n",
+		smbXcli_conn_remote_name(state->cli->conn));
+	if (tevent_req_nomem(server, req)) {
+		return;
+	}
+	state->buffer = data_blob_talloc(state, NULL, 4096);
+	if (tevent_req_nomem(state->buffer.data, req)) {
+		return;
+	}
+
+	subreq = dcerpc_spoolss_EnumPrinters_send(
+		state,
+		state->ev,
+		state->rpccli->binding_handle,
+		PRINTER_ENUM_LOCAL,
+		server,
+		1, /* level */
+		&state->buffer,
+		state->buffer.length,
+		&state->num_printers,
+		&state->printers,
+		&state->needed);
+	if (tevent_req_nomem(subreq, req)) {
+		return;
+	}
+	tevent_req_set_callback(subreq, rpc_scale_one_listed, req);
+}
+
+static void rpc_scale_one_listed(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct rpc_scale_one_state *state = tevent_req_data(
+		req, struct rpc_scale_one_state);
+	NTSTATUS status;
+	WERROR result;
+
+	status = dcerpc_spoolss_EnumPrinters_recv(subreq, state, &result);
+	if (tevent_req_nterror(req, status)) {
+		return;
+	}
+
+	if (!W_ERROR_IS_OK(result)) {
+		status = werror_to_ntstatus(result);
+		tevent_req_nterror(req, status);
+		return;
+	}
+
+	/*
+	 * This will trigger a sync close. Making that async will be a
+	 * lot of effort, and even with this being sync this test is
+	 * nasty enough.
+	 */
+	TALLOC_FREE(state->rpccli);
+
+	state->num_iterations -= 1;
+
+	if (state->num_iterations == 0) {
+		tevent_req_done(req);
+		return;
+	}
+
+	subreq = rpc_pipe_open_np_send(
+		state, state->ev, state->cli, &ndr_table_spoolss);
+	if (tevent_req_nomem(subreq, req)) {
+		return;
+	}
+	tevent_req_set_callback(subreq, rpc_scale_one_opened, req);
+}
+
+static NTSTATUS rpc_scale_one_recv(struct tevent_req *req)
+{
+	return tevent_req_simple_recv_ntstatus(req);
+}
+
+struct rpc_scale_state {
+	size_t num_reqs;
+	size_t done;
+};
+
+static void rpc_scale_done(struct tevent_req *subreq);
+
+static struct tevent_req *rpc_scale_send(
+	TALLOC_CTX *mem_ctx,
+	struct tevent_context *ev,
+	struct cli_state **clis)
+{
+	struct tevent_req *req = NULL;
+	struct rpc_scale_state *state = NULL;
+	size_t i, num_clis = talloc_array_length(clis);
+
+	req = tevent_req_create(mem_ctx, &state, struct rpc_scale_state);
+	if (req == NULL) {
+		return NULL;
+	}
+	state->num_reqs = num_clis;
+
+	for (i=0; i<num_clis; i++) {
+		struct tevent_req *subreq = rpc_scale_one_send(
+			state, ev, clis[i], torture_numops);
+		if (tevent_req_nomem(subreq, req)) {
+			return tevent_req_post(req, ev);
+		}
+		tevent_req_set_callback(subreq, rpc_scale_done, req);
+	}
+	return req;
+}
+
+static void rpc_scale_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct rpc_scale_state *state = tevent_req_data(
+		req, struct rpc_scale_state);
+	NTSTATUS status;
+
+	status = rpc_scale_one_recv(subreq);
+	TALLOC_FREE(subreq);
+	if (tevent_req_nterror(req, status)) {
+		return;
+	}
+	state->done += 1;
+
+	if (state->done == state->num_reqs) {
+		tevent_req_done(req);
+	}
+}
+
+static NTSTATUS rpc_scale_recv(struct tevent_req *req)
+{
+	return tevent_req_simple_recv_ntstatus(req);
+}
+
+bool run_rpc_scale(int dummy)
+{
+	TALLOC_CTX *frame = talloc_stackframe();
+	struct cli_state **clis = NULL;
+	struct tevent_req *req = NULL;
+	struct tevent_context *ev = NULL;
+	bool ok, result = false;
+	NTSTATUS status;
+	int i;
+
+	clis = talloc_zero_array(
+		talloc_tos(), struct cli_state *, torture_nprocs);
+	if (clis == NULL) {
+		fprintf(stderr, "talloc failed\n");
+		goto fail;
+	}
+
+	for (i=0; i