author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-23 16:45:13 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-23 16:45:13 +0000
commit     389020e14594e4894e28d1eb9103c210b142509e
tree       2ba734cdd7a243f46dda7c3d0cc88c2293d9699f
parent     Adding upstream version 18.2.2.

Adding upstream version 18.2.3. (tag: upstream/18.2.3)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/rgw/rgw_s3select.cc')
-rw-r--r--  src/rgw/rgw_s3select.cc  |  14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/src/rgw/rgw_s3select.cc b/src/rgw/rgw_s3select.cc
index c7eaa6984..4080d0938 100644
--- a/src/rgw/rgw_s3select.cc
+++ b/src/rgw/rgw_s3select.cc
@@ -408,7 +408,7 @@ int RGWSelectObj_ObjStore_S3::run_s3select_on_csv(const char* query, const char*
} else if(m_header_info.compare("USE")==0) {
csv.use_header_info=true;
}
- //m_s3_csv_object.set_external_debug_system(fp_debug_mesg);
+ m_s3_csv_object.set_external_debug_system(fp_debug_mesg);
m_s3_csv_object.set_result_formatters(fp_s3select_result_format,fp_result_header_format);
m_s3_csv_object.set_csv_query(&s3select_syntax, csv);
if (s3select_syntax.get_error_description().empty() == false) {
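
The hunk above re-enables the CSV engine's external debug hook, routing the engine's internal diagnostics through the fp_debug_mesg callback instead of leaving the call commented out. Below is a minimal, self-contained sketch of that callback pattern; csv_engine and set_debug_callback are illustrative stand-in names, not the real s3select API:

    #include <functional>
    #include <iostream>
    #include <utility>

    // Sketch of the debug-hook pattern: the engine stores a caller-supplied
    // callback and routes internal diagnostics through it when one is set.
    // Class and method names here are illustrative only.
    class csv_engine {
      std::function<void(const char*)> m_debug;
    public:
      void set_debug_callback(std::function<void(const char*)> cb) {
        m_debug = std::move(cb);
      }
      void run() {
        if (m_debug) m_debug("parsing csv chunk");  // emit only when hooked up
      }
    };

    int main() {
      csv_engine engine;
      engine.set_debug_callback([](const char* msg) {
        std::cout << "[s3select debug] " << msg << '\n';
      });
      engine.run();
    }

Storing the callback in a std::function keeps the engine decoupled from the host's logging machinery; nothing is emitted until a hook is installed.
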
@@ -527,7 +527,8 @@ int RGWSelectObj_ObjStore_S3::run_s3select_on_json(const char* query, const char
}
//initializing json processor
- m_s3_json_object.set_json_query(&s3select_syntax);
+ json_object::csv_definitions output_definition;
+ m_s3_json_object.set_json_query(&s3select_syntax,output_definition);
if (input == nullptr) {
input = "";
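
This hunk adapts to the updated s3select JSON interface: set_json_query now takes an output-formatting definition (a default-constructed json_object::csv_definitions) alongside the parsed query. A hedged sketch of the new call shape, using stand-in types rather than the s3select headers:

    #include <iostream>
    #include <string>

    // Stand-in types modeling the two-argument call: the JSON processor now
    // receives an explicit output-formatting definition next to the parsed
    // query. These structs are illustrative, not the s3select declarations.
    struct csv_definitions {
      char output_column_delimiter = ',';
      char output_row_delimiter = '\n';
    };

    struct json_processor {
      void set_json_query(const std::string* query, const csv_definitions& out) {
        std::cout << "query: " << *query
                  << " column delimiter: '" << out.output_column_delimiter << "'\n";
      }
    };

    int main() {
      std::string query = "select _1.a from s3object[*];";
      json_processor processor;
      csv_definitions output_definition;  // default-constructed, as in the patch
      processor.set_json_query(&query, output_definition);
    }

Passing the definitions by const reference mirrors the patch, where output_definition is default-constructed immediately before the call.
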
@@ -862,20 +863,23 @@ int RGWSelectObj_ObjStore_S3::csv_processing(bufferlist& bl, off_t ofs, off_t le
continue;
}
- if((ofs + len) > it.length()){
+
+ if(ofs > it.length()){
+ //safety check
ldpp_dout(this, 10) << "offset and length may cause invalid read: ofs = " << ofs << " len = " << len << " it.length() = " << it.length() << dendl;
ofs = 0;
- len = it.length();
}
if(m_is_trino_request){
+ //TODO replace len with it.length() ? ; test Trino flow with compressed objects.
+ //is it possible to send get-by-ranges? in parallel?
shape_chunk_per_trino_requests(&(it)[0], ofs, len);
}
ldpp_dout(this, 10) << "s3select: chunk: ofs = " << ofs << " len = " << len << " it.length() = " << it.length() << " m_object_size_for_processing = " << m_object_size_for_processing << dendl;
m_aws_response_handler.update_processed_size(it.length());//NOTE : to run analysis to validate len is aligned with m_processed_bytes
- status = run_s3select_on_csv(m_sql_query.c_str(), &(it)[0] + ofs, len);
+ status = run_s3select_on_csv(m_sql_query.c_str(), &(it)[0] + ofs, it.length());
if (status<0) {
return -EINVAL;
}
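
The final hunk replaces the old (ofs + len) > it.length() guard with a pure offset check (only ofs is reset on failure; len is no longer clamped) and passes it.length() rather than the caller-supplied len to run_s3select_on_csv; the added TODO records that the Trino path with compressed objects and ranged GETs still needs testing. A standalone sketch of the reworked bounds check, with std::string standing in for the bufferlist segment and process_chunk as a hypothetical wrapper:

    #include <cstdio>
    #include <string>

    // Minimal sketch of the new safety check; process_chunk is an
    // illustrative name, and std::string stands in for the buffer segment.
    static void process_chunk(const std::string& chunk, size_t ofs, size_t len) {
      if (ofs > chunk.length()) {
        // safety check: reading from chunk.data() + ofs would be out of
        // bounds, so log the suspect values and fall back to offset zero
        std::printf("invalid read: ofs = %zu len = %zu chunk = %zu\n",
                    ofs, len, chunk.length());
        ofs = 0;
      }
      // as in the patch, the chunk's own length (not the caller-supplied
      // len) is what gets handed to the processing engine
      std::printf("processing from ofs = %zu, engine len = %zu\n",
                  ofs, chunk.length());
    }

    int main() {
      process_chunk("a,b,c\n1,2,3\n", 4, 8);  // valid offset, no reset
      process_chunk("a,b,c\n", 100, 8);       // triggers the safety reset
    }

Compiled as-is, the second call exercises the fallback path, logging the suspect offset before processing restarts at zero.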