// Don't throw an exception here ourselves; leave the decision to the S3 server.
LOG_WARNING(&Logger::get("WriteBufferFromS3"), "Maximum part number in S3 protocol has been reached (too many parts). The server may not accept this whole upload.");
logging.info("Phase 3")
put_communication_data(started_cluster,"=== Put test ===")
values="(1, 2, 3), (3, 2, 1), (78, 43, 45)"
put_query="insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(started_cluster.mock_host,started_cluster.preserving_data_port,started_cluster.bucket,format,values)
put_communication_data(started_cluster,"=== Put test CSV ===")
put_query="insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') format CSV".format(started_cluster.mock_host,started_cluster.preserving_data_port,started_cluster.bucket,format)
put_communication_data(started_cluster,"=== Put with redirect test ===")
other_values="(1, 1, 1), (1, 1, 1), (11, 11, 11)"
query="insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(started_cluster.mock_host,started_cluster.redirecting_preserving_data_port,started_cluster.bucket,format,other_values)
run_query(instance, query)
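# Read the rows back, additionally computing column1*column2*column3 to check the stored data.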
query="select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(started_cluster.mock_host,started_cluster.preserving_data_port,started_cluster.bucket,format)
put_query="insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') format CSV".format(started_cluster.mock_host,started_cluster.multipart_preserving_data_port,started_cluster.bucket,format)