Skip to content

Commit 9234e16

Browse files
committed
deprecate unused chunk_size parameter of NewRowGroup()
1 parent: 857092c · commit: 9234e16

File tree

4 files changed

+15
-11
lines changed

4 files changed

+15
-11
lines changed

Diff for: cpp/src/arrow/dataset/file_parquet_test.cc

+1-2
Original file line number | Diff line number | Diff line change
@@ -85,15 +85,14 @@ class ParquetFormatHelper {
8585
static Status WriteRecordBatch(const RecordBatch& batch,
8686
parquet::arrow::FileWriter* writer) {
8787
auto schema = batch.schema();
88-
auto size = batch.num_rows();
8988

9089
if (!schema->Equals(*writer->schema(), false)) {
9190
return Status::Invalid("RecordBatch schema does not match this writer's. batch:'",
9291
schema->ToString(), "' this:'", writer->schema()->ToString(),
9392
"'");
9493
}
9594

96-
RETURN_NOT_OK(writer->NewRowGroup(size));
95+
RETURN_NOT_OK(writer->NewRowGroup());
9796
for (int i = 0; i < batch.num_columns(); i++) {
9897
RETURN_NOT_OK(writer->WriteColumnChunk(*batch.column(i)));
9998
}

Diff for: cpp/src/parquet/arrow/arrow_reader_writer_test.cc

+5-5
Original file line number | Diff line number | Diff line change
@@ -739,7 +739,7 @@ class ParquetIOTestBase : public ::testing::Test {
739739
ASSERT_OK_NO_THROW(FileWriter::Make(::arrow::default_memory_pool(),
740740
MakeWriter(schema), arrow_schema,
741741
default_arrow_writer_properties(), &writer));
742-
ASSERT_OK_NO_THROW(writer->NewRowGroup(values->length()));
742+
ASSERT_OK_NO_THROW(writer->NewRowGroup());
743743
ASSERT_OK_NO_THROW(writer->WriteColumnChunk(*values));
744744
ASSERT_OK_NO_THROW(writer->Close());
745745
// writer->Close() should be idempotent
@@ -1053,7 +1053,7 @@ TYPED_TEST(TestParquetIO, SingleColumnRequiredChunkedWrite) {
10531053
this->MakeWriter(schema), arrow_schema,
10541054
default_arrow_writer_properties(), &writer));
10551055
for (int i = 0; i < 4; i++) {
1056-
ASSERT_OK_NO_THROW(writer->NewRowGroup(chunk_size));
1056+
ASSERT_OK_NO_THROW(writer->NewRowGroup());
10571057
std::shared_ptr<Array> sliced_array = values->Slice(i * chunk_size, chunk_size);
10581058
ASSERT_OK_NO_THROW(writer->WriteColumnChunk(*sliced_array));
10591059
}
@@ -1126,7 +1126,7 @@ TYPED_TEST(TestParquetIO, SingleColumnOptionalChunkedWrite) {
11261126
this->MakeWriter(schema), arrow_schema,
11271127
default_arrow_writer_properties(), &writer));
11281128
for (int i = 0; i < 4; i++) {
1129-
ASSERT_OK_NO_THROW(writer->NewRowGroup(chunk_size));
1129+
ASSERT_OK_NO_THROW(writer->NewRowGroup());
11301130
std::shared_ptr<Array> sliced_array = values->Slice(i * chunk_size, chunk_size);
11311131
ASSERT_OK_NO_THROW(writer->WriteColumnChunk(*sliced_array));
11321132
}
@@ -5128,7 +5128,7 @@ class TestIntegerAnnotateDecimalTypeParquetIO : public TestParquetIO<TestType> {
51285128
::arrow::default_memory_pool(),
51295129
ParquetFileWriter::Open(this->sink_, schema_node, writer_properties),
51305130
arrow_schema, default_arrow_writer_properties(), &writer));
5131-
ASSERT_OK_NO_THROW(writer->NewRowGroup(values->length()));
5131+
ASSERT_OK_NO_THROW(writer->NewRowGroup());
51325132
ASSERT_OK_NO_THROW(writer->WriteColumnChunk(*values));
51335133
ASSERT_OK_NO_THROW(writer->Close());
51345134
}
@@ -5460,7 +5460,7 @@ TEST(TestArrowReadWrite, OperationsOnClosedWriter) {
54605460
// Operations on closed writer are invalid
54615461
ASSERT_OK(writer->Close());
54625462

5463-
ASSERT_RAISES(Invalid, writer->NewRowGroup(1));
5463+
ASSERT_RAISES(Invalid, writer->NewRowGroup());
54645464
ASSERT_RAISES(Invalid, writer->WriteColumnChunk(table->column(0), 0, 1));
54655465
ASSERT_RAISES(Invalid, writer->NewBufferedRowGroup());
54665466
ASSERT_OK_AND_ASSIGN(auto record_batch, table->CombineChunksToBatch());

Diff for: cpp/src/parquet/arrow/writer.cc

+2-2
Original file line number | Diff line number | Diff line change
@@ -305,7 +305,7 @@ class FileWriterImpl : public FileWriter {
305305
default_arrow_reader_properties(), &schema_manifest_);
306306
}
307307

308-
Status NewRowGroup(int64_t chunk_size) override {
308+
Status NewRowGroup() override {
309309
RETURN_NOT_OK(CheckClosed());
310310
if (row_group_writer_ != nullptr) {
311311
PARQUET_CATCH_NOT_OK(row_group_writer_->Close());
@@ -379,7 +379,7 @@ class FileWriterImpl : public FileWriter {
379379
}
380380

381381
auto WriteRowGroup = [&](int64_t offset, int64_t size) {
382-
RETURN_NOT_OK(NewRowGroup(size));
382+
RETURN_NOT_OK(NewRowGroup());
383383
for (int i = 0; i < table.num_columns(); i++) {
384384
RETURN_NOT_OK(WriteColumnChunk(table.column(i), offset, size));
385385
}

Diff for: cpp/src/parquet/arrow/writer.h

+7-2
Original file line number | Diff line number | Diff line change
@@ -87,9 +87,14 @@ class PARQUET_EXPORT FileWriter {
8787
/// \brief Start a new row group.
8888
///
8989
/// Returns an error if not all columns have been written.
90+
virtual ::arrow::Status NewRowGroup() = 0;
91+
92+
/// \brief Start a new row group.
9093
///
91-
/// \param chunk_size the number of rows in the next row group.
92-
virtual ::arrow::Status NewRowGroup(int64_t chunk_size) = 0;
94+
/// \deprecated Deprecated in 19.0.0.
95+
ARROW_DEPRECATED(
96+
"Deprecated in 19.0.0. Use NewRowGroup() without the `chunk_size` argument.")
97+
virtual ::arrow::Status NewRowGroup(int64_t chunk_size) { return NewRowGroup(); }
9398

9499
/// \brief Write ColumnChunk in row group using an array.
95100
virtual ::arrow::Status WriteColumnChunk(const ::arrow::Array& data) = 0;

0 commit comments

Comments (0)