
Commit c9dfece

update multi versions
fix checkstyle
1 parent d9679ad commit c9dfece

23 files changed (+193, −70 lines)

.palantir/revapi.yml

Lines changed: 27 additions & 0 deletions
@@ -1201,6 +1201,33 @@ acceptedBreaks:
       old: "method void org.apache.iceberg.MetadataUpdate.SetStatistics::<init>(long,\
         \ org.apache.iceberg.StatisticsFile)"
       justification: "Removing deprecated code"
+    org.apache.iceberg:iceberg-data:
+    - code: "java.method.numberOfParametersChanged"
+      old: "method void org.apache.iceberg.data.GenericAppenderFactory::<init>(org.apache.iceberg.Schema,\
+        \ org.apache.iceberg.PartitionSpec, int[], org.apache.iceberg.Schema, org.apache.iceberg.Schema)"
+      new: "method void org.apache.iceberg.data.GenericAppenderFactory::<init>(org.apache.iceberg.Table,\
+        \ int[], org.apache.iceberg.Schema, org.apache.iceberg.Schema)"
+      justification: "The previous API should be deprecated because, by default, it\
+        \ prevents table properties from being used by the metric config."
+    - code: "java.method.parameterTypeChanged"
+      old: "parameter void org.apache.iceberg.data.GenericAppenderFactory::<init>(===org.apache.iceberg.Schema===)"
+      new: "parameter void org.apache.iceberg.data.GenericAppenderFactory::<init>(===org.apache.iceberg.Table===)"
+      justification: "The previous API should be deprecated because, by default, it\
+        \ prevents table properties from being used by the metric config."
+    - code: "java.method.parameterTypeChanged"
+      old: "parameter void org.apache.iceberg.data.GenericAppenderFactory::<init>(===org.apache.iceberg.Schema===,\
+        \ org.apache.iceberg.PartitionSpec)"
+      new: "parameter void org.apache.iceberg.data.GenericAppenderFactory::<init>(===org.apache.iceberg.Table===,\
+        \ org.apache.iceberg.Schema)"
+      justification: "The previous API should be deprecated because, by default, it\
+        \ prevents table properties from being used by the metric config."
+    - code: "java.method.parameterTypeChanged"
+      old: "parameter void org.apache.iceberg.data.GenericAppenderFactory::<init>(org.apache.iceberg.Schema,\
+        \ ===org.apache.iceberg.PartitionSpec===)"
+      new: "parameter void org.apache.iceberg.data.GenericAppenderFactory::<init>(org.apache.iceberg.Table,\
+        \ ===org.apache.iceberg.Schema===)"
+      justification: "The previous API should be deprecated because, by default, it\
+        \ prevents table properties from being used by the metric config."
     org.apache.iceberg:iceberg-parquet:
     - code: "java.class.visibilityReduced"
       old: "class org.apache.iceberg.data.parquet.BaseParquetReaders<T extends java.lang.Object>"

flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/actions/TestRewriteDataFilesAction.java

Lines changed: 2 additions & 2 deletions
@@ -331,8 +331,8 @@ public void testRewriteLargeTableHasResiduals() throws IOException {
   @TestTemplate
   public void testRewriteAvoidRepeateCompress() throws IOException {
     List<Record> expected = Lists.newArrayList();
-    Schema schema = icebergTableUnPartitioned.schema();
-    GenericAppenderFactory genericAppenderFactory = new GenericAppenderFactory(schema);
+    GenericAppenderFactory genericAppenderFactory =
+        new GenericAppenderFactory(icebergTableUnPartitioned);
     File file = File.createTempFile("junit", null, temp.toFile());
     int count = 0;
     try (FileAppender<Record> fileAppender =

flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/source/assigner/TestWatermarkBasedSplitAssigner.java

Lines changed: 17 additions & 2 deletions
@@ -21,6 +21,7 @@
 import static org.apache.iceberg.types.Types.NestedField.required;
 import static org.assertj.core.api.Assertions.assertThat;
 
+import java.io.File;
 import java.io.IOException;
 import java.time.Instant;
 import java.time.LocalDateTime;
@@ -32,6 +33,7 @@
 import java.util.stream.IntStream;
 import org.apache.iceberg.FileFormat;
 import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
 import org.apache.iceberg.data.GenericAppenderFactory;
 import org.apache.iceberg.data.GenericRecord;
 import org.apache.iceberg.data.RandomGenericData;
@@ -42,15 +44,28 @@
 import org.apache.iceberg.flink.source.split.IcebergSourceSplit;
 import org.apache.iceberg.flink.source.split.SerializableComparator;
 import org.apache.iceberg.flink.source.split.SplitComparators;
+import org.apache.iceberg.hadoop.HadoopTables;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
 import org.apache.iceberg.types.Types;
 import org.apache.iceberg.util.SerializationUtil;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 public class TestWatermarkBasedSplitAssigner extends SplitAssignerTestBase {
   public static final Schema SCHEMA =
       new Schema(required(1, "timestamp_column", Types.TimestampType.withoutZone()));
-  private static final GenericAppenderFactory APPENDER_FACTORY = new GenericAppenderFactory(SCHEMA);
+
+  @TempDir private static File tableLocation;
+
+  private static GenericAppenderFactory appenderFactory;
+
+  @BeforeAll
+  public static void beforeAll() {
+    HadoopTables tables = new HadoopTables();
+    Table table = tables.create(SCHEMA, tableLocation.toPath().toString());
+    appenderFactory = new GenericAppenderFactory(table);
+  }
 
   @Override
   protected SplitAssigner splitAssigner() {
@@ -138,7 +153,7 @@ private IcebergSourceSplit splitFromRecords(List<List<Record>> records) {
     try {
       return IcebergSourceSplit.fromCombinedScanTask(
           ReaderUtil.createCombinedScanTask(
-              records, temporaryFolder, FileFormat.PARQUET, APPENDER_FACTORY));
+              records, temporaryFolder, FileFormat.PARQUET, appenderFactory));
     } catch (IOException e) {
       throw new RuntimeException("Split creation exception", e);
     }

flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderFunctionTestBase.java

Lines changed: 11 additions & 3 deletions
@@ -34,8 +34,10 @@
 import org.apache.iceberg.data.Record;
 import org.apache.iceberg.flink.TestFixtures;
 import org.apache.iceberg.flink.source.split.IcebergSourceSplit;
+import org.apache.iceberg.hadoop.HadoopTables;
 import org.apache.iceberg.io.CloseableIterator;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.TestTemplate;
 import org.junit.jupiter.api.extension.ExtendWith;
 import org.junit.jupiter.api.io.TempDir;
@@ -52,7 +54,7 @@ public static Object[][] parameters() {
     };
   }
 
-  @TempDir protected Path temporaryFolder;
+  @TempDir protected static Path temporaryFolder;
 
   protected abstract ReaderFunction<T> readerFunction();
 
@@ -61,8 +63,14 @@ public static Object[][] parameters() {
   @Parameter(index = 0)
   private FileFormat fileFormat;
 
-  private final GenericAppenderFactory appenderFactory =
-      new GenericAppenderFactory(TestFixtures.SCHEMA);
+  private static GenericAppenderFactory appenderFactory;
+
+  @BeforeAll
+  public static void beforeAll() {
+    HadoopTables tables = new HadoopTables();
+    appenderFactory =
+        new GenericAppenderFactory(tables.create(TestFixtures.SCHEMA, temporaryFolder.toString()));
+  }
 
   private void assertRecordsAndPosition(
       List<Record> expectedRecords,
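
The same setup pattern repeats across these test diffs: a static @TempDir plus a @BeforeAll hook that creates a HadoopTables-backed table and builds the appender factory from it. JUnit 5 injects static @TempDir fields before @BeforeAll runs, which is what makes this ordering safe. A self-contained sketch of the pattern, with an illustrative class name and schema (not from the commit):

import static org.apache.iceberg.types.Types.NestedField.required;

import java.nio.file.Path;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.data.GenericAppenderFactory;
import org.apache.iceberg.hadoop.HadoopTables;
import org.apache.iceberg.types.Types;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TableBackedAppenderTest {
  // Injected before @BeforeAll, so the location exists when the table is created.
  @TempDir static Path tableLocation;

  static GenericAppenderFactory appenderFactory;

  @BeforeAll
  static void beforeAll() {
    Schema schema = new Schema(required(1, "id", Types.LongType.get()));
    // Create a throwaway Hadoop table so the factory sees real table properties.
    Table table = new HadoopTables().create(schema, tableLocation.toString());
    appenderFactory = new GenericAppenderFactory(table);
  }

  @Test
  void factoryIsTableBacked() {
    // The factory built from a Table derives its metrics config from table properties.
    org.assertj.core.api.Assertions.assertThat(appenderFactory).isNotNull();
  }
}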

flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestArrayPoolDataIteratorBatcherRowData.java

Lines changed: 16 additions & 2 deletions
@@ -32,31 +32,45 @@
 import org.apache.iceberg.CombinedScanTask;
 import org.apache.iceberg.FileFormat;
 import org.apache.iceberg.FileScanTask;
+import org.apache.iceberg.Table;
 import org.apache.iceberg.data.GenericAppenderFactory;
 import org.apache.iceberg.data.RandomGenericData;
 import org.apache.iceberg.data.Record;
 import org.apache.iceberg.flink.FlinkConfigOptions;
+import org.apache.iceberg.flink.HadoopCatalogExtension;
 import org.apache.iceberg.flink.TestFixtures;
 import org.apache.iceberg.flink.TestHelpers;
 import org.apache.iceberg.flink.source.DataIterator;
 import org.apache.iceberg.io.CloseableIterator;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
 import org.junit.jupiter.api.io.TempDir;
 
 public class TestArrayPoolDataIteratorBatcherRowData {
 
+  @RegisterExtension
+  private static final HadoopCatalogExtension CATALOG_EXTENSION =
+      new HadoopCatalogExtension(TestFixtures.DATABASE, TestFixtures.TABLE);
+
   @TempDir protected Path temporaryFolder;
   private static final FileFormat FILE_FORMAT = FileFormat.PARQUET;
   private final Configuration config =
       new Configuration()
           .set(SourceReaderOptions.ELEMENT_QUEUE_CAPACITY, 1)
           .set(FlinkConfigOptions.SOURCE_READER_FETCH_BATCH_RECORD_COUNT, 2);
 
-  private final GenericAppenderFactory appenderFactory =
-      new GenericAppenderFactory(TestFixtures.SCHEMA);
+  private static GenericAppenderFactory appenderFactory;
   private final DataIteratorBatcher<RowData> batcher =
       new ArrayPoolDataIteratorBatcher<>(config, new RowDataRecordFactory(TestFixtures.ROW_TYPE));
 
+  @BeforeEach
+  public void createTable() {
+    Table testTable =
+        CATALOG_EXTENSION.catalog().createTable(TestFixtures.TABLE_IDENTIFIER, TestFixtures.SCHEMA);
+    appenderFactory = new GenericAppenderFactory(testTable);
+  }
+
   /** Read a CombinedScanTask that contains a single file with less than a full batch of records */
   @Test
   public void testSingleFileLessThanOneFullBatch() throws Exception {

flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java

Lines changed: 11 additions & 11 deletions
@@ -18,12 +18,12 @@
  */
 package org.apache.iceberg.flink.source.reader;
 
-import static org.apache.iceberg.flink.TestFixtures.DATABASE;
 import static org.apache.iceberg.types.Types.NestedField.required;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.assertj.core.api.Assumptions.assumeThat;
 
+import java.io.File;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.time.LocalDateTime;
@@ -38,19 +38,18 @@
 import org.apache.iceberg.ParameterizedTestExtension;
 import org.apache.iceberg.Parameters;
 import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
 import org.apache.iceberg.data.GenericAppenderFactory;
 import org.apache.iceberg.data.RandomGenericData;
 import org.apache.iceberg.data.Record;
-import org.apache.iceberg.flink.HadoopTableExtension;
-import org.apache.iceberg.flink.TestFixtures;
 import org.apache.iceberg.flink.source.split.IcebergSourceSplit;
+import org.apache.iceberg.hadoop.HadoopTables;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
 import org.apache.iceberg.types.Types;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.TestTemplate;
 import org.junit.jupiter.api.extension.ExtendWith;
-import org.junit.jupiter.api.extension.RegisterExtension;
 import org.junit.jupiter.api.io.TempDir;
 
 @ExtendWith(ParameterizedTestExtension.class)
@@ -62,7 +61,10 @@ public class TestColumnStatsWatermarkExtractor {
       required(3, "long_column", Types.LongType.get()),
       required(4, "string_column", Types.StringType.get()));
 
-  private static final GenericAppenderFactory APPENDER_FACTORY = new GenericAppenderFactory(SCHEMA);
+  @TempDir protected static File tableDir = null;
+  private static final HadoopTables TABLES = new HadoopTables();
+
+  private static GenericAppenderFactory appenderFactory;
 
   private static final List<List<Record>> TEST_RECORDS =
       ImmutableList.of(
@@ -73,15 +75,13 @@ public class TestColumnStatsWatermarkExtractor {
 
   @TempDir protected Path temporaryFolder;
 
-  @RegisterExtension
-  private static final HadoopTableExtension SOURCE_TABLE_EXTENSION =
-      new HadoopTableExtension(DATABASE, TestFixtures.TABLE, SCHEMA);
-
   @Parameter(index = 0)
   private String columnName;
 
   @BeforeAll
   public static void updateMinValue() {
+    Table testTable = TABLES.create(SCHEMA, tableDir.toString());
+    appenderFactory = new GenericAppenderFactory(testTable);
     for (int i = 0; i < TEST_RECORDS.size(); ++i) {
       for (Record r : TEST_RECORDS.get(i)) {
         Map<String, Long> minValues = MIN_VALUES.get(i);
@@ -131,7 +131,7 @@ public void testMultipleFiles() throws IOException {
     IcebergSourceSplit combinedSplit =
         IcebergSourceSplit.fromCombinedScanTask(
             ReaderUtil.createCombinedScanTask(
-                TEST_RECORDS, temporaryFolder, FileFormat.PARQUET, APPENDER_FACTORY));
+                TEST_RECORDS, temporaryFolder, FileFormat.PARQUET, appenderFactory));
 
     ColumnStatsWatermarkExtractor extractor =
         new ColumnStatsWatermarkExtractor(SCHEMA, columnName, null);
@@ -171,6 +171,6 @@ private IcebergSourceSplit split(int id) throws IOException {
             ImmutableList.of(TEST_RECORDS.get(id)),
             temporaryFolder,
             FileFormat.PARQUET,
-            APPENDER_FACTORY));
+            appenderFactory));
   }
 }

flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestIcebergSourceReader.java

Lines changed: 12 additions & 3 deletions
@@ -32,6 +32,7 @@
 import org.apache.flink.table.data.RowData;
 import org.apache.iceberg.CombinedScanTask;
 import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.Table;
 import org.apache.iceberg.data.GenericAppenderFactory;
 import org.apache.iceberg.data.Record;
 import org.apache.iceberg.encryption.PlaintextEncryptionManager;
@@ -40,14 +41,22 @@
 import org.apache.iceberg.flink.source.split.IcebergSourceSplit;
 import org.apache.iceberg.flink.source.split.SerializableComparator;
 import org.apache.iceberg.hadoop.HadoopFileIO;
+import org.apache.iceberg.hadoop.HadoopTables;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 
 public class TestIcebergSourceReader {
-  @TempDir protected Path temporaryFolder;
+  @TempDir protected static Path temporaryFolder;
 
-  private final GenericAppenderFactory appenderFactory =
-      new GenericAppenderFactory(TestFixtures.SCHEMA);
+  private static GenericAppenderFactory appenderFactory = null;
+
+  @BeforeAll
+  public static void beforeAll() {
+    HadoopTables tables = new HadoopTables();
+    Table testTable = tables.create(TestFixtures.SCHEMA, temporaryFolder.toFile().toString());
+    appenderFactory = new GenericAppenderFactory(testTable);
+  }
 
   @Test
   public void testReaderMetrics() throws Exception {

flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestLimitableDataIterator.java

Lines changed: 5 additions & 1 deletion
@@ -26,13 +26,15 @@
 import org.apache.flink.table.data.RowData;
 import org.apache.iceberg.CombinedScanTask;
 import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.Table;
 import org.apache.iceberg.data.GenericAppenderFactory;
 import org.apache.iceberg.data.Record;
 import org.apache.iceberg.encryption.EncryptionManager;
 import org.apache.iceberg.encryption.PlaintextEncryptionManager;
 import org.apache.iceberg.flink.TestFixtures;
 import org.apache.iceberg.flink.source.RowDataFileScanTaskReader;
 import org.apache.iceberg.hadoop.HadoopFileIO;
+import org.apache.iceberg.hadoop.HadoopTables;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.io.TempDir;
@@ -53,7 +55,9 @@ public class TestLimitableDataIterator {
 
   @BeforeAll
   public static void beforeClass() throws Exception {
-    GenericAppenderFactory appenderFactory = new GenericAppenderFactory(TestFixtures.SCHEMA);
+    HadoopTables tables = new HadoopTables();
+    Table testTable = tables.create(TestFixtures.SCHEMA, TestFixtures.TABLE);
+    GenericAppenderFactory appenderFactory = new GenericAppenderFactory(testTable);
     List<List<Record>> recordBatchList =
         ReaderUtil.createRecordBatchList(TestFixtures.SCHEMA, 3, 2);
     combinedScanTask =

flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/actions/TestRewriteDataFilesAction.java

Lines changed: 2 additions & 2 deletions
@@ -331,8 +331,8 @@ public void testRewriteLargeTableHasResiduals() throws IOException {
   @TestTemplate
   public void testRewriteAvoidRepeateCompress() throws IOException {
     List<Record> expected = Lists.newArrayList();
-    Schema schema = icebergTableUnPartitioned.schema();
-    GenericAppenderFactory genericAppenderFactory = new GenericAppenderFactory(schema);
+    GenericAppenderFactory genericAppenderFactory =
+        new GenericAppenderFactory(icebergTableUnPartitioned);
     File file = File.createTempFile("junit", null, temp.toFile());
     int count = 0;
     try (FileAppender<Record> fileAppender =

flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/source/assigner/TestWatermarkBasedSplitAssigner.java

Lines changed: 17 additions & 2 deletions
@@ -21,6 +21,7 @@
 import static org.apache.iceberg.types.Types.NestedField.required;
 import static org.assertj.core.api.Assertions.assertThat;
 
+import java.io.File;
 import java.io.IOException;
 import java.time.Instant;
 import java.time.LocalDateTime;
@@ -32,6 +33,7 @@
 import java.util.stream.IntStream;
 import org.apache.iceberg.FileFormat;
 import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
 import org.apache.iceberg.data.GenericAppenderFactory;
 import org.apache.iceberg.data.GenericRecord;
 import org.apache.iceberg.data.RandomGenericData;
@@ -42,15 +44,28 @@
 import org.apache.iceberg.flink.source.split.IcebergSourceSplit;
 import org.apache.iceberg.flink.source.split.SerializableComparator;
 import org.apache.iceberg.flink.source.split.SplitComparators;
+import org.apache.iceberg.hadoop.HadoopTables;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
 import org.apache.iceberg.types.Types;
 import org.apache.iceberg.util.SerializationUtil;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 public class TestWatermarkBasedSplitAssigner extends SplitAssignerTestBase {
   public static final Schema SCHEMA =
       new Schema(required(1, "timestamp_column", Types.TimestampType.withoutZone()));
-  private static final GenericAppenderFactory APPENDER_FACTORY = new GenericAppenderFactory(SCHEMA);
+
+  @TempDir private static File tableLocation;
+
+  private static GenericAppenderFactory appenderFactory;
+
+  @BeforeAll
+  public static void beforeAll() {
+    HadoopTables tables = new HadoopTables();
+    Table table = tables.create(SCHEMA, tableLocation.toPath().toString());
+    appenderFactory = new GenericAppenderFactory(table);
+  }
 
   @Override
   protected SplitAssigner splitAssigner() {
@@ -138,7 +153,7 @@ private IcebergSourceSplit splitFromRecords(List<List<Record>> records) {
     try {
       return IcebergSourceSplit.fromCombinedScanTask(
           ReaderUtil.createCombinedScanTask(
-              records, temporaryFolder, FileFormat.PARQUET, APPENDER_FACTORY));
+              records, temporaryFolder, FileFormat.PARQUET, appenderFactory));
     } catch (IOException e) {
       throw new RuntimeException("Split creation exception", e);
     }
