public class IcebergSinkBuilder {
    private static final String DEFAULT_GLUE_DB = "default";
    private static final String DEFAULT_ICEBERG_TABLE_NAME = "prices_iceberg";
-   private static final String DEFAULT_ICEBERG_SORT_ORDER_FIELD = "accountNr";
    private static final String DEFAULT_ICEBERG_PARTITION_FIELDS = "symbol";
    private static final String DEFAULT_ICEBERG_OPERATION = "upsert";
    private static final String DEFAULT_ICEBERG_UPSERT_FIELDS = "symbol";
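For context, a minimal sketch (not part of this change) of how these defaults can be overridden through the Properties object that createBuilder reads. The keys "partition.fields" and "operation" appear later in this diff; the wrapper class and the idea of building the Properties this way are illustrative assumptions.

import java.util.Properties;

// Hypothetical helper for illustration only; not part of this repository.
class IcebergSinkConfigSketch {
    static Properties icebergProperties() {
        Properties props = new Properties();
        props.setProperty("partition.fields", "symbol"); // otherwise DEFAULT_ICEBERG_PARTITION_FIELDS applies
        props.setProperty("operation", "upsert");        // otherwise DEFAULT_ICEBERG_OPERATION applies
        return props;
    }
}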
@@ -45,7 +44,7 @@ public class IcebergSinkBuilder {
     * If Iceberg Table has not been previously created, we will create it using the Partition Fields specified in the
     * Properties, as well as add a Sort Field to improve query performance
     */
-    private static void createTable(Catalog catalog, TableIdentifier outputTable, org.apache.iceberg.Schema icebergSchema, PartitionSpec partitionSpec, String sortField) {
+    private static void createTable(Catalog catalog, TableIdentifier outputTable, org.apache.iceberg.Schema icebergSchema, PartitionSpec partitionSpec) {
        // If table has been previously created, we do not do any operation or modification
        if (!catalog.tableExists(outputTable)) {
            Table icebergTable = catalog.createTable(outputTable, icebergSchema, partitionSpec);
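The removed body is not shown in this hunk, but the dropped sortField parameter presumably drove a sort-order update on the newly created table. A hedged sketch of what that step typically looks like with the Iceberg table API, for illustration only and not this project's exact code:

import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;

// Illustrative sketch of the sort-order step the removed sortField parameter likely supported.
class SortOrderSketch {
    static void createTableWithSortOrder(Catalog catalog, TableIdentifier outputTable,
                                         Schema icebergSchema, PartitionSpec partitionSpec,
                                         String sortField) {
        if (!catalog.tableExists(outputTable)) {
            Table icebergTable = catalog.createTable(outputTable, icebergSchema, partitionSpec);
            // Add an ascending sort order on sortField (the removed default was "accountNr")
            icebergTable.replaceSortOrder().asc(sortField).commit();
        }
    }
}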
@@ -79,8 +78,6 @@ public static FlinkSink.Builder createBuilder(Properties icebergProperties, Data
        String partitionFields = icebergProperties.getProperty("partition.fields", DEFAULT_ICEBERG_PARTITION_FIELDS);
        List<String> partitionFieldList = Arrays.asList(partitionFields.split("\\s*,\\s*"));

-        String sortField = icebergProperties.getProperty("sort.field", DEFAULT_ICEBERG_SORT_ORDER_FIELD);
-
        // In Iceberg you can perform Appends, Upserts and Overwrites.
        String icebergOperation = icebergProperties.getProperty("operation", DEFAULT_ICEBERG_OPERATION);
        Preconditions.checkArgument(icebergOperation.equals("append") || icebergOperation.equals("upsert") || icebergOperation.equals("overwrite"), "Invalid Iceberg Operation");
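As a hedged illustration of where the validated operation string typically ends up, a sketch of mapping it onto FlinkSink.Builder flags; upsert and overwrite are real builder methods in iceberg-flink, but the helper itself is hypothetical and not this project's code.

import org.apache.iceberg.flink.sink.FlinkSink;

// Hypothetical helper showing one way the operation string can drive the sink builder.
class OperationMappingSketch {
    static FlinkSink.Builder applyOperation(FlinkSink.Builder builder, String icebergOperation) {
        switch (icebergOperation) {
            case "upsert":
                // Upsert also requires equality field columns to be configured on the builder.
                return builder.upsert(true);
            case "overwrite":
                return builder.overwrite(true);
            default:
                return builder; // plain append
        }
    }
}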
@@ -119,7 +116,7 @@ public static FlinkSink.Builder createBuilder(Properties icebergProperties, Data
        // Based on how many fields we want to partition, we create the Partition Spec
        PartitionSpec partitionSpec = getPartitionSpec(icebergSchema, partitionFieldList);
        // We create the Iceberg Table, using the Iceberg Catalog, Table Identifier, Schema parsed in Iceberg Schema Format and the partition spec
-        createTable(catalog, outputTable, icebergSchema, partitionSpec, sortField);
+        createTable(catalog, outputTable, icebergSchema, partitionSpec);
        // Once the table has been created in the job or before, we load it
        TableLoader tableLoader = TableLoader.fromCatalog(glueCatalogLoader, outputTable);
        // Get RowType Schema from Iceberg Schema
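To show how the TableLoader created above is typically consumed, a sketch of assembling the sink with the iceberg-flink API. The equality column and upsert flag mirror the defaults in this class, but the assembly itself is an assumption for illustration, not this project's exact code.

import java.util.Collections;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.table.data.RowData;
import org.apache.iceberg.flink.TableLoader;
import org.apache.iceberg.flink.sink.FlinkSink;

// Illustrative assembly of the sink from a RowData stream and the TableLoader built above.
class SinkAssemblySketch {
    static void attachSink(DataStream<RowData> rowDataStream, TableLoader tableLoader) {
        FlinkSink.forRowData(rowDataStream)
                .tableLoader(tableLoader)
                .equalityFieldColumns(Collections.singletonList("symbol")) // assumed upsert key, per DEFAULT_ICEBERG_UPSERT_FIELDS
                .upsert(true)                                              // matches the "upsert" default operation
                .append();                                                 // registers the sink on the stream
    }
}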