Skip to content

Commit 0747354

Browse files
committed
update
1 parent 487bed2 commit 0747354

2 files changed

Lines changed: 51 additions & 0 deletions

File tree

paimon-hive/paimon-hive-catalog/src/main/java/org/apache/paimon/iceberg/IcebergHiveMetadataCommitter.java

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -161,6 +161,10 @@ private void commitMetadataForTarget(
161161
hiveTable = createTable(databaseName, tableName, newMetadataPath);
162162
}
163163

164+
// Iceberg readers (e.g. iceberg.spark.SparkSessionCatalog) only load tables where
165+
// table_type=ICEBERG, so stamp it on every commit; existing entries created by Paimon's
166+
// HiveCatalog as table_type=PAIMON are migrated here.
167+
hiveTable.getParameters().put("table_type", "ICEBERG");
164168
hiveTable.getParameters().put("metadata_location", newMetadataPath.toString());
165169
if (baseMetadataPath != null) {
166170
hiveTable

paimon-hive/paimon-hive-connector-common/src/test/java/org/apache/paimon/iceberg/IcebergHiveMetadataCommitterITCaseBase.java

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -321,6 +321,53 @@ public void testMultipleDatabases() throws Exception {
321321
"SELECT * FROM my_iceberg.iceberg_db2.t2 ORDER BY pt, id")));
322322
}
323323

324+
/**
325+
* Verifies that when a Paimon table is created via Paimon's HiveCatalog with Iceberg
326+
* compatibility enabled, the resulting Hive entry has {@code table_type=ICEBERG} after the
327+
* first commit so Iceberg readers can recognize it and partition-prune correctly.
328+
*/
329+
@Test
330+
public void testIcebergCommitSetsHiveTableTypeToIceberg() throws Exception {
331+
TableEnvironment tEnv =
332+
TableEnvironmentImpl.create(
333+
EnvironmentSettings.newInstance().inBatchMode().build());
334+
tEnv.executeSql(
335+
"CREATE CATALOG my_paimon_hive WITH ( 'type' = 'paimon', 'metastore' = 'hive', "
336+
+ "'uri' = '', 'warehouse' = '"
337+
+ path
338+
+ "' )");
339+
tEnv.executeSql("CREATE DATABASE my_paimon_hive.test_db");
340+
tEnv.executeSql(
341+
"CREATE TABLE my_paimon_hive.test_db.t ( pt INT, id INT, data STRING ) "
342+
+ "PARTITIONED BY (pt) WITH "
343+
+ "( 'metadata.iceberg.storage' = 'hive-catalog', "
344+
+ " 'metadata.iceberg.uri' = '', 'file.format' = 'avro' )");
345+
tEnv.executeSql(
346+
"INSERT INTO my_paimon_hive.test_db.t VALUES "
347+
+ "(1, 1, 'apple'), (1, 2, 'pear'), "
348+
+ "(2, 1, 'cat'), (2, 2, 'dog')")
349+
.await();
350+
351+
List<String> tblPropRows = hiveShell.executeQuery("SHOW TBLPROPERTIES test_db.t");
352+
String tableTypeRow =
353+
tblPropRows.stream()
354+
.filter(r -> r.toLowerCase().startsWith("table_type"))
355+
.findFirst()
356+
.orElse("(table_type row missing)");
357+
boolean metadataLocationPresent =
358+
tblPropRows.stream().anyMatch(r -> r.toLowerCase().startsWith("metadata_location"));
359+
assertTrue(
360+
metadataLocationPresent,
361+
"metadata_location must be written on commit; rows=" + tblPropRows);
362+
assertTrue(
363+
tableTypeRow.toUpperCase().contains("ICEBERG"),
364+
"Expected table_type=ICEBERG so Iceberg readers can load the table; "
365+
+ "got row: '"
366+
+ tableTypeRow
367+
+ "'; full props: "
368+
+ tblPropRows);
369+
}
370+
324371
@Test
325372
public void testCustomMetastoreClass() {
326373
TableEnvironment tEnv =

0 commit comments

Comments
 (0)