19 | 19 | package org.apache.hudi.client;
20 | 20 |
21 | 21 | import org.apache.hudi.client.common.HoodieFlinkEngineContext;
   | 22 | +import org.apache.hudi.client.utils.TransactionUtils;
22 | 23 | import org.apache.hudi.common.data.HoodieListData;
23 | 24 | import org.apache.hudi.common.engine.HoodieEngineContext;
24 | 25 | import org.apache.hudi.common.fs.FSUtils;
32 | 33 | import org.apache.hudi.common.model.WriteOperationType;
33 | 34 | import org.apache.hudi.common.table.HoodieTableMetaClient;
34 | 35 | import org.apache.hudi.common.table.HoodieTableVersion;
   | 36 | +import org.apache.hudi.common.table.timeline.HoodieInstant;
35 | 37 | import org.apache.hudi.common.util.Option;
36 | 38 | import org.apache.hudi.config.HoodieWriteConfig;
37 | 39 | import org.apache.hudi.exception.HoodieNotSupportedException;
44 | 46 | import org.apache.hudi.table.HoodieFlinkTable;
45 | 47 | import org.apache.hudi.table.HoodieTable;
46 | 48 | import org.apache.hudi.table.action.HoodieWriteMetadata;
47 |    | -import org.apache.hudi.table.marker.WriteMarkersFactory;
48 | 49 | import org.apache.hudi.table.upgrade.FlinkUpgradeDowngradeHelper;
49 | 50 | import org.apache.hudi.table.upgrade.UpgradeDowngrade;
50 | 51 | import org.apache.hudi.util.WriteStatMerger;
@@ -274,6 +275,19 @@ public void preWrite(String instantTime, WriteOperationType writeOperationType,
274 | 275 |     // remove the async cleaning
275 | 276 |   }
276 | 277 |
    | 278 | +  /**
    | 279 | +   * Refreshes the last transaction metadata; should be called before
    | 280 | +   * the driver starts a new transaction.
    | 281 | +   */
    | 282 | +  public void preTxn(HoodieTableMetaClient metaClient) {
    | 283 | +    if (txnManager.isOptimisticConcurrencyControlEnabled()) {
    | 284 | +      // refresh the meta client, which is reused across transactions
    | 285 | +      metaClient.reloadActiveTimeline();
    | 286 | +      this.lastCompletedTxnAndMetadata = TransactionUtils.getLastCompletedTxnInstantAndMetadata(metaClient);
    | 287 | +      this.pendingInflightAndRequestedInstants = TransactionUtils.getInflightAndRequestedInstants(metaClient);
    | 288 | +    }
    | 289 | +  }
    | 290 | +
277 | 291 |   @Override
278 | 292 |   protected void writeTableMetadata(HoodieTable table, String instantTime, String actionType, HoodieCommitMetadata metadata) {
279 | 293 |     tableServiceClient.writeTableMetadata(table, instantTime, actionType, metadata);
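
For context, here is a minimal usage sketch of the new preTxn hook from the driver's point of view. It is illustrative only: the wrapper class is hypothetical, and the startCommit/preWrite calls are assumed to be the existing client entry points surrounding it; none of this is part of the diff.

import org.apache.hudi.client.HoodieFlinkWriteClient;
import org.apache.hudi.common.model.WriteOperationType;
import org.apache.hudi.common.table.HoodieTableMetaClient;

// Hypothetical driver helper: one write client and one meta client are
// reused across checkpoints, so transaction metadata must be refreshed
// before each new instant is started.
public class PreTxnUsageSketch {
  public static String startNextInstant(HoodieFlinkWriteClient<?> writeClient,
                                        HoodieTableMetaClient metaClient) {
    // No-op unless optimistic concurrency control is enabled; with OCC it
    // reloads the active timeline and snapshots the last completed txn plus
    // the pending inflight/requested instants for the later conflict check.
    writeClient.preTxn(metaClient);
    String instantTime = writeClient.startCommit();
    writeClient.preWrite(instantTime, WriteOperationType.INSERT, metaClient);
    return instantTime;
  }
}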
@@ -322,30 +336,12 @@ protected List<WriteStatus> postWrite(HoodieWriteMetadata<List<WriteStatus>> res
322 | 336 |     return result.getWriteStatuses();
323 | 337 |   }
324 | 338 |
325 |     | -  /**
326 |     | -   * Post commit is rewrite to be invoked after a successful commit.
327 |     | -   *
328 |     | -   * <p>The Flink write client is designed to write data set as buckets
329 |     | -   * but cleaning action should trigger after all the write actions within a
330 |     | -   * checkpoint finish.
331 |     | -   *
332 |     | -   * @param table Table to commit on
333 |     | -   * @param metadata Commit Metadata corresponding to committed instant
334 |     | -   * @param instantTime Instant Time
335 |     | -   * @param extraMetadata Additional Metadata passed by user
336 |     | -   */
337 | 339 |   @Override
338 |     | -  protected void postCommit(HoodieTable table,
339 |     | -                            HoodieCommitMetadata metadata,
340 |     | -                            String instantTime,
341 |     | -                            Option<Map<String, String>> extraMetadata) {
342 |     | -    try {
343 |     | -      // Delete the marker directory for the instant.
344 |     | -      WriteMarkersFactory.get(config.getMarkersType(), createTable(config, hadoopConf), instantTime)
345 |     | -          .quietDeleteMarkerDir(context, config.getMarkersDeleteParallelism());
346 |     | -    } finally {
347 |     | -      this.heartbeatClient.stop(instantTime);
348 |     | -    }
    | 340 | +  protected void preCommit(HoodieInstant inflightInstant, HoodieCommitMetadata metadata) {
    | 341 | +    // Create a Hoodie table after startTxn, which encapsulates the commits and files visible.
    | 342 | +    // Important to create this after the lock so the latest commits show up in the timeline without a reload.
    | 343 | +    HoodieTable table = createTable(config, hadoopConf);
    | 344 | +    resolveWriteConflict(table, metadata, this.pendingInflightAndRequestedInstants);
349 | 345 |   }
350 | 346 |
351 | 347 |   @Override
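
The sketch below models why preCommit builds the HoodieTable only after the transaction lock is held. It is a hedged, self-contained simplification: a plain ReentrantLock and a callback stand in for Hudi's TransactionManager and resolveWriteConflict, which are not shown in this diff.

import java.util.concurrent.locks.ReentrantLock;

// Hypothetical, simplified model of the ordering the new preCommit relies on.
public class PreCommitOrderingSketch {
  private final ReentrantLock txnLock = new ReentrantLock();

  interface ConflictCheck {
    void resolve();  // stand-in for resolveWriteConflict(table, metadata, pendingInstants)
  }

  public void commit(ConflictCheck preCommit, Runnable completeInstant) {
    txnLock.lock();  // startTxn: no other writer can complete an instant now
    try {
      // A table/timeline view built from this point on includes every commit
      // that completed before the lock was taken, which is why preCommit
      // creates the HoodieTable here rather than earlier.
      preCommit.resolve();
      completeInstant.run();
    } finally {
      txnLock.unlock();  // endTxn
    }
  }
}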