diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c index 06b28670462d..4c96492babc1 100644 --- a/cmd/zdb/zdb.c +++ b/cmd/zdb/zdb.c @@ -208,7 +208,7 @@ sublivelist_verify_blkptr(void *arg, const blkptr_t *bp, boolean_t free, sublivelist_verify_block_t svb = { .svb_dva = bp->blk_dva[i], .svb_allocated_txg = - BP_GET_LOGICAL_BIRTH(bp) + BP_GET_BIRTH(bp) }; if (zfs_btree_find(&sv->sv_leftover, &svb, @@ -2568,7 +2568,7 @@ snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp, (u_longlong_t)BP_GET_PSIZE(bp), (u_longlong_t)BP_GET_FILL(bp), (u_longlong_t)BP_GET_LOGICAL_BIRTH(bp), - (u_longlong_t)BP_GET_BIRTH(bp)); + (u_longlong_t)BP_GET_PHYSICAL_BIRTH(bp)); if (bp_freed) (void) snprintf(blkbuf + strlen(blkbuf), buflen - strlen(blkbuf), " %s", "FREE"); @@ -2618,7 +2618,7 @@ visit_indirect(spa_t *spa, const dnode_phys_t *dnp, { int err = 0; - if (BP_GET_LOGICAL_BIRTH(bp) == 0) + if (BP_GET_BIRTH(bp) == 0) return (0); print_indirect(spa, bp, zb, dnp); @@ -2806,7 +2806,7 @@ dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) (void) arg, (void) tx; char blkbuf[BP_SPRINTF_LEN]; - if (BP_GET_LOGICAL_BIRTH(bp) != 0) { + if (BP_GET_BIRTH(bp) != 0) { snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); (void) printf("\t%s\n", blkbuf); } @@ -2847,7 +2847,7 @@ dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx) (void) arg, (void) tx; char blkbuf[BP_SPRINTF_LEN]; - ASSERT(BP_GET_LOGICAL_BIRTH(bp) != 0); + ASSERT(BP_GET_BIRTH(bp) != 0); snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, bp_freed); (void) printf("\t%s\n", blkbuf); return (0); @@ -5921,11 +5921,11 @@ zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp, * entry back to the block pointer before we claim it. */ if (v == DDT_PHYS_FLAT) { - ASSERT3U(BP_GET_BIRTH(bp), ==, + ASSERT3U(BP_GET_PHYSICAL_BIRTH(bp), ==, ddt_phys_birth(dde->dde_phys, v)); tempbp = *bp; ddt_bp_fill(dde->dde_phys, v, &tempbp, - BP_GET_BIRTH(bp)); + BP_GET_PHYSICAL_BIRTH(bp)); bp = &tempbp; } @@ -6151,7 +6151,7 @@ zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, if (zb->zb_level == ZB_DNODE_LEVEL) return (0); - if (dump_opt['b'] >= 5 && BP_GET_LOGICAL_BIRTH(bp) > 0) { + if (dump_opt['b'] >= 5 && BP_GET_BIRTH(bp) > 0) { char blkbuf[BP_SPRINTF_LEN]; snprintf_blkptr(blkbuf, sizeof (blkbuf), bp); (void) printf("objset %llu object %llu " diff --git a/cmd/zdb/zdb_il.c b/cmd/zdb/zdb_il.c index 6b90b08ca1b1..62e290cd122c 100644 --- a/cmd/zdb/zdb_il.c +++ b/cmd/zdb/zdb_il.c @@ -176,7 +176,7 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg) if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { (void) printf("%shas blkptr, %s\n", tab_prefix, - !BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) >= + !BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) >= spa_min_claim_txg(zilog->zl_spa) ? "will claim" : "won't claim"); print_log_bp(bp, tab_prefix); @@ -189,7 +189,7 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg) (void) printf("%s\n", tab_prefix); return; } - if (BP_GET_LOGICAL_BIRTH(bp) < zilog->zl_header->zh_claim_txg) { + if (BP_GET_BIRTH(bp) < zilog->zl_header->zh_claim_txg) { (void) printf("%s\n", tab_prefix); return; @@ -240,7 +240,7 @@ zil_prt_rec_write_enc(zilog_t *zilog, int txtype, const void *arg) if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { (void) printf("%shas blkptr, %s\n", tab_prefix, - !BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) >= + !BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) >= spa_min_claim_txg(zilog->zl_spa) ? 
"will claim" : "won't claim"); print_log_bp(bp, tab_prefix); @@ -476,7 +476,7 @@ print_log_block(zilog_t *zilog, const blkptr_t *bp, void *arg, if (claim_txg != 0) claim = "already claimed"; - else if (BP_GET_LOGICAL_BIRTH(bp) >= spa_min_claim_txg(zilog->zl_spa)) + else if (BP_GET_BIRTH(bp) >= spa_min_claim_txg(zilog->zl_spa)) claim = "will claim"; else claim = "won't claim"; diff --git a/cmd/zfs/zfs_main.c b/cmd/zfs/zfs_main.c index 81727224b04e..533b355fa858 100644 --- a/cmd/zfs/zfs_main.c +++ b/cmd/zfs/zfs_main.c @@ -440,7 +440,7 @@ get_usage(zfs_help_t idx) return (gettext("\tredact " " ...\n")); case HELP_REWRITE: - return (gettext("\trewrite [-rvx] [-o ] [-l ] " + return (gettext("\trewrite [-Prvx] [-o ] [-l ] " "\n")); case HELP_JAIL: return (gettext("\tjail \n")); @@ -9177,8 +9177,11 @@ zfs_do_rewrite(int argc, char **argv) zfs_rewrite_args_t args; memset(&args, 0, sizeof (args)); - while ((c = getopt(argc, argv, "l:o:rvx")) != -1) { + while ((c = getopt(argc, argv, "Pl:o:rvx")) != -1) { switch (c) { + case 'P': + args.flags |= ZFS_REWRITE_PHYSICAL; + break; case 'l': args.len = strtoll(optarg, NULL, 0); break; diff --git a/include/sys/dbuf.h b/include/sys/dbuf.h index 756459b2fbb5..baf3b1508335 100644 --- a/include/sys/dbuf.h +++ b/include/sys/dbuf.h @@ -164,6 +164,7 @@ typedef struct dbuf_dirty_record { boolean_t dr_nopwrite; boolean_t dr_brtwrite; boolean_t dr_diowrite; + boolean_t dr_rewrite; boolean_t dr_has_raw_params; /* Override and raw params are mutually exclusive. */ diff --git a/include/sys/dmu.h b/include/sys/dmu.h index 0b2e443a433a..84828ef1426c 100644 --- a/include/sys/dmu.h +++ b/include/sys/dmu.h @@ -822,6 +822,7 @@ struct blkptr *dmu_buf_get_blkptr(dmu_buf_t *db); */ void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx); void dmu_buf_will_dirty_flags(dmu_buf_t *db, dmu_tx_t *tx, dmu_flags_t flags); +void dmu_buf_will_rewrite(dmu_buf_t *db, dmu_tx_t *tx); boolean_t dmu_buf_is_dirty(dmu_buf_t *db, dmu_tx_t *tx); void dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder, const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx); diff --git a/include/sys/dmu_traverse.h b/include/sys/dmu_traverse.h index 3196b2addeee..70cafa4c74f1 100644 --- a/include/sys/dmu_traverse.h +++ b/include/sys/dmu_traverse.h @@ -59,6 +59,13 @@ typedef int (blkptr_cb_t)(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, */ #define TRAVERSE_NO_DECRYPT (1<<5) +/* + * Always use logical birth time for birth time comparisons. This is useful + * for operations that care about user data changes rather than physical + * block rewrites (e.g., incremental replication). + */ +#define TRAVERSE_LOGICAL (1<<6) + /* Special traverse error return value to indicate skipping of children */ #define TRAVERSE_VISIT_NO_CHILDREN -1 diff --git a/include/sys/fs/zfs.h b/include/sys/fs/zfs.h index c8deb5be419e..fc359c10365a 100644 --- a/include/sys/fs/zfs.h +++ b/include/sys/fs/zfs.h @@ -1627,6 +1627,9 @@ typedef struct zfs_rewrite_args { uint64_t arg; } zfs_rewrite_args_t; +/* zfs_rewrite_args flags */ +#define ZFS_REWRITE_PHYSICAL 0x1 /* Preserve logical birth time. 
*/ + #define ZFS_IOC_REWRITE _IOW(0x83, 3, zfs_rewrite_args_t) /* diff --git a/include/sys/spa.h b/include/sys/spa.h index e0eed831d30b..db6de332ae67 100644 --- a/include/sys/spa.h +++ b/include/sys/spa.h @@ -140,7 +140,7 @@ typedef struct zio_cksum_salt { * +-------+-------+-------+-------+-------+-------+-------+-------+ * 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE | * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 7 | padding | + * 7 |R| padding | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 8 | padding | * +-------+-------+-------+-------+-------+-------+-------+-------+ @@ -175,6 +175,7 @@ typedef struct zio_cksum_salt { * E blkptr_t contains embedded data (see below) * lvl level of indirection * type DMU object type + * R rewrite (reallocated/rewritten at phys birth TXG) * phys birth txg when dva[0] was written; zero if same as logical birth txg * note that typically all the dva's would be written in this * txg, but they could be different if they were moved by @@ -204,7 +205,7 @@ typedef struct zio_cksum_salt { * +-------+-------+-------+-------+-------+-------+-------+-------+ * 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE | * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 7 | padding | + * 7 |R| padding | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 8 | padding | * +-------+-------+-------+-------+-------+-------+-------+-------+ @@ -373,7 +374,8 @@ typedef enum bp_embedded_type { typedef struct blkptr { dva_t blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */ uint64_t blk_prop; /* size, compression, type, etc */ - uint64_t blk_pad[2]; /* Extra space for the future */ + uint64_t blk_prop2; /* additional properties */ + uint64_t blk_pad; /* Extra space for the future */ uint64_t blk_birth_word[2]; uint64_t blk_fill; /* fill count */ zio_cksum_t blk_cksum; /* 256-bit checksum */ @@ -476,32 +478,51 @@ typedef struct blkptr { #define BP_GET_FREE(bp) BF64_GET((bp)->blk_fill, 0, 1) #define BP_SET_FREE(bp, x) BF64_SET((bp)->blk_fill, 0, 1, x) +/* + * Block birth time macros for different use cases: + * - BP_GET_LOGICAL_BIRTH(): When the block was logically modified by user. + * To be used with a focus on user data, like incremental replication. + * - BP_GET_PHYSICAL_BIRTH(): When the block was physically written to disks. + * For regular writes is equal to logical birth. For dedup and block cloning + * can be smaller than logical birth. For remapped and rewritten blocks can + * be bigger. To be used with focus on physical disk content: ARC, DDT, scrub. + * - BP_GET_RAW_PHYSICAL_BIRTH(): Raw physical birth value. Zero if equal + * to logical birth. Should only be used for BP copying and debugging. + * - BP_GET_BIRTH(): When the block was allocated, which is a physical birth + * for rewritten blocks (rewrite flag set) or logical birth otherwise. + */ #define BP_GET_LOGICAL_BIRTH(bp) (bp)->blk_birth_word[1] #define BP_SET_LOGICAL_BIRTH(bp, x) ((bp)->blk_birth_word[1] = (x)) -#define BP_GET_PHYSICAL_BIRTH(bp) (bp)->blk_birth_word[0] +#define BP_GET_RAW_PHYSICAL_BIRTH(bp) (bp)->blk_birth_word[0] #define BP_SET_PHYSICAL_BIRTH(bp, x) ((bp)->blk_birth_word[0] = (x)) -#define BP_GET_BIRTH(bp) \ - (BP_IS_EMBEDDED(bp) ? 0 : \ - BP_GET_PHYSICAL_BIRTH(bp) ? BP_GET_PHYSICAL_BIRTH(bp) : \ +#define BP_GET_PHYSICAL_BIRTH(bp) \ + (BP_IS_EMBEDDED(bp) ? 0 : \ + BP_GET_RAW_PHYSICAL_BIRTH(bp) ? 
BP_GET_RAW_PHYSICAL_BIRTH(bp) : \ BP_GET_LOGICAL_BIRTH(bp)) -#define BP_SET_BIRTH(bp, logical, physical) \ -{ \ - ASSERT(!BP_IS_EMBEDDED(bp)); \ - BP_SET_LOGICAL_BIRTH(bp, logical); \ - BP_SET_PHYSICAL_BIRTH(bp, \ - ((logical) == (physical) ? 0 : (physical))); \ +#define BP_GET_BIRTH(bp) \ + ((BP_IS_EMBEDDED(bp) || !BP_GET_REWRITE(bp)) ? \ + BP_GET_LOGICAL_BIRTH(bp) : BP_GET_PHYSICAL_BIRTH(bp)) + +#define BP_SET_BIRTH(bp, logical, physical) \ +{ \ + ASSERT(!BP_IS_EMBEDDED(bp)); \ + BP_SET_LOGICAL_BIRTH(bp, logical); \ + BP_SET_PHYSICAL_BIRTH(bp, \ + ((logical) == (physical) ? 0 : (physical))); \ } #define BP_GET_FILL(bp) \ - ((BP_IS_ENCRYPTED(bp)) ? BF64_GET((bp)->blk_fill, 0, 32) : \ - ((BP_IS_EMBEDDED(bp)) ? 1 : (bp)->blk_fill)) + (BP_IS_EMBEDDED(bp) ? 1 : \ + BP_IS_ENCRYPTED(bp) ? BF64_GET((bp)->blk_fill, 0, 32) : \ + (bp)->blk_fill) #define BP_SET_FILL(bp, fill) \ { \ - if (BP_IS_ENCRYPTED(bp)) \ + ASSERT(!BP_IS_EMBEDDED(bp)); \ + if (BP_IS_ENCRYPTED(bp)) \ BF64_SET((bp)->blk_fill, 0, 32, fill); \ else \ (bp)->blk_fill = fill; \ @@ -516,6 +537,15 @@ typedef struct blkptr { BF64_SET((bp)->blk_fill, 32, 32, iv2); \ } +#define BP_GET_REWRITE(bp) \ + (BP_IS_EMBEDDED(bp) ? 0 : BF64_GET((bp)->blk_prop2, 63, 1)) + +#define BP_SET_REWRITE(bp, x) \ +{ \ + ASSERT(!BP_IS_EMBEDDED(bp)); \ + BF64_SET((bp)->blk_prop2, 63, 1, x); \ +} + #define BP_IS_METADATA(bp) \ (BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp))) @@ -545,7 +575,7 @@ typedef struct blkptr { (dva1)->dva_word[0] == (dva2)->dva_word[0]) #define BP_EQUAL(bp1, bp2) \ - (BP_GET_BIRTH(bp1) == BP_GET_BIRTH(bp2) && \ + (BP_GET_PHYSICAL_BIRTH(bp1) == BP_GET_PHYSICAL_BIRTH(bp2) && \ BP_GET_LOGICAL_BIRTH(bp1) == BP_GET_LOGICAL_BIRTH(bp2) && \ DVA_EQUAL(&(bp1)->blk_dva[0], &(bp2)->blk_dva[0]) && \ DVA_EQUAL(&(bp1)->blk_dva[1], &(bp2)->blk_dva[1]) && \ @@ -588,8 +618,8 @@ typedef struct blkptr { { \ BP_ZERO_DVAS(bp); \ (bp)->blk_prop = 0; \ - (bp)->blk_pad[0] = 0; \ - (bp)->blk_pad[1] = 0; \ + (bp)->blk_prop2 = 0; \ + (bp)->blk_pad = 0; \ (bp)->blk_birth_word[0] = 0; \ (bp)->blk_birth_word[1] = 0; \ (bp)->blk_fill = 0; \ @@ -696,7 +726,7 @@ typedef struct blkptr { (u_longlong_t)BP_GET_LSIZE(bp), \ (u_longlong_t)BP_GET_PSIZE(bp), \ (u_longlong_t)BP_GET_LOGICAL_BIRTH(bp), \ - (u_longlong_t)BP_GET_BIRTH(bp), \ + (u_longlong_t)BP_GET_PHYSICAL_BIRTH(bp), \ (u_longlong_t)BP_GET_FILL(bp), \ ws, \ (u_longlong_t)bp->blk_cksum.zc_word[0], \ diff --git a/include/sys/zio.h b/include/sys/zio.h index b139c9de4852..a3368034695b 100644 --- a/include/sys/zio.h +++ b/include/sys/zio.h @@ -374,6 +374,7 @@ typedef struct zio_prop { boolean_t zp_encrypt; boolean_t zp_byteorder; boolean_t zp_direct_write; + boolean_t zp_rewrite; uint8_t zp_salt[ZIO_DATA_SALT_LEN]; uint8_t zp_iv[ZIO_DATA_IV_LEN]; uint8_t zp_mac[ZIO_DATA_MAC_LEN]; diff --git a/include/zfeature_common.h b/include/zfeature_common.h index 4877df4b114d..56382ca85b55 100644 --- a/include/zfeature_common.h +++ b/include/zfeature_common.h @@ -89,6 +89,7 @@ typedef enum spa_feature { SPA_FEATURE_LARGE_MICROZAP, SPA_FEATURE_DYNAMIC_GANG_HEADER, SPA_FEATURE_BLOCK_CLONING_ENDIAN, + SPA_FEATURE_PHYSICAL_REWRITE, SPA_FEATURES } spa_feature_t; diff --git a/lib/libzdb/libzdb.c b/lib/libzdb/libzdb.c index 12144dc65e75..cca1327b1b03 100644 --- a/lib/libzdb/libzdb.c +++ b/lib/libzdb/libzdb.c @@ -93,9 +93,9 @@ livelist_compare(const void *larg, const void *rarg) * Since we're storing blkptrs without cancelling FREE/ALLOC pairs, * it's possible the offsets are equal. 
In that case, sort by txg */ - if (BP_GET_LOGICAL_BIRTH(l) < BP_GET_LOGICAL_BIRTH(r)) { + if (BP_GET_BIRTH(l) < BP_GET_BIRTH(r)) { return (-1); - } else if (BP_GET_LOGICAL_BIRTH(l) > BP_GET_LOGICAL_BIRTH(r)) { + } else if (BP_GET_BIRTH(l) > BP_GET_BIRTH(r)) { return (+1); } return (0); diff --git a/lib/libzfs/libzfs.abi b/lib/libzfs/libzfs.abi index bd2ab6468021..ec0958e30be6 100644 --- a/lib/libzfs/libzfs.abi +++ b/lib/libzfs/libzfs.abi @@ -638,7 +638,7 @@ - + @@ -6398,7 +6398,8 @@ - + + @@ -9605,8 +9606,8 @@ - - + + @@ -9684,7 +9685,7 @@ - + diff --git a/man/man7/zpool-features.7 b/man/man7/zpool-features.7 index 66aa100b7149..10dfd1f92936 100644 --- a/man/man7/zpool-features.7 +++ b/man/man7/zpool-features.7 @@ -853,6 +853,23 @@ when the command is used on a top-level vdev, and will never return to being .Sy enabled . . +.feature com.truenas physical_rewrite yes extensible_dataset +This feature enables physical block rewriting that preserves logical birth +times, avoiding unnecessary inclusion of rewritten blocks in incremental +.Nm zfs Cm send +streams. +When enabled, the +.Nm zfs Cm rewrite Fl P +command can be used. +.Pp +This feature becomes +.Sy active +the first time +.Nm zfs Cm rewrite Fl P +is used on any dataset, and will return to being +.Sy enabled +once all datasets that have ever used physical rewrite are destroyed. +. .feature org.zfsonlinux project_quota yes extensible_dataset This feature allows administrators to account the spaces and objects usage information against the project identifier diff --git a/man/man8/zfs-rewrite.8 b/man/man8/zfs-rewrite.8 index 423d6d439e28..a3a037f3794a 100644 --- a/man/man8/zfs-rewrite.8 +++ b/man/man8/zfs-rewrite.8 @@ -31,7 +31,7 @@ .Sh SYNOPSIS .Nm zfs .Cm rewrite -.Oo Fl rvx Ns Oc +.Oo Fl Prvx Ns Oc .Op Fl l Ar length .Op Fl o Ar offset .Ar file Ns | Ns Ar directory Ns … @@ -43,6 +43,15 @@ as is without modification at a new location and possibly with new properties, such as checksum, compression, dedup, copies, etc, as if they were atomically read and written back. .Bl -tag -width "-r" +.It Fl P +Perform physical rewrite, preserving logical birth time of blocks. +By default, rewrite updates logical birth times, making blocks appear +as modified in snapshots and incremental send streams. +Physical rewrite preserves logical birth times, avoiding unnecessary +inclusion in incremental streams. +Physical rewrite requires the +.Sy physical_rewrite +feature to be enabled on the pool. .It Fl l Ar length Rewrite at most this number of bytes. .It Fl o Ar offset @@ -60,17 +69,22 @@ same as some property changes may increase pool space usage. Holes that were never written or were previously zero-compressed are not rewritten and will remain holes even if compression is disabled. .Pp -Rewritten blocks will be seen as modified in next snapshot and as such -included into the incremental -.Nm zfs Cm send -stream. -.Pp If a .Fl l or .Fl o value request a rewrite to regions past the end of the file, then those regions are silently ignored, and no error is reported. +.Pp +By default, rewritten blocks update their logical birth time, +meaning they will be included in incremental +.Nm zfs Cm send +streams as modified data. +When the +.Fl P +flag is used, rewritten blocks preserve their logical birth time, since +there are no user data changes. . 
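A minimal usage sketch to accompany the zfs-rewrite.8 text above (illustration only, not part of the patch): the pool tank, dataset tank/fs, and file path are hypothetical, the physical_rewrite feature is assumed to be enabled as described in zpool-features.7, and the dry-run sends are only meant to show the expected size difference.

# Default rewrite: blocks get a new logical birth time, so the next
# incremental stream carries the rewritten blocks.
zfs snapshot tank/fs@base
zfs rewrite /tank/fs/bigfile
zfs snapshot tank/fs@rw
zfs send -nv -i @base tank/fs@rw

# Physical rewrite (-P): logical birth times are preserved, so the
# same incremental stream stays close to empty.
zfs rollback -r tank/fs@base
zfs rewrite -P /tank/fs/bigfile
zfs snapshot tank/fs@rwp
zfs send -nv -i @base tank/fs@rwp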
.Sh SEE ALSO -.Xr zfsprops 7 +.Xr zfsprops 7 , +.Xr zpool-features 7 diff --git a/module/zcommon/zfeature_common.c b/module/zcommon/zfeature_common.c index 0b37530b0e11..6ba9892eeb64 100644 --- a/module/zcommon/zfeature_common.c +++ b/module/zcommon/zfeature_common.c @@ -798,6 +798,18 @@ zpool_feature_init(void) ZFEATURE_FLAG_MOS | ZFEATURE_FLAG_NO_UPGRADE, ZFEATURE_TYPE_BOOLEAN, NULL, sfeatures); + { + static const spa_feature_t physical_rewrite_deps[] = { + SPA_FEATURE_EXTENSIBLE_DATASET, + SPA_FEATURE_NONE + }; + zfeature_register(SPA_FEATURE_PHYSICAL_REWRITE, + "com.truenas:physical_rewrite", "physical_rewrite", + "Support for preserving logical birth time during rewrite.", + ZFEATURE_FLAG_READONLY_COMPAT | ZFEATURE_FLAG_PER_DATASET, + ZFEATURE_TYPE_BOOLEAN, physical_rewrite_deps, sfeatures); + } + zfs_mod_list_supported_free(sfeatures); } diff --git a/module/zfs/arc.c b/module/zfs/arc.c index a2cb3b8a53e7..3483be64ec57 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -1052,7 +1052,7 @@ static arc_buf_hdr_t * buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp) { const dva_t *dva = BP_IDENTITY(bp); - uint64_t birth = BP_GET_BIRTH(bp); + uint64_t birth = BP_GET_PHYSICAL_BIRTH(bp); uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); kmutex_t *hash_lock = BUF_HASH_LOCK(idx); arc_buf_hdr_t *hdr; @@ -5587,7 +5587,7 @@ arc_read_done(zio_t *zio) if (HDR_IN_HASH_TABLE(hdr)) { arc_buf_hdr_t *found; - ASSERT3U(hdr->b_birth, ==, BP_GET_BIRTH(zio->io_bp)); + ASSERT3U(hdr->b_birth, ==, BP_GET_PHYSICAL_BIRTH(zio->io_bp)); ASSERT3U(hdr->b_dva.dva_word[0], ==, BP_IDENTITY(zio->io_bp)->dva_word[0]); ASSERT3U(hdr->b_dva.dva_word[1], ==, @@ -5690,7 +5690,7 @@ arc_read_done(zio_t *zio) error = SET_ERROR(EIO); if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) { spa_log_error(zio->io_spa, &acb->acb_zb, - BP_GET_LOGICAL_BIRTH(zio->io_bp)); + BP_GET_PHYSICAL_BIRTH(zio->io_bp)); (void) zfs_ereport_post( FM_EREPORT_ZFS_AUTHENTICATION, zio->io_spa, NULL, &acb->acb_zb, zio, 0); @@ -6109,7 +6109,7 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, if (!embedded_bp) { hdr->b_dva = *BP_IDENTITY(bp); - hdr->b_birth = BP_GET_BIRTH(bp); + hdr->b_birth = BP_GET_PHYSICAL_BIRTH(bp); exists = buf_hash_insert(hdr, &hash_lock); } if (exists != NULL) { @@ -6957,7 +6957,7 @@ arc_write_done(zio_t *zio) buf_discard_identity(hdr); } else { hdr->b_dva = *BP_IDENTITY(zio->io_bp); - hdr->b_birth = BP_GET_BIRTH(zio->io_bp); + hdr->b_birth = BP_GET_PHYSICAL_BIRTH(zio->io_bp); } } else { ASSERT(HDR_EMPTY(hdr)); diff --git a/module/zfs/bpobj.c b/module/zfs/bpobj.c index 8c19de93f12f..0a8a077edf63 100644 --- a/module/zfs/bpobj.c +++ b/module/zfs/bpobj.c @@ -954,8 +954,8 @@ space_range_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx) (void) bp_freed, (void) tx; struct space_range_arg *sra = arg; - if (BP_GET_LOGICAL_BIRTH(bp) > sra->mintxg && - BP_GET_LOGICAL_BIRTH(bp) <= sra->maxtxg) { + if (BP_GET_BIRTH(bp) > sra->mintxg && + BP_GET_BIRTH(bp) <= sra->maxtxg) { if (dsl_pool_sync_context(spa_get_dsl(sra->spa))) sra->used += bp_get_dsize_sync(sra->spa, bp); else diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index f1b5a17f337e..d61f0d1a015d 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -1235,11 +1235,9 @@ dbuf_verify(dmu_buf_impl_t *db) DVA_IS_EMPTY(&bp->blk_dva[1]) && DVA_IS_EMPTY(&bp->blk_dva[2])); ASSERT0(bp->blk_fill); - ASSERT0(bp->blk_pad[0]); - ASSERT0(bp->blk_pad[1]); ASSERT(!BP_IS_EMBEDDED(bp)); ASSERT(BP_IS_HOLE(bp)); - ASSERT0(BP_GET_PHYSICAL_BIRTH(bp)); + 
ASSERT0(BP_GET_RAW_PHYSICAL_BIRTH(bp)); } } } @@ -1615,7 +1613,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, dnode_t *dn, zio_t *zio, dmu_flags_t flags, */ if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bp)) { spa_log_error(db->db_objset->os_spa, &zb, - BP_GET_LOGICAL_BIRTH(bp)); + BP_GET_PHYSICAL_BIRTH(bp)); err = SET_ERROR(EIO); goto early_unlock; } @@ -2154,6 +2152,12 @@ dbuf_redirty(dbuf_dirty_record_t *dr) ASSERT(arc_released(db->db_buf)); arc_buf_thaw(db->db_buf); } + + /* + * Clear the rewrite flag since this is now a logical + * modification. + */ + dr->dt.dl.dr_rewrite = B_FALSE; } } @@ -2701,6 +2705,38 @@ dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) dmu_buf_will_dirty_flags(db_fake, tx, DMU_READ_NO_PREFETCH); } +void +dmu_buf_will_rewrite(dmu_buf_t *db_fake, dmu_tx_t *tx) +{ + dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; + + ASSERT(tx->tx_txg != 0); + ASSERT(!zfs_refcount_is_zero(&db->db_holds)); + + /* + * If the dbuf is already dirty in this txg, it will be written + * anyway, so there's nothing to do. + */ + mutex_enter(&db->db_mtx); + if (dbuf_find_dirty_eq(db, tx->tx_txg) != NULL) { + mutex_exit(&db->db_mtx); + return; + } + mutex_exit(&db->db_mtx); + + /* + * The dbuf is not dirty, so we need to make it dirty and + * mark it for rewrite (preserve logical birth time). + */ + dmu_buf_will_dirty_flags(db_fake, tx, DMU_READ_NO_PREFETCH); + + mutex_enter(&db->db_mtx); + dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg); + if (dr != NULL && db->db_level == 0) + dr->dt.dl.dr_rewrite = B_TRUE; + mutex_exit(&db->db_mtx); +} + boolean_t dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) { @@ -4899,7 +4935,7 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) dnode_diduse_space(dn, delta - zio->io_prev_space_delta); zio->io_prev_space_delta = delta; - if (BP_GET_LOGICAL_BIRTH(bp) != 0) { + if (BP_GET_BIRTH(bp) != 0) { ASSERT((db->db_blkid != DMU_SPILL_BLKID && BP_GET_TYPE(bp) == dn->dn_type) || (db->db_blkid == DMU_SPILL_BLKID && @@ -5186,7 +5222,7 @@ dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx) ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); drica.drica_os = dn->dn_objset; - drica.drica_blk_birth = BP_GET_LOGICAL_BIRTH(bp); + drica.drica_blk_birth = BP_GET_BIRTH(bp); drica.drica_tx = tx; if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, &drica)) { @@ -5201,8 +5237,7 @@ dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx) if (dn->dn_objset != spa_meta_objset(spa)) { dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset); if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) && - BP_GET_LOGICAL_BIRTH(bp) > - ds->ds_dir->dd_origin_txg) { + BP_GET_BIRTH(bp) > ds->ds_dir->dd_origin_txg) { ASSERT(!BP_IS_EMBEDDED(bp)); ASSERT(dsl_dir_is_clone(ds->ds_dir)); ASSERT(spa_feature_is_enabled(spa, @@ -5320,7 +5355,7 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) } ASSERT(db->db_level == 0 || data == db->db_buf); - ASSERT3U(BP_GET_LOGICAL_BIRTH(db->db_blkptr), <=, txg); + ASSERT3U(BP_GET_BIRTH(db->db_blkptr), <=, txg); ASSERT(pio); SET_BOOKMARK(&zb, os->os_dsl_dataset ? @@ -5333,6 +5368,24 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); + /* + * Set rewrite properties for zfs_rewrite() operations. + */ + if (db->db_level == 0 && dr->dt.dl.dr_rewrite) { + zp.zp_rewrite = B_TRUE; + + /* + * Mark physical rewrite feature for activation. + * This will be activated automatically during dataset sync. 
+ */ + dsl_dataset_t *ds = os->os_dsl_dataset; + if (!dsl_dataset_feature_is_active(ds, + SPA_FEATURE_PHYSICAL_REWRITE)) { + ds->ds_feature_activation[ + SPA_FEATURE_PHYSICAL_REWRITE] = (void *)B_TRUE; + } + } + /* * We copy the blkptr now (rather than when we instantiate the dirty * record), because its value can change between open context and @@ -5403,6 +5456,7 @@ EXPORT_SYMBOL(dbuf_release_bp); EXPORT_SYMBOL(dbuf_dirty); EXPORT_SYMBOL(dmu_buf_set_crypt_params); EXPORT_SYMBOL(dmu_buf_will_dirty); +EXPORT_SYMBOL(dmu_buf_will_rewrite); EXPORT_SYMBOL(dmu_buf_is_dirty); EXPORT_SYMBOL(dmu_buf_will_clone_or_dio); EXPORT_SYMBOL(dmu_buf_will_not_fill); diff --git a/module/zfs/ddt.c b/module/zfs/ddt.c index 60cbb7755a7e..e0b9fc3951ff 100644 --- a/module/zfs/ddt.c +++ b/module/zfs/ddt.c @@ -724,10 +724,13 @@ ddt_phys_extend(ddt_univ_phys_t *ddp, ddt_phys_variant_t v, const blkptr_t *bp) dvas[2] = bp->blk_dva[2]; if (ddt_phys_birth(ddp, v) == 0) { - if (v == DDT_PHYS_FLAT) - ddp->ddp_flat.ddp_phys_birth = BP_GET_BIRTH(bp); - else - ddp->ddp_trad[v].ddp_phys_birth = BP_GET_BIRTH(bp); + if (v == DDT_PHYS_FLAT) { + ddp->ddp_flat.ddp_phys_birth = + BP_GET_PHYSICAL_BIRTH(bp); + } else { + ddp->ddp_trad[v].ddp_phys_birth = + BP_GET_PHYSICAL_BIRTH(bp); + } } } @@ -891,14 +894,14 @@ ddt_phys_select(const ddt_t *ddt, const ddt_entry_t *dde, const blkptr_t *bp) if (ddt->ddt_flags & DDT_FLAG_FLAT) { if (DVA_EQUAL(BP_IDENTITY(bp), &ddp->ddp_flat.ddp_dva[0]) && - BP_GET_BIRTH(bp) == ddp->ddp_flat.ddp_phys_birth) { + BP_GET_PHYSICAL_BIRTH(bp) == ddp->ddp_flat.ddp_phys_birth) { return (DDT_PHYS_FLAT); } } else /* traditional phys */ { for (int p = 0; p < DDT_PHYS_MAX; p++) { if (DVA_EQUAL(BP_IDENTITY(bp), &ddp->ddp_trad[p].ddp_dva[0]) && - BP_GET_BIRTH(bp) == + BP_GET_PHYSICAL_BIRTH(bp) == ddp->ddp_trad[p].ddp_phys_birth) { return (p); } diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index 21c465328134..296e58ef9cd8 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -1966,7 +1966,7 @@ dmu_sync_late_arrival_done(zio_t *zio) blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig; ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE)); ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig)); - ASSERT(BP_GET_LOGICAL_BIRTH(zio->io_bp) == zio->io_txg); + ASSERT(BP_GET_BIRTH(zio->io_bp) == zio->io_txg); ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa)); zio_free(zio->io_spa, zio->io_txg, zio->io_bp); } @@ -2508,6 +2508,7 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp) zp->zp_encrypt = encrypt; zp->zp_byteorder = ZFS_HOST_BYTEORDER; zp->zp_direct_write = (wp & WP_DIRECT_WR) ? B_TRUE : B_FALSE; + zp->zp_rewrite = B_FALSE; memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN); memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN); memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN); @@ -2655,11 +2656,12 @@ dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length, * operation into ZIL, or it may be impossible to replay, since * the block may appear not yet allocated at that point. 
*/ - if (BP_GET_BIRTH(bp) > spa_freeze_txg(os->os_spa)) { + if (BP_GET_PHYSICAL_BIRTH(bp) > spa_freeze_txg(os->os_spa)) { error = SET_ERROR(EINVAL); goto out; } - if (BP_GET_BIRTH(bp) > spa_last_synced_txg(os->os_spa)) { + if (BP_GET_PHYSICAL_BIRTH(bp) > + spa_last_synced_txg(os->os_spa)) { error = SET_ERROR(EAGAIN); goto out; } @@ -2731,7 +2733,8 @@ dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length, if (!BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) != 0) { if (!BP_IS_EMBEDDED(bp)) { BP_SET_BIRTH(&dl->dr_overridden_by, dr->dr_txg, - BP_GET_BIRTH(bp)); + BP_GET_PHYSICAL_BIRTH(bp)); + BP_SET_REWRITE(&dl->dr_overridden_by, 0); } else { BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg); diff --git a/module/zfs/dmu_diff.c b/module/zfs/dmu_diff.c index 86f751e886c9..fb13b2f87f57 100644 --- a/module/zfs/dmu_diff.c +++ b/module/zfs/dmu_diff.c @@ -224,8 +224,8 @@ dmu_diff(const char *tosnap_name, const char *fromsnap_name, * call the ZFS_IOC_OBJ_TO_STATS ioctl. */ error = traverse_dataset(tosnap, fromtxg, - TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_NO_DECRYPT, - diff_cb, &da); + TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_NO_DECRYPT | + TRAVERSE_LOGICAL, diff_cb, &da); if (error != 0) { da.da_err = error; diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c index 3a4bd7a1cea9..6c36bf44414e 100644 --- a/module/zfs/dmu_recv.c +++ b/module/zfs/dmu_recv.c @@ -1403,7 +1403,7 @@ corrective_read_done(zio_t *zio) /* Corruption corrected; update error log if needed */ if (zio->io_error == 0) { spa_remove_error(data->spa, &data->zb, - BP_GET_LOGICAL_BIRTH(zio->io_bp)); + BP_GET_PHYSICAL_BIRTH(zio->io_bp)); } kmem_free(data, sizeof (cr_cb_data_t)); abd_free(zio->io_abd); @@ -1530,7 +1530,7 @@ do_corrective_recv(struct receive_writer_arg *rwa, struct drr_write *drrw, } rrd->abd = abd; - io = zio_rewrite(NULL, rwa->os->os_spa, BP_GET_LOGICAL_BIRTH(bp), bp, + io = zio_rewrite(NULL, rwa->os->os_spa, BP_GET_BIRTH(bp), bp, abd, BP_GET_PSIZE(bp), NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, flags, &zb); diff --git a/module/zfs/dmu_redact.c b/module/zfs/dmu_redact.c index 65443d112f27..9226ac9e4b80 100644 --- a/module/zfs/dmu_redact.c +++ b/module/zfs/dmu_redact.c @@ -370,8 +370,8 @@ redact_traverse_thread(void *arg) #endif err = traverse_dataset_resume(rt_arg->ds, rt_arg->txg, - &rt_arg->resume, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, - redact_cb, rt_arg); + &rt_arg->resume, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | + TRAVERSE_LOGICAL, redact_cb, rt_arg); if (err != EINTR) rt_arg->error_code = err; diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c index 4f27f3df0e55..deeba29e159a 100644 --- a/module/zfs/dmu_send.c +++ b/module/zfs/dmu_send.c @@ -1084,7 +1084,7 @@ send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, */ if (sta->os->os_encrypted && !BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) { - spa_log_error(spa, zb, BP_GET_LOGICAL_BIRTH(bp)); + spa_log_error(spa, zb, BP_GET_PHYSICAL_BIRTH(bp)); return (SET_ERROR(EIO)); } @@ -1210,7 +1210,7 @@ send_traverse_thread(void *arg) err = traverse_dataset_resume(st_arg->os->os_dsl_dataset, st_arg->fromtxg, &st_arg->resume, - st_arg->flags, send_cb, st_arg); + st_arg->flags | TRAVERSE_LOGICAL, send_cb, st_arg); if (err != EINTR) st_arg->error_code = err; diff --git a/module/zfs/dmu_traverse.c b/module/zfs/dmu_traverse.c index f534a7dd64e3..dd1df1705040 100644 --- a/module/zfs/dmu_traverse.c +++ b/module/zfs/dmu_traverse.c @@ -74,6 +74,15 @@ static int traverse_dnode(traverse_data_t *td, const blkptr_t *bp, static 
void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *, uint64_t objset, uint64_t object); +static inline uint64_t +get_birth_time(traverse_data_t *td, const blkptr_t *bp) +{ + if (td->td_flags & TRAVERSE_LOGICAL) + return (BP_GET_LOGICAL_BIRTH(bp)); + else + return (BP_GET_BIRTH(bp)); +} + static int traverse_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg) @@ -85,7 +94,7 @@ traverse_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg, return (0); if (claim_txg == 0 && - BP_GET_LOGICAL_BIRTH(bp) >= spa_min_claim_txg(td->td_spa)) + get_birth_time(td, bp) >= spa_min_claim_txg(td->td_spa)) return (-1); SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, @@ -110,7 +119,7 @@ traverse_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg, if (BP_IS_HOLE(bp)) return (0); - if (claim_txg == 0 || BP_GET_LOGICAL_BIRTH(bp) < claim_txg) + if (claim_txg == 0 || get_birth_time(td, bp) < claim_txg) return (0); ASSERT3U(BP_GET_LSIZE(bp), !=, 0); @@ -194,7 +203,7 @@ traverse_prefetch_metadata(traverse_data_t *td, const dnode_phys_t *dnp, */ if (resume_skip_check(td, dnp, zb) != RESUME_SKIP_NONE) return (B_FALSE); - if (BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) <= td->td_min_txg) + if (BP_IS_HOLE(bp) || get_birth_time(td, bp) <= td->td_min_txg) return (B_FALSE); if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE) return (B_FALSE); @@ -265,7 +274,7 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp, zb->zb_object == DMU_META_DNODE_OBJECT) && td->td_hole_birth_enabled_txg <= td->td_min_txg) return (0); - } else if (BP_GET_LOGICAL_BIRTH(bp) <= td->td_min_txg) { + } else if (get_birth_time(td, bp) <= td->td_min_txg) { return (0); } diff --git a/module/zfs/dsl_bookmark.c b/module/zfs/dsl_bookmark.c index e301fe19f645..fdc8b7b198f0 100644 --- a/module/zfs/dsl_bookmark.c +++ b/module/zfs/dsl_bookmark.c @@ -1523,7 +1523,7 @@ dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx) * If the block was live (referenced) at the time of this * bookmark, add its space to the bookmark's FBN. */ - if (BP_GET_LOGICAL_BIRTH(bp) <= + if (BP_GET_BIRTH(bp) <= dbn->dbn_phys.zbm_creation_txg && (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) { mutex_enter(&dbn->dbn_lock); diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c index c0a7872c40ad..c4dcbebaa8ea 100644 --- a/module/zfs/dsl_dataset.c +++ b/module/zfs/dsl_dataset.c @@ -159,7 +159,7 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx) return; } - ASSERT3U(BP_GET_LOGICAL_BIRTH(bp), >, + ASSERT3U(BP_GET_BIRTH(bp), >, dsl_dataset_phys(ds)->ds_prev_snap_txg); dmu_buf_will_dirty(ds->ds_dbuf, tx); mutex_enter(&ds->ds_lock); @@ -194,7 +194,7 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx) * they do not need to be freed. */ if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) && - BP_GET_LOGICAL_BIRTH(bp) > ds->ds_dir->dd_origin_txg && + BP_GET_BIRTH(bp) > ds->ds_dir->dd_origin_txg && !(BP_IS_EMBEDDED(bp))) { ASSERT(dsl_dir_is_clone(ds->ds_dir)); ASSERT(spa_feature_is_enabled(spa, @@ -263,7 +263,7 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx, return (0); ASSERT(dmu_tx_is_syncing(tx)); - ASSERT(BP_GET_LOGICAL_BIRTH(bp) <= tx->tx_txg); + ASSERT(BP_GET_BIRTH(bp) <= tx->tx_txg); if (ds == NULL) { dsl_free(tx->tx_pool, tx->tx_txg, bp); @@ -281,7 +281,7 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx, * they do not need to be freed. 
*/ if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) && - BP_GET_LOGICAL_BIRTH(bp) > ds->ds_dir->dd_origin_txg && + BP_GET_BIRTH(bp) > ds->ds_dir->dd_origin_txg && !(BP_IS_EMBEDDED(bp))) { ASSERT(dsl_dir_is_clone(ds->ds_dir)); ASSERT(spa_feature_is_enabled(spa, @@ -289,7 +289,7 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx, bplist_append(&ds->ds_dir->dd_pending_frees, bp); } - if (BP_GET_LOGICAL_BIRTH(bp) > dsl_dataset_phys(ds)->ds_prev_snap_txg) { + if (BP_GET_BIRTH(bp) > dsl_dataset_phys(ds)->ds_prev_snap_txg) { int64_t delta; /* @@ -346,14 +346,14 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx, ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_num_children > 0); /* if (logical birth > prev prev snap txg) prev unique += bs */ if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == - ds->ds_object && BP_GET_LOGICAL_BIRTH(bp) > + ds->ds_object && BP_GET_BIRTH(bp) > dsl_dataset_phys(ds->ds_prev)->ds_prev_snap_txg) { dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); mutex_enter(&ds->ds_prev->ds_lock); dsl_dataset_phys(ds->ds_prev)->ds_unique_bytes += used; mutex_exit(&ds->ds_prev->ds_lock); } - if (BP_GET_LOGICAL_BIRTH(bp) > ds->ds_dir->dd_origin_txg) { + if (BP_GET_BIRTH(bp) > ds->ds_dir->dd_origin_txg) { dsl_dir_transfer_space(ds->ds_dir, used, DD_USED_HEAD, DD_USED_SNAP, tx); } @@ -2944,7 +2944,7 @@ dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap) if (snap == NULL) return (B_FALSE); rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); - birth = BP_GET_LOGICAL_BIRTH(dsl_dataset_get_blkptr(ds)); + birth = BP_GET_BIRTH(dsl_dataset_get_blkptr(ds)); rrw_exit(&ds->ds_bp_rwlock, FTAG); if (birth > dsl_dataset_phys(snap)->ds_creation_txg) { objset_t *os, *os_snap; diff --git a/module/zfs/dsl_deadlist.c b/module/zfs/dsl_deadlist.c index 3113d932fb68..9ffc998ac173 100644 --- a/module/zfs/dsl_deadlist.c +++ b/module/zfs/dsl_deadlist.c @@ -484,7 +484,7 @@ dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, boolean_t bp_freed, dl->dl_phys->dl_comp += sign * BP_GET_PSIZE(bp); dl->dl_phys->dl_uncomp += sign * BP_GET_UCSIZE(bp); - dle_tofind.dle_mintxg = BP_GET_LOGICAL_BIRTH(bp); + dle_tofind.dle_mintxg = BP_GET_BIRTH(bp); dle = avl_find(&dl->dl_tree, &dle_tofind, &where); if (dle == NULL) dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE); @@ -493,7 +493,7 @@ dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, boolean_t bp_freed, if (dle == NULL) { zfs_panic_recover("blkptr at %p has invalid BLK_BIRTH %llu", - bp, (longlong_t)BP_GET_LOGICAL_BIRTH(bp)); + bp, (longlong_t)BP_GET_BIRTH(bp)); dle = avl_first(&dl->dl_tree); } diff --git a/module/zfs/dsl_destroy.c b/module/zfs/dsl_destroy.c index f5ec93b2dc5c..fff49c97f4d2 100644 --- a/module/zfs/dsl_destroy.c +++ b/module/zfs/dsl_destroy.c @@ -133,11 +133,11 @@ process_old_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx) ASSERT(!BP_IS_HOLE(bp)); - if (BP_GET_LOGICAL_BIRTH(bp) <= + if (BP_GET_BIRTH(bp) <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) { dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, bp_freed, tx); if (poa->ds_prev && !poa->after_branch_point && - BP_GET_LOGICAL_BIRTH(bp) > + BP_GET_BIRTH(bp) > dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) { dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes += bp_get_dsize_sync(dp->dp_spa, bp); @@ -315,8 +315,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx) ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock)); rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); - 
ASSERT3U(BP_GET_LOGICAL_BIRTH(&dsl_dataset_phys(ds)->ds_bp), <=, - tx->tx_txg); + ASSERT3U(BP_GET_BIRTH(&dsl_dataset_phys(ds)->ds_bp), <=, tx->tx_txg); rrw_exit(&ds->ds_bp_rwlock, FTAG); ASSERT(zfs_refcount_is_zero(&ds->ds_longholds)); @@ -730,7 +729,7 @@ kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp); } else { ASSERT(zilog == NULL); - ASSERT3U(BP_GET_LOGICAL_BIRTH(bp), >, + ASSERT3U(BP_GET_BIRTH(bp), >, dsl_dataset_phys(ka->ds)->ds_prev_snap_txg); (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE); } @@ -1020,8 +1019,7 @@ dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx) ASSERT(ds->ds_prev == NULL || dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object); rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); - ASSERT3U(BP_GET_LOGICAL_BIRTH(&dsl_dataset_phys(ds)->ds_bp), <=, - tx->tx_txg); + ASSERT3U(BP_GET_BIRTH(&dsl_dataset_phys(ds)->ds_bp), <=, tx->tx_txg); rrw_exit(&ds->ds_bp_rwlock, FTAG); ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock)); diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c index f1088d87208b..4f1f66b835f2 100644 --- a/module/zfs/dsl_pool.c +++ b/module/zfs/dsl_pool.c @@ -1056,7 +1056,7 @@ upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) * will be wrong. */ rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); - ASSERT0(BP_GET_LOGICAL_BIRTH(&dsl_dataset_phys(prev)->ds_bp)); + ASSERT0(BP_GET_BIRTH(&dsl_dataset_phys(prev)->ds_bp)); rrw_exit(&ds->ds_bp_rwlock, FTAG); /* The origin doesn't get attached to itself */ diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c index 1b2cd3e361d1..5052992d775c 100644 --- a/module/zfs/dsl_scan.c +++ b/module/zfs/dsl_scan.c @@ -454,7 +454,7 @@ static inline void bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i) { sio->sio_blk_prop = bp->blk_prop; - sio->sio_phys_birth = BP_GET_PHYSICAL_BIRTH(bp); + sio->sio_phys_birth = BP_GET_RAW_PHYSICAL_BIRTH(bp); sio->sio_birth = BP_GET_LOGICAL_BIRTH(bp); sio->sio_cksum = bp->blk_cksum; sio->sio_nr_dvas = BP_GET_NDVAS(bp); @@ -1768,7 +1768,7 @@ dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg, ASSERT(!BP_IS_REDACTED(bp)); if (BP_IS_HOLE(bp) || - BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) + BP_GET_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) return (0); /* @@ -1778,7 +1778,7 @@ dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg, * scrub there's nothing to do to it). 
*/ if (claim_txg == 0 && - BP_GET_LOGICAL_BIRTH(bp) >= spa_min_claim_txg(dp->dp_spa)) + BP_GET_BIRTH(bp) >= spa_min_claim_txg(dp->dp_spa)) return (0); SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET], @@ -1804,7 +1804,7 @@ dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg, ASSERT(!BP_IS_REDACTED(bp)); if (BP_IS_HOLE(bp) || - BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) + BP_GET_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) return (0); /* @@ -1812,7 +1812,7 @@ dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg, * already txg sync'ed (but this log block contains * other records that are not synced) */ - if (claim_txg == 0 || BP_GET_LOGICAL_BIRTH(bp) < claim_txg) + if (claim_txg == 0 || BP_GET_BIRTH(bp) < claim_txg) return (0); ASSERT3U(BP_GET_LSIZE(bp), !=, 0); @@ -1952,7 +1952,7 @@ dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb) return; if (BP_IS_HOLE(bp) || - BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg || + BP_GET_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg || (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) return; @@ -2223,7 +2223,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype, if (dnp != NULL && dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) { scn->scn_phys.scn_errors++; - spa_log_error(spa, zb, BP_GET_LOGICAL_BIRTH(bp)); + spa_log_error(spa, zb, BP_GET_PHYSICAL_BIRTH(bp)); return (SET_ERROR(EINVAL)); } @@ -2319,7 +2319,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype, * by arc_read() for the cases above. */ scn->scn_phys.scn_errors++; - spa_log_error(spa, zb, BP_GET_LOGICAL_BIRTH(bp)); + spa_log_error(spa, zb, BP_GET_PHYSICAL_BIRTH(bp)); return (SET_ERROR(EINVAL)); } @@ -2396,7 +2396,12 @@ dsl_scan_visitbp(const blkptr_t *bp, const zbookmark_phys_t *zb, if (f != SPA_FEATURE_NONE) ASSERT(dsl_dataset_feature_is_active(ds, f)); - if (BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) { + /* + * Recurse any blocks that were written either logically or physically + * at or after cur_min_txg. About logical birth we care for traversal, + * looking for any changes, while about physical for the actual scan. + */ + if (BP_GET_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) { scn->scn_lt_min_this_txg++; return; } @@ -2422,7 +2427,7 @@ dsl_scan_visitbp(const blkptr_t *bp, const zbookmark_phys_t *zb, * Don't scan it now unless we need to because something * under it was modified. 
*/ - if (BP_GET_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) { + if (BP_GET_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) { scn->scn_gt_max_this_txg++; return; } @@ -4806,7 +4811,7 @@ dsl_scan_scrub_cb(dsl_pool_t *dp, { dsl_scan_t *scn = dp->dp_scan; spa_t *spa = dp->dp_spa; - uint64_t phys_birth = BP_GET_BIRTH(bp); + uint64_t phys_birth = BP_GET_PHYSICAL_BIRTH(bp); size_t psize = BP_GET_PSIZE(bp); boolean_t needs_io = B_FALSE; int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index 69484d404eef..8de1cb68631e 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -5575,7 +5575,21 @@ remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, vdev_indirect_births_t *vib = oldvd->vdev_indirect_births; uint64_t physical_birth = vdev_indirect_births_physbirth(vib, DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0])); - BP_SET_PHYSICAL_BIRTH(bp, physical_birth); + + /* + * For rewritten blocks, use the old physical birth as the new logical + * birth (representing when the space was allocated) and the removal + * time as the new physical birth (representing when it was actually + * written). + */ + if (BP_GET_REWRITE(bp)) { + uint64_t old_physical_birth = BP_GET_PHYSICAL_BIRTH(bp); + ASSERT3U(old_physical_birth, <, physical_birth); + BP_SET_BIRTH(bp, old_physical_birth, physical_birth); + BP_SET_REWRITE(bp, 0); + } else { + BP_SET_PHYSICAL_BIRTH(bp, physical_birth); + } DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id); DVA_SET_OFFSET(&bp->blk_dva[0], offset); @@ -5944,7 +5958,7 @@ metaslab_alloc_range(spa_t *spa, metaslab_class_t *mc, uint64_t psize, int error = 0; ASSERT0(BP_GET_LOGICAL_BIRTH(bp)); - ASSERT0(BP_GET_PHYSICAL_BIRTH(bp)); + ASSERT0(BP_GET_RAW_PHYSICAL_BIRTH(bp)); spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); @@ -6006,7 +6020,7 @@ metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) int ndvas = BP_GET_NDVAS(bp); ASSERT(!BP_IS_HOLE(bp)); - ASSERT(!now || BP_GET_LOGICAL_BIRTH(bp) >= spa_syncing_txg(spa)); + ASSERT(!now || BP_GET_BIRTH(bp) >= spa_syncing_txg(spa)); /* * If we have a checkpoint for the pool we need to make sure that @@ -6024,7 +6038,7 @@ metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) * normally as they will be referenced by the checkpointed uberblock. 
*/ boolean_t checkpoint = B_FALSE; - if (BP_GET_LOGICAL_BIRTH(bp) <= spa->spa_checkpoint_txg && + if (BP_GET_BIRTH(bp) <= spa->spa_checkpoint_txg && spa_syncing_txg(spa) > spa->spa_checkpoint_txg) { /* * At this point, if the block is part of the checkpoint diff --git a/module/zfs/spa.c b/module/zfs/spa.c index c0876c935405..33915344f827 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -2718,8 +2718,8 @@ spa_claim_notify(zio_t *zio) return; mutex_enter(&spa->spa_props_lock); /* any mutex will do */ - if (spa->spa_claim_max_txg < BP_GET_LOGICAL_BIRTH(zio->io_bp)) - spa->spa_claim_max_txg = BP_GET_LOGICAL_BIRTH(zio->io_bp); + if (spa->spa_claim_max_txg < BP_GET_BIRTH(zio->io_bp)) + spa->spa_claim_max_txg = BP_GET_BIRTH(zio->io_bp); mutex_exit(&spa->spa_props_lock); } diff --git a/module/zfs/spa_errlog.c b/module/zfs/spa_errlog.c index 3e08f261fda1..7252fd534bdf 100644 --- a/module/zfs/spa_errlog.c +++ b/module/zfs/spa_errlog.c @@ -253,7 +253,7 @@ find_birth_txg(dsl_dataset_t *ds, zbookmark_err_phys_t *zep, if (error == 0 && BP_IS_HOLE(&bp)) error = SET_ERROR(ENOENT); - *birth_txg = BP_GET_LOGICAL_BIRTH(&bp); + *birth_txg = BP_GET_PHYSICAL_BIRTH(&bp); rw_exit(&dn->dn_struct_rwlock); dnode_rele(dn, FTAG); return (error); @@ -885,7 +885,7 @@ sync_upgrade_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t *newobj, if (error == EACCES) error = 0; else if (!error) - zep.zb_birth = BP_GET_LOGICAL_BIRTH(&bp); + zep.zb_birth = BP_GET_PHYSICAL_BIRTH(&bp); rw_exit(&dn->dn_struct_rwlock); dnode_rele(dn, FTAG); diff --git a/module/zfs/vdev_mirror.c b/module/zfs/vdev_mirror.c index 2b78340cf707..18efdaac006f 100644 --- a/module/zfs/vdev_mirror.c +++ b/module/zfs/vdev_mirror.c @@ -532,7 +532,7 @@ vdev_mirror_child_select(zio_t *zio) uint64_t txg = zio->io_txg; int c, lowest_load; - ASSERT(zio->io_bp == NULL || BP_GET_BIRTH(zio->io_bp) == txg); + ASSERT(zio->io_bp == NULL || BP_GET_PHYSICAL_BIRTH(zio->io_bp) == txg); lowest_load = INT_MAX; mm->mm_preferred_cnt = 0; diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c index ecb6c7f50b4d..9486a3f29e30 100644 --- a/module/zfs/vdev_raidz.c +++ b/module/zfs/vdev_raidz.c @@ -2206,11 +2206,7 @@ vdev_raidz_close(vdev_t *vd) /* * Return the logical width to use, given the txg in which the allocation - * happened. Note that BP_GET_BIRTH() is usually the txg in which the - * BP was allocated. Remapped BP's (that were relocated due to device - * removal, see remap_blkptr_cb()), will have a more recent physical birth - * which reflects when the BP was relocated, but we can ignore these because - * they can't be on RAIDZ (device removal doesn't support RAIDZ). + * happened. 
*/ static uint64_t vdev_raidz_get_logical_width(vdev_raidz_t *vdrz, uint64_t txg) @@ -2343,7 +2339,7 @@ vdev_raidz_io_verify(zio_t *zio, raidz_map_t *rm, raidz_row_t *rr, int col) logical_rs.rs_start = rr->rr_offset; logical_rs.rs_end = logical_rs.rs_start + vdev_raidz_psize_to_asize(zio->io_vd, rr->rr_size, - BP_GET_BIRTH(zio->io_bp)); + BP_GET_PHYSICAL_BIRTH(zio->io_bp)); raidz_col_t *rc = &rr->rr_col[col]; vdev_t *cvd = zio->io_vd->vdev_child[rc->rc_devidx]; @@ -2566,7 +2562,7 @@ vdev_raidz_io_start(zio_t *zio) raidz_map_t *rm; uint64_t logical_width = vdev_raidz_get_logical_width(vdrz, - BP_GET_BIRTH(zio->io_bp)); + BP_GET_PHYSICAL_BIRTH(zio->io_bp)); if (logical_width != vdrz->vd_physical_width) { zfs_locked_range_t *lr = NULL; uint64_t synced_offset = UINT64_MAX; diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index dfffcc4a4040..0be6b63945c5 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -1103,13 +1104,21 @@ zfs_rewrite(znode_t *zp, uint64_t off, uint64_t len, uint64_t flags, { int error; - if (flags != 0 || arg != 0) + if ((flags & ~ZFS_REWRITE_PHYSICAL) != 0 || arg != 0) return (SET_ERROR(EINVAL)); zfsvfs_t *zfsvfs = ZTOZSB(zp); if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0) return (error); + /* Check if physical rewrite is allowed */ + spa_t *spa = zfsvfs->z_os->os_spa; + if ((flags & ZFS_REWRITE_PHYSICAL) && + !spa_feature_is_enabled(spa, SPA_FEATURE_PHYSICAL_REWRITE)) { + zfs_exit(zfsvfs, FTAG); + return (SET_ERROR(ENOTSUP)); + } + if (zfs_is_readonly(zfsvfs)) { zfs_exit(zfsvfs, FTAG); return (SET_ERROR(EROFS)); @@ -1197,7 +1206,10 @@ zfs_rewrite(znode_t *zp, uint64_t off, uint64_t len, uint64_t flags, if (dmu_buf_is_dirty(dbp[i], tx)) continue; nw += dbp[i]->db_size; - dmu_buf_will_dirty(dbp[i], tx); + if (flags & ZFS_REWRITE_PHYSICAL) + dmu_buf_will_rewrite(dbp[i], tx); + else + dmu_buf_will_dirty(dbp[i], tx); } dmu_buf_rele_array(dbp, numbufs, FTAG); diff --git a/module/zfs/zil.c b/module/zfs/zil.c index 3aa188a95817..6e4f84257407 100644 --- a/module/zfs/zil.c +++ b/module/zfs/zil.c @@ -589,7 +589,7 @@ zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, * that we rewind to is invalid. Thus, we return -1 so * zil_parse() doesn't attempt to read it. */ - if (BP_GET_LOGICAL_BIRTH(bp) >= first_txg) + if (BP_GET_BIRTH(bp) >= first_txg) return (-1); if (zil_bp_tree_add(zilog, bp) != 0) @@ -615,7 +615,7 @@ zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, * Claim log block if not already committed and not already claimed. * If tx == NULL, just verify that the block is claimable. */ - if (BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) < first_txg || + if (BP_IS_HOLE(bp) || BP_GET_BIRTH(bp) < first_txg || zil_bp_tree_add(zilog, bp) != 0) return (0); @@ -640,7 +640,7 @@ zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg) * waited for all writes to be stable first), so it is semantically * correct to declare this the end of the log. */ - if (BP_GET_LOGICAL_BIRTH(&lr->lr_blkptr) >= first_txg) { + if (BP_GET_BIRTH(&lr->lr_blkptr) >= first_txg) { error = zil_read_log_data(zilog, lr, NULL); if (error != 0) return (error); @@ -687,7 +687,7 @@ zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx, * just in case lets be safe and just stop here now instead of * corrupting the pool. 
*/ - if (BP_GET_BIRTH(bp) >= first_txg) + if (BP_GET_PHYSICAL_BIRTH(bp) >= first_txg) return (SET_ERROR(ENOENT)); /* @@ -742,7 +742,7 @@ zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg) /* * If we previously claimed it, we need to free it. */ - if (BP_GET_LOGICAL_BIRTH(bp) >= claim_txg && + if (BP_GET_BIRTH(bp) >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 && !BP_IS_HOLE(bp)) { zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); } @@ -1997,7 +1997,7 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb) &slog); } if (error == 0) { - ASSERT3U(BP_GET_LOGICAL_BIRTH(bp), ==, txg); + ASSERT3U(BP_GET_BIRTH(bp), ==, txg); BP_SET_CHECKSUM(bp, nlwb->lwb_slim ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); bp->blk_cksum = lwb->lwb_blk.blk_cksum; diff --git a/module/zfs/zio.c b/module/zfs/zio.c index 7e4caaa83ee9..c4e86baec1f5 100644 --- a/module/zfs/zio.c +++ b/module/zfs/zio.c @@ -692,7 +692,7 @@ zio_decrypt(zio_t *zio, abd_t *data, uint64_t size) zio->io_error = SET_ERROR(EIO); if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) { spa_log_error(spa, &zio->io_bookmark, - BP_GET_LOGICAL_BIRTH(zio->io_bp)); + BP_GET_PHYSICAL_BIRTH(zio->io_bp)); (void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION, spa, NULL, &zio->io_bookmark, zio, 0); } @@ -1104,7 +1104,8 @@ zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp, "DVA[1]=%#llx/%#llx " "DVA[2]=%#llx/%#llx " "prop=%#llx " - "pad=%#llx,%#llx " + "prop2=%#llx " + "pad=%#llx " "phys_birth=%#llx " "birth=%#llx " "fill=%#llx " @@ -1117,9 +1118,9 @@ zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp, (long long)bp->blk_dva[2].dva_word[0], (long long)bp->blk_dva[2].dva_word[1], (long long)bp->blk_prop, - (long long)bp->blk_pad[0], - (long long)bp->blk_pad[1], - (long long)BP_GET_PHYSICAL_BIRTH(bp), + (long long)bp->blk_prop2, + (long long)bp->blk_pad, + (long long)BP_GET_RAW_PHYSICAL_BIRTH(bp), (long long)BP_GET_LOGICAL_BIRTH(bp), (long long)bp->blk_fill, (long long)bp->blk_cksum.zc_word[0], @@ -1334,7 +1335,7 @@ zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, { zio_t *zio; - zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp, + zio = zio_create(pio, spa, BP_GET_PHYSICAL_BIRTH(bp), bp, data, size, size, done, private, ZIO_TYPE_READ, priority, flags, NULL, 0, zb, ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? @@ -1854,7 +1855,7 @@ zio_write_bp_init(zio_t *zio) blkptr_t *bp = zio->io_bp; zio_prop_t *zp = &zio->io_prop; - ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg); + ASSERT(BP_GET_BIRTH(bp) != zio->io_txg); *bp = *zio->io_bp_override; zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; @@ -1942,7 +1943,7 @@ zio_write_compress(zio_t *zio) ASSERT(zio->io_child_type != ZIO_CHILD_DDT); ASSERT(zio->io_bp_override == NULL); - if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) { + if (!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) == zio->io_txg) { /* * We're rewriting an existing block, which means we're * working on behalf of spa_sync(). For spa_sync() to @@ -2079,7 +2080,7 @@ zio_write_compress(zio_t *zio) * spa_sync() to allocate new blocks, but force rewrites after that. * There should only be a handful of blocks after pass 1 in any case. */ - if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg && + if (!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) == zio->io_txg && BP_GET_PSIZE(bp) == psize && pass >= zfs_sync_pass_rewrite) { VERIFY3U(psize, !=, 0); @@ -3894,7 +3895,7 @@ zio_ddt_write(zio_t *zio) * block and leave. 
*/ if (have_dvas == 0) { - ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg); + ASSERT(BP_GET_BIRTH(bp) == txg); ASSERT(BP_EQUAL(bp, zio->io_bp_override)); ddt_phys_extend(ddp, v, bp); ddt_phys_addref(ddp, v); @@ -3922,6 +3923,23 @@ zio_ddt_write(zio_t *zio) * then we can just use them as-is. */ if (have_dvas >= need_dvas) { + /* + * For rewrite operations, try preserving the original + * logical birth time. If the result matches the + * original BP, this becomes a NOP. + */ + if (zp->zp_rewrite) { + uint64_t orig_logical_birth = + BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig); + ddt_bp_fill(ddp, v, bp, orig_logical_birth); + if (BP_EQUAL(bp, &zio->io_bp_orig)) { + /* We can skip accounting. */ + zio->io_flags |= ZIO_FLAG_NOPWRITE; + ddt_exit(ddt); + return (zio); + } + } + ddt_bp_fill(ddp, v, bp, txg); ddt_phys_addref(ddp, v); ddt_exit(ddt); @@ -4224,8 +4242,10 @@ zio_dva_allocate(zio_t *zio) ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_GANG); memcpy(zio->io_bp->blk_dva, zio->io_bp_orig.blk_dva, 3 * sizeof (dva_t)); - BP_SET_BIRTH(zio->io_bp, BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig), - BP_GET_PHYSICAL_BIRTH(&zio->io_bp_orig)); + BP_SET_LOGICAL_BIRTH(zio->io_bp, + BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig)); + BP_SET_PHYSICAL_BIRTH(zio->io_bp, + BP_GET_RAW_PHYSICAL_BIRTH(&zio->io_bp_orig)); return (zio); } @@ -4352,6 +4372,15 @@ zio_dva_allocate(zio_t *zio) error); } zio->io_error = error; + } else if (zio->io_prop.zp_rewrite) { + /* + * For rewrite operations, preserve the logical birth time + * but set the physical birth time to the current txg. + */ + uint64_t logical_birth = BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig); + ASSERT3U(logical_birth, <=, zio->io_txg); + BP_SET_BIRTH(zio->io_bp, logical_birth, zio->io_txg); + BP_SET_REWRITE(zio->io_bp, 1); } return (zio); @@ -4385,12 +4414,11 @@ zio_dva_claim(zio_t *zio) static void zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) { - ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp)); + ASSERT(BP_GET_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp)); ASSERT(zio->io_bp_override == NULL); if (!BP_IS_HOLE(bp)) { - metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp), - B_TRUE); + metaslab_free(zio->io_spa, bp, BP_GET_BIRTH(bp), B_TRUE); } if (gn != NULL) { @@ -5268,7 +5296,7 @@ zio_ready(zio_t *zio) if (zio->io_ready) { ASSERT(IO_IS_ALLOCATING(zio)); - ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || + ASSERT(BP_GET_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE)); ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); @@ -5423,8 +5451,6 @@ zio_done(zio_t *zio) ASSERT(zio->io_children[c][w] == 0); if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) { - ASSERT(zio->io_bp->blk_pad[0] == 0); - ASSERT(zio->io_bp->blk_pad[1] == 0); ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || (zio->io_bp == zio_unique_parent(zio)->io_bp)); @@ -5539,7 +5565,7 @@ zio_done(zio_t *zio) * error and generate a logical data ereport. 
			 */
			spa_log_error(zio->io_spa, &zio->io_bookmark,
-			    BP_GET_LOGICAL_BIRTH(zio->io_bp));
+			    BP_GET_PHYSICAL_BIRTH(zio->io_bp));
 			(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
 			    zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
 		}
diff --git a/module/zfs/zio_checksum.c b/module/zfs/zio_checksum.c
index 8cec3a6f562b..83ad2857b146 100644
--- a/module/zfs/zio_checksum.c
+++ b/module/zfs/zio_checksum.c
@@ -279,7 +279,7 @@ static void
 zio_checksum_gang_verifier(zio_cksum_t *zcp, const blkptr_t *bp)
 {
 	const dva_t *dva = BP_IDENTITY(bp);
-	uint64_t txg = BP_GET_BIRTH(bp);
+	uint64_t txg = BP_GET_PHYSICAL_BIRTH(bp);
 
 	ASSERT(BP_IS_GANG(bp));
 
diff --git a/tests/runfiles/common.run b/tests/runfiles/common.run
index 7cc7a3cf94f4..e73d913bc7b0 100644
--- a/tests/runfiles/common.run
+++ b/tests/runfiles/common.run
@@ -308,7 +308,7 @@ tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
 tags = ['functional', 'cli_root', 'zfs_reservation']
 
 [tests/functional/cli_root/zfs_rewrite]
-tests = ['zfs_rewrite']
+tests = ['zfs_rewrite', 'zfs_rewrite_physical']
 tags = ['functional', 'cli_root', 'zfs_rewrite']
 
 [tests/functional/cli_root/zfs_rollback]
diff --git a/tests/runfiles/sanity.run b/tests/runfiles/sanity.run
index 732f252b52d2..7767c0c2d535 100644
--- a/tests/runfiles/sanity.run
+++ b/tests/runfiles/sanity.run
@@ -195,7 +195,7 @@ tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
 tags = ['functional', 'cli_root', 'zfs_reservation']
 
 [tests/functional/cli_root/zfs_rewrite]
-tests = ['zfs_rewrite']
+tests = ['zfs_rewrite', 'zfs_rewrite_physical']
 tags = ['functional', 'cli_root', 'zfs_rewrite']
 
 [tests/functional/cli_root/zfs_rollback]
diff --git a/tests/zfs-tests/tests/Makefile.am b/tests/zfs-tests/tests/Makefile.am
index 388a4160736a..4de23ebe6006 100644
--- a/tests/zfs-tests/tests/Makefile.am
+++ b/tests/zfs-tests/tests/Makefile.am
@@ -869,6 +869,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
 	functional/cli_root/zfs_rewrite/cleanup.ksh \
 	functional/cli_root/zfs_rewrite/setup.ksh \
 	functional/cli_root/zfs_rewrite/zfs_rewrite.ksh \
+	functional/cli_root/zfs_rewrite/zfs_rewrite_physical.ksh \
 	functional/cli_root/zfs_rollback/cleanup.ksh \
 	functional/cli_root/zfs_rollback/setup.ksh \
 	functional/cli_root/zfs_rollback/zfs_rollback_001_pos.ksh \
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_rewrite/zfs_rewrite_physical.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_rewrite/zfs_rewrite_physical.ksh
new file mode 100755
index 000000000000..142e44f53515
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_rewrite/zfs_rewrite_physical.ksh
@@ -0,0 +1,100 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, iXsystems, Inc.
+#
+
+# DESCRIPTION:
+# Verify zfs rewrite -P flag correctly preserves logical birth times.
+#
+# STRATEGY:
+# 1. Create a test file and sync it.
+# 2. Create a snapshot to capture the original birth time.
+# 3. Test default rewrite behavior (updates logical birth time).
+# 4. Test -P flag behavior (preserves logical birth time).
+# 5. Verify incremental send behavior difference.
+
+. $STF_SUITE/include/libtest.shlib
+
+typeset tmp=$(mktemp)
+typeset send_default=$(mktemp)
+typeset send_physical=$(mktemp)
+
+function cleanup
+{
+	rm -rf $tmp $send_default $send_physical $TESTDIR/*
+	zfs destroy -R $TESTPOOL/$TESTFS@snap1 2>/dev/null || true
+	zfs destroy -R $TESTPOOL/$TESTFS@snap2 2>/dev/null || true
+	zfs destroy -R $TESTPOOL/$TESTFS@snap3 2>/dev/null || true
+}
+
+log_assert "zfs rewrite -P flag correctly preserves logical birth times"
+
+log_onexit cleanup
+
+log_must zfs set recordsize=128k $TESTPOOL/$TESTFS
+
+# Create test file and initial snapshot
+log_must dd if=/dev/urandom of=$TESTDIR/testfile bs=128k count=4
+log_must sync_pool $TESTPOOL
+typeset orig_hash=$(xxh128digest $TESTDIR/testfile)
+log_must zfs snapshot $TESTPOOL/$TESTFS@snap1
+
+# Test default rewrite behavior (updates logical birth time)
+log_must zfs rewrite $TESTDIR/testfile
+log_must sync_pool $TESTPOOL
+typeset default_hash=$(xxh128digest $TESTDIR/testfile)
+log_must [ "$orig_hash" = "$default_hash" ]
+log_must zfs snapshot $TESTPOOL/$TESTFS@snap2
+
+# Test incremental send size - should be large with updated birth time
+log_must eval "zfs send -i @snap1 $TESTPOOL/$TESTFS@snap2 > $send_default"
+typeset default_size=$(wc -c < $send_default)
+log_note "Default rewrite incremental send size: $default_size bytes"
+
+# Reset the file to original state
+log_must zfs rollback -r $TESTPOOL/$TESTFS@snap1
+
+# Test -P flag behavior (preserves logical birth time)
+log_must zfs rewrite -P $TESTDIR/testfile
+log_must sync_pool $TESTPOOL
+typeset physical_hash=$(xxh128digest $TESTDIR/testfile)
+log_must [ "$orig_hash" = "$physical_hash" ]
+log_must zfs snapshot $TESTPOOL/$TESTFS@snap3
+
+# Test incremental send size - should be minimal with preserved birth time
+log_must eval "zfs send -i @snap1 $TESTPOOL/$TESTFS@snap3 > $send_physical"
+typeset physical_size=$(wc -c < $send_physical)
+log_note "Physical rewrite incremental send size: $physical_size bytes"
+
+# Verify that -P flag produces smaller incremental send
+if [[ $physical_size -lt $default_size ]]; then
+	log_note "SUCCESS: -P flag produces smaller incremental send" \
+	    "($physical_size < $default_size)"
+else
+	log_fail "FAIL: -P flag should produce smaller incremental send" \
+	    "($physical_size >= $default_size)"
+fi
+
+log_pass
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
index 3389dcf72f89..bdf5fdf85cff 100644
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
@@ -92,6 +92,7 @@ typeset -a properties=(
     "feature@draid"
     "feature@redaction_list_spill"
     "feature@dynamic_gang_header"
+    "feature@physical_rewrite"
 )
 
 if is_linux || is_freebsd; then
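
Example usage (not part of the patch): a minimal sketch of the new -P behavior, assuming an existing pool "tank" with dataset "tank/fs" mounted at /tank/fs and a pre-existing snapshot @base; all names are illustrative.

    # Rewrite the file's blocks in place while preserving their logical
    # birth times, so the data is physically reallocated but does not
    # appear newly modified to birth-time-based consumers such as
    # incremental send.
    zfs rewrite -P /tank/fs/largefile
    zpool sync tank

    # A dry-run incremental send from @base should stay small, since the
    # logical birth times of the rewritten blocks did not move forward.
    zfs snapshot tank/fs@after
    zfs send -nvP -i @base tank/fs@after

    # The patch also adds a physical_rewrite pool feature, visible via:
    zpool get feature@physical_rewrite tank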