
Commit 954cab0

jzulauf-lunarg authored and jeremyg-lunarg committed
syncval: New generic range traversal
1 parent 3e101a8 commit 954cab0

2 files changed (+83 −43 lines)


layers/containers/range_vector.h

Lines changed: 54 additions & 0 deletions
@@ -1626,6 +1626,60 @@ Iterator split(Iterator in, Map &map, const Range &range) {
     return pos;
 }
 
+// Apply an operation over a range map, infilling where content is absent, updating where content is present.
+// Trims to range boundaries.
+// The infill op doesn't have to alter the map, but it mustn't invalidate iterators passed to it (i.e. no erasure).
+// The infill data (default mapped value or other initial value) is contained within ops.
+// The update op allows existing ranges to be updated (merged, etc.) based on data contained in ops. All iterators
+// passed to update are already trimmed to fit within range.
+template <typename RangeMap, typename InfillUpdateOps>
+void infill_update_range(RangeMap &map, const typename RangeMap::key_type &range, const InfillUpdateOps &ops) {
+    using KeyType = typename RangeMap::key_type;
+    using IndexType = typename RangeMap::index_type;
+    const auto map_end = map.end();
+
+    if (range.empty()) return;
+
+    // Start at the first entry at or after range
+    auto pos = map.lower_bound(range);
+    if ((pos != map_end) && (range.begin > pos->first.begin)) {
+        // The lower bound starts before the range; trim and advance
+        pos = map.split(pos, range.begin, sparse_container::split_op_keep_both());
+        ++pos;
+    }
+
+    IndexType current_begin = range.begin;
+    while ((pos != map_end) && (current_begin < range.end)) {
+        // current_begin either points to the next existing value to update or to the beginning of a gap to infill
+        assert(pos->first.begin >= current_begin);
+
+        if (current_begin < pos->first.begin) {
+            // We have a gap to infill (we supply pos for "insert in front of" calls)
+            ops.infill(map, pos, KeyType(current_begin, std::min(range.end, pos->first.begin)));
+            // Advance current_begin, but *not* pos, as it's the next valid value. (infill must not invalidate pos)
+            current_begin = pos->first.begin;
+        } else {
+            // We need to run the update operation on the valid portion of the current value
+            if (pos->first.end > range.end) {
+                // If this entry overlaps end-of-range we need to trim it to the range
+                pos = map.split(pos, range.end, sparse_container::split_op_keep_both());
+            }
+
+            // We have a valid fully contained range, merge with it
+            ops.update(pos);
+
+            // Advance the current location and map entry
+            current_begin = pos->first.end;
+            ++pos;
+        }
+    }
+
+    // Fill to the end as needed
+    if (current_begin < range.end) {
+        ops.infill(map, pos, KeyType(current_begin, range.end));
+    }
+}
+
 // Parallel iterator
 // Traverse two range maps over the same range, but without assumptions of aligned ranges.
 // ++ increments to the next point where one of the two maps changes range, giving a range over which the two
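For orientation, here is a minimal sketch of driving the new traversal; it is not part of this commit. It assumes the header's sparse_container::range_map with its std::map-style hinted insert, and the CountingOps type and its values are purely illustrative.

// Illustrative ops type (hypothetical): infill gaps with a count of 1,
// and bump the count on entries that already exist.
struct CountingOps {
    using Map = sparse_container::range_map<size_t, int>;  // assumed instantiation
    using Range = Map::key_type;
    using Iterator = Map::iterator;

    // infill must cover the whole gap and must not invalidate pos; a hinted
    // insert in front of pos satisfies both requirements.
    void infill(Map &map, const Iterator &pos, const Range &gap) const {
        map.insert(pos, std::make_pair(gap, 1));
    }
    // update only ever sees iterators already trimmed to the traversal range.
    void update(const Iterator &pos) const { ++pos->second; }
};

void CountingExample() {
    CountingOps::Map map;
    map.insert(map.end(), std::make_pair(CountingOps::Range(4, 8), 1));
    // Infills [0,4) and [8,10) with 1; updates the existing [4,8) to 2.
    infill_update_range(map, CountingOps::Range(0, 10), CountingOps{});
}

The boundary trimming inside the traversal uses map.split(pos, index, sparse_container::split_op_keep_both()), which splits the entry containing index and keeps both halves; this is why update only ever sees fully contained ranges.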

layers/sync/sync_validation.cpp

Lines changed: 29 additions & 43 deletions
@@ -1651,47 +1651,35 @@ SyncStageAccessFlags SyncStageAccess::AccessScope(VkPipelineStageFlags2KHR stage
     return AccessScopeByStage(stages) & AccessScopeByAccess(accesses);
 }
 
+// The semantics of the InfillUpdateOps of infill_update_range differ slightly from the UpdateMemoryAccessState Action
+// operations, as this simplifies the generic traversal. So we wrap the Action in a semantics adapter to get the same effect.
 template <typename Action>
-void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
-    // TODO: Optimization for operations that do a pure overwrite (i.e. WRITE usages which rewrite the state, vs READ usages
-    // that do incremental updates)
-    assert(accesses);
-    if (range.empty()) return;
-    auto pos = accesses->lower_bound(range);
-    if (pos == accesses->end() || !pos->first.intersects(range)) {
-        // The range is empty, fill it with a default value.
-        pos = action.Infill(accesses, pos, range);
-    } else if (range.begin < pos->first.begin) {
-        // Leading empty space, infill
-        pos = action.Infill(accesses, pos, ResourceAccessRange(range.begin, pos->first.begin));
-    } else if (pos->first.begin < range.begin) {
-        // Trim the beginning if needed
-        pos = accesses->split(pos, range.begin, sparse_container::split_op_keep_both());
-        ++pos;
-    }
-
-    const auto the_end = accesses->end();
-    while ((pos != the_end) && pos->first.intersects(range)) {
-        if (pos->first.end > range.end) {
-            pos = accesses->split(pos, range.end, sparse_container::split_op_keep_both());
-        }
-
-        pos = action(accesses, pos);
-        if (pos == the_end) break;
-
-        auto next = pos;
-        ++next;
+struct ActionToOpsAdapter {
+    using Map = ResourceAccessRangeMap;
+    using Range = typename Map::key_type;
+    using Iterator = typename Map::iterator;
+    using IndexType = typename Map::index_type;
+
+    void infill(Map &accesses, const Iterator &pos, const Range &infill_range) const {
+        // Combine Infill and update operations to make the generic implementation simpler
+        Iterator infill = action.Infill(&accesses, pos, infill_range);
+        if (infill == accesses.end()) return;  // Allow action to 'pass' on filling in the blanks
+
+        // Need to apply the action to the Infill. infill_update_range expects ops.infill to be completely done with
+        // the infill_range, whereas Action::Infill assumes the caller will apply the action() logic to the infill_range
+        for (; infill != pos; ++infill) {
+            assert(infill != accesses.end());
+            action(infill);
+        }
+    }
+    void update(const Iterator &pos) const { action(pos); }
+    const Action &action;
+};
 
-        // Do gap infill or infill to end of range, if needed.
-        if (pos->first.end < range.end) {
-            VkDeviceSize limit = (next == the_end) ? range.end : std::min(range.end, next->first.begin);
-            ResourceAccessRange new_range(pos->first.end, limit);
-            if (new_range.non_empty()) {
-                next = action.Infill(accesses, next, new_range);
-            }
-        }
-        pos = next;
-    }
+template <typename Action>
+void UpdateMemoryAccessState(ResourceAccessRangeMap *accesses, const ResourceAccessRange &range, const Action &action) {
+    ActionToOpsAdapter<Action> ops{action};
+    infill_update_range(*accesses, range, ops);
 }
 
 // Give a comparable interface for range generators and ranges
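To make the adapted contract concrete, here is a sketch of an Action under the new interface. ExampleAction is hypothetical (its members mirror UpdateMemoryAccessStateFunctor below, with types assumed from the surrounding functors); it illustrates that ops.infill must fully process a gap, which the adapter guarantees by applying action() to everything Infill inserted before returning.

// Hypothetical Action, for illustration only.
struct ExampleAction {
    using Iterator = ResourceAccessRangeMap::iterator;

    // Insert default-constructed state over the gap. ActionToOpsAdapter::infill
    // then walks from the returned iterator up to pos, applying operator() to
    // each inserted entry. Returning accesses->end() instead would 'pass'.
    Iterator Infill(ResourceAccessRangeMap *accesses, const Iterator &pos, const ResourceAccessRange &range) const {
        return accesses->insert(pos, std::make_pair(range, ResourceAccessState()));
    }
    // Called only with iterators already trimmed to the traversal range.
    void operator()(const Iterator &pos) const { pos->second.Update(usage, ordering_rule, tag); }

    SyncStageAccessIndex usage;
    SyncOrdering ordering_rule;  // type assumed
    ResourceUsageTag tag;
};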
@@ -1726,10 +1714,9 @@ struct UpdateMemoryAccessStateFunctor {
         return accesses->lower_bound(range);
     }
 
-    Iterator operator()(ResourceAccessRangeMap *accesses, const Iterator &pos) const {
+    void operator()(const Iterator &pos) const {
         auto &access_state = pos->second;
         access_state.Update(usage, ordering_rule, tag);
-        return pos;
     }
 
     UpdateMemoryAccessStateFunctor(AccessAddressType type_, const AccessContext &context_, SyncStageAccessIndex usage_,
@@ -1801,7 +1788,7 @@ class ApplyBarrierOpsFunctor {
         return inserted;
     }
 
-    Iterator operator()(ResourceAccessRangeMap *accesses, const Iterator &pos) const {
+    void operator()(const Iterator &pos) const {
         auto &access_state = pos->second;
         for (const auto &op : barrier_ops_) {
             op(&access_state);
@@ -1812,7 +1799,6 @@
             // another walk
             access_state.ApplyPendingBarriers(tag_);
         }
-        return pos;
     }
 
     // A valid tag is required IFF layout_transition is true, as transitions are write ops
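The functor changes above all follow one pattern: operator() no longer returns an iterator, because the generic traversal owns advancement. A self-contained toy illustration of the before/after contract, using std::map stand-ins rather than the validation layer's types:

#include <map>

using ToyMap = std::map<int, int>;
using ToyIter = ToyMap::iterator;

// Old-style op: mutates the entry, then must hand the iterator back so the
// caller's hand-rolled loop can continue from it.
ToyIter old_style_update(ToyMap & /*map*/, ToyIter pos) {
    pos->second += 1;
    return pos;
}

// New-style op: infill_update_range advances with ++pos itself, so the op is
// a pure state mutation with no iterator bookkeeping.
void new_style_update(const ToyIter &pos) { pos->second += 1; }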
