@@ -32,6 +32,42 @@ class ValueStoreNotPrintable {};
32
32
33
33
} // namespace Internal
34
34
35
+ struct CheckIRIdTag {
36
+ CheckIRIdTag ()
37
+ : check_id_tag_(0 ),
38
+ initial_reserved_ids_ (std::numeric_limits<int32_t >::max()) {}
39
+ explicit CheckIRIdTag (int32_t check_id_index, int32_t initial_reserved_ids)
40
+ : initial_reserved_ids_(initial_reserved_ids) {
41
+ // Shift down by 1 to get out of the high bit to avoid using any negative
42
+ // ids, since they have special uses.
43
+ check_id_tag_ = llvm::reverseBits ((((check_id_index + 1 ) << 1 ) | 1 ) << 1 );
44
+ }
45
+ auto Apply (int32_t index) const -> int32_t {
46
+ if (index < initial_reserved_ids_) {
47
+ return index;
48
+ }
49
+ // assert that check_id_tag_ doesn't have the second highest bit set
50
+ return index ^ check_id_tag_;
51
+ }
52
+ auto Remove (int32_t tagged_index) const -> int32_t {
53
+ if ((llvm::reverseBits (2 ) & tagged_index) == 0 ) {
54
+ CARBON_CHECK (tagged_index < initial_reserved_ids_);
55
+ return tagged_index;
56
+ }
57
+ auto index = tagged_index ^ check_id_tag_;
58
+ CARBON_CHECK (index >= initial_reserved_ids_);
59
+ return index;
60
+ }
61
+ int32_t check_id_tag_;
62
+ int32_t initial_reserved_ids_;
63
+ };
64
+
65
+ template <typename ValueStoreT>
66
+ auto GetCheckIRIdTag (const ValueStoreT& value_store) {
67
+ (void )value_store;
68
+ return CheckIRIdTag ();
69
+ }
70
+
35
71
// A simple wrapper for accumulating values, providing IDs to later retrieve the
36
72
// value. This does not do deduplication.
37
73
template <typename IdT, typename ValueT>
@@ -74,6 +110,7 @@ class ValueStore
74
110
};
75
111
76
112
ValueStore () = default ;
113
+ // Constructs a store whose `tag` maps raw indices to ids in `Add` and back
+ // again in `Get` (see `CheckIRIdTag::Apply` / `Remove`).
+ explicit ValueStore (CheckIRIdTag tag) : tag_(tag) {}
77
114
78
115
// Stores the value and returns an ID to reference it.
79
116
auto Add (ValueType value) -> IdType {
@@ -82,8 +119,8 @@ class ValueStore
82
119
// tracking down issues easier.
83
120
CARBON_DCHECK (size_ < std::numeric_limits<int32_t >::max (), " Id overflow" );
84
121
85
- IdType id (size_);
86
- auto [chunk_index, pos] = IdToChunkIndices (id );
122
+ IdType id (tag_. Apply ( size_) );
123
+ auto [chunk_index, pos] = IdToChunkIndices (size_ );
87
124
++size_;
88
125
89
126
CARBON_DCHECK (static_cast <size_t >(chunk_index) <= chunks_.size (),
@@ -99,17 +136,19 @@ class ValueStore
99
136
100
137
// Returns a mutable value for an ID.
101
138
auto Get (IdType id) -> RefType {
102
- CARBON_DCHECK (id.index >= 0 , " {0}" , id);
103
- CARBON_DCHECK (id.index < size_, " {0}" , id);
104
- auto [chunk_index, pos] = IdToChunkIndices (id);
139
+ auto index = tag_.Remove (id.index );
140
+ CARBON_DCHECK (index >= 0 , " {0}" , index);
141
+ CARBON_DCHECK (index < size_, " {0}" , index);
142
+ auto [chunk_index, pos] = IdToChunkIndices (index);
105
143
return chunks_[chunk_index].Get (pos);
106
144
}
107
145
108
146
// Returns the value for an ID.
109
147
auto Get (IdType id) const -> ConstRefType {
110
- CARBON_DCHECK (id.index >= 0 , " {0}" , id);
111
- CARBON_DCHECK (id.index < size_, " {0}" , id);
112
- auto [chunk_index, pos] = IdToChunkIndices (id);
148
+ auto index = tag_.Remove (id.index );
149
+ CARBON_DCHECK (index >= 0 , " {0}" , index);
150
+ CARBON_DCHECK (index < size_, " {0}" , index);
151
+ auto [chunk_index, pos] = IdToChunkIndices (index);
113
152
return chunks_[chunk_index].Get (pos);
114
153
}
115
154
@@ -118,7 +157,7 @@ class ValueStore
118
157
if (size <= size_) {
119
158
return ;
120
159
}
121
- auto [final_chunk_index, _] = IdToChunkIndices (IdType ( size - 1 ) );
160
+ auto [final_chunk_index, _] = IdToChunkIndices (size - 1 );
122
161
chunks_.resize (final_chunk_index + 1 );
123
162
}
124
163
@@ -128,10 +167,10 @@ class ValueStore
128
167
return ;
129
168
}
130
169
131
- auto [begin_chunk_index, begin_pos] = IdToChunkIndices (IdType ( size_) );
170
+ auto [begin_chunk_index, begin_pos] = IdToChunkIndices (size_);
132
171
// Use an inclusive range so that if `size` would be the next chunk, we
133
172
// don't try doing something with it.
134
- auto [end_chunk_index, end_pos] = IdToChunkIndices (IdType ( size - 1 ) );
173
+ auto [end_chunk_index, end_pos] = IdToChunkIndices (size - 1 );
135
174
chunks_.resize (end_chunk_index + 1 );
136
175
137
176
// If the begin and end chunks are the same, we only fill from begin to end.
@@ -192,7 +231,8 @@ class ValueStore
192
231
// `mapped_iterator` incorrectly infers the pointer type for `PointerProxy`.
193
232
// NOLINTNEXTLINE(readability-const-return-type)
194
233
auto index_to_id = [&](int32_t i) -> const std::pair<IdType, ConstRefType> {
195
- return std::pair<IdType, ConstRefType>(IdType (i), Get (IdType (i)));
234
+ IdType id (tag_.Apply (i));
235
+ return std::pair<IdType, ConstRefType>(id, Get (id));
196
236
};
197
237
// Because indices into `ValueStore` are all sequential values from 0, we
198
238
// can use llvm::seq to walk all indices in the store.
@@ -314,7 +354,7 @@ class ValueStore
314
354
315
355
// Converts an id into an index into the set of chunks, and an offset into
316
356
// that specific chunk. Looks for index overflow in non-optimized builds.
317
- static auto IdToChunkIndices (IdType id ) -> std::pair<int32_t, int32_t> {
357
+ static auto IdToChunkIndices (int32_t index ) -> std::pair<int32_t, int32_t> {
318
358
constexpr auto LowBits = Chunk::IndexBits ();
319
359
320
360
// Verify there are no unused bits when indexing up to the `Capacity`. This
@@ -328,16 +368,20 @@ class ValueStore
328
368
static_assert (LowBits < 30 );
329
369
330
370
// The index of the chunk is the high bits.
331
- auto chunk = id. index >> LowBits;
371
+ auto chunk = index >> LowBits;
332
372
// The index into the chunk is the low bits.
333
- auto pos = id. index & ((1 << LowBits) - 1 );
373
+ auto pos = index & ((1 << LowBits) - 1 );
334
374
return {chunk, pos};
335
375
}
336
376
337
377
// Number of elements added to the store. The number should never exceed what
338
378
// fits in an `int32_t`, which is checked in non-optimized builds in Add().
339
379
int32_t size_ = 0 ;
340
380
381
+ public:
382
+ CheckIRIdTag tag_;
383
+
384
+ private:
341
385
// Storage for the `ValueType` objects, indexed by the id. We use a vector of
342
386
// chunks of `ValueType` instead of just a vector of `ValueType` so that
343
387
// addresses of `ValueType` objects are stable. This allows the rest of the
0 commit comments