value_store.h

// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#ifndef CARBON_TOOLCHAIN_BASE_VALUE_STORE_H_
#define CARBON_TOOLCHAIN_BASE_VALUE_STORE_H_

#include <bit>
#include <cstddef>
#include <limits>
#include <memory>
#include <type_traits>
#include <utility>

#include "common/check.h"
#include "common/ostream.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include "toolchain/base/mem_usage.h"
#include "toolchain/base/value_store_types.h"
#include "toolchain/base/yaml.h"

namespace Carbon {

namespace Internal {

// Used as a parent class for non-printable types. This is just for
// std::conditional, not as an API.
class ValueStoreNotPrintable {};

}  // namespace Internal

struct IdTag {
  IdTag() = default;

  explicit IdTag(int32_t id_index, int32_t initial_reserved_ids)
      // Shift down by 1 to get out of the high bit, to avoid using any
      // negative ids since they have special uses. Shift down by another 1 to
      // free up the second highest bit for a marker indicating whether the
      // index is tagged (and needs to be untagged) or not. Add one to the
      // index so it's not zero-based, making it a bit less likely that it
      // collides with anything else (though with the second-highest-bit
      // tagging this might not be needed).
      : id_tag_(llvm::reverseBits((((id_index + 1) << 1) | 1) << 1)),
        initial_reserved_ids_(initial_reserved_ids) {
    CARBON_CHECK(
        id_index != -1,
        "IdTag should be default constructed if no tagging id is available.");
  }

  auto Apply(int32_t index) const -> int32_t {
    CARBON_DCHECK(index >= 0, "{0}", index);
    if (index < initial_reserved_ids_) {
      return index;
    }
    // TODO: Assert that id_tag_ doesn't have the second highest bit set.
    auto tagged_index = index ^ id_tag_;
    CARBON_DCHECK(tagged_index >= 0, "{0}", tagged_index);
    return tagged_index;
  }

  auto Remove(int32_t tagged_index) const -> int32_t {
    CARBON_DCHECK(tagged_index >= 0, "{0}", tagged_index);
    if (!HasTag(tagged_index)) {
      CARBON_DCHECK(tagged_index < initial_reserved_ids_,
                    "This untagged index is outside the initial reserved ids "
                    "and should have been tagged.");
      return tagged_index;
    }
    auto index = tagged_index ^ id_tag_;
    CARBON_DCHECK(index >= initial_reserved_ids_,
                  "When removing tagging bits, found an index that "
                  "shouldn't have been tagged in the first place.");
    return index;
  }

  // Gets the value unique to this IdTag instance that is added to indices in
  // Apply, and removed in Remove.
  auto GetContainerTag() const -> int32_t {
    return (llvm::reverseBits(id_tag_) >> 2) - 1;
  }

  // Returns whether `tagged_index` has an IdTag applied to it, from this IdTag
  // instance or any other one.
  static auto HasTag(int32_t tagged_index) -> bool {
    return (llvm::reverseBits(2) & tagged_index) != 0;
  }

  template <class TagT>
  struct TagAndIndex {
    TagT tag;
    int32_t index;
  };

  template <typename TagT>
  static auto DecomposeWithBestEffort(int32_t tagged_index)
      -> TagAndIndex<TagT> {
    if (tagged_index < 0) {
      // TODO: This should return TagT::None, but we need a fallback TagT other
      // than `int32_t`.
      return {TagT{-1}, tagged_index};
    }
    if (!HasTag(tagged_index)) {
      // TODO: This should return TagT::None, but we need a fallback TagT other
      // than `int32_t`.
      return {TagT{-1}, tagged_index};
    }
    // Find the longest run of zero bits; its position separates the low index
    // bits from the high tag bits.
    int length = 0;
    int location = 0;
    for (int i = 0; i != 32; ++i) {
      int current_run = 0;
      int location_of_current_run = i;
      while (i != 32 && (tagged_index & (1 << i)) == 0) {
        ++current_run;
        ++i;
      }
      if (current_run != 0) {
        --i;
      }
      if (current_run > length) {
        length = current_run;
        location = location_of_current_run;
      }
    }
    if (length < 8) {
      // TODO: This should return TagT::None, but we need a fallback TagT other
      // than `int32_t`.
      return {TagT{-1}, tagged_index};
    }
    auto index_mask = llvm::maskTrailingOnes<uint32_t>(location);
    auto tag = (llvm::reverseBits(tagged_index & ~index_mask) >> 2) - 1;
    auto index = tagged_index & index_mask;
    return {.tag = TagT{static_cast<int32_t>(tag)},
            .index = static_cast<int32_t>(index)};
  }

 private:
  int32_t id_tag_ = 0;
  int32_t initial_reserved_ids_ = std::numeric_limits<int32_t>::max();
};

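// A minimal sketch of the tagging round trip (the concrete values here are
// illustrative assumptions, not part of the API contract):
//
//   IdTag tag(/*id_index=*/3, /*initial_reserved_ids=*/2);
//   int32_t tagged = tag.Apply(10);
//   CARBON_CHECK(IdTag::HasTag(tagged));
//   CARBON_CHECK(tag.Remove(tagged) == 10);
//   // Indices below `initial_reserved_ids` pass through untouched.
//   CARBON_CHECK(tag.Apply(1) == 1);
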
// A simple wrapper for accumulating values, providing IDs to later retrieve
// the value. This does not do deduplication.
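//
// A minimal usage sketch (`MyId` stands in for a hypothetical id type with an
// `index` field, and `MyValue` for a stored struct; the exact reference type
// returned by `Get` comes from `ValueStoreTypes`):
//
//   ValueStore<MyId, MyValue> store;
//   MyId id = store.Add({.x = 1});
//   const MyValue& value = store.Get(id);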
template <typename IdT, typename ValueT>
class ValueStore
    : public std::conditional<std::is_base_of_v<Printable<ValueT>, ValueT>,
                              Yaml::Printable<ValueStore<IdT, ValueT>>,
                              Internal::ValueStoreNotPrintable>::type {
 public:
  using IdType = IdT;
  using ValueType = typename ValueStoreTypes<ValueT>::ValueType;
  using RefType = typename ValueStoreTypes<ValueT>::RefType;
  using ConstRefType = typename ValueStoreTypes<ValueT>::ConstRefType;

  // A range over references to the values in a ValueStore, returned from
  // `ValueStore::values()`. Hides the complex type name of the iterator
  // internally to provide a type name (`Range`) that can be referred to
  // without auto and templates.
  class Range {
   public:
    explicit Range(const ValueStore& store [[clang::lifetimebound]])
        : flattened_range_(MakeFlattenedRange(store)) {}

    auto begin() const -> auto { return flattened_range_.begin(); }
    auto end() const -> auto { return flattened_range_.end(); }

   private:
    // Flattens the range of `Chunk`s of `ValueType`s into a single range of
    // `ValueType`s.
    static auto MakeFlattenedRange(const ValueStore& store) -> auto {
      // Because indices into `ValueStore` are all sequential values from 0, we
      // can use llvm::seq to walk all indices in the store.
      return llvm::map_range(llvm::seq(store.size_),
                             [&](int32_t i) -> ConstRefType {
                               return store.Get(IdType(store.tag_.Apply(i)));
                             });
    }

    using FlattenedRangeType =
        decltype(MakeFlattenedRange(std::declval<const ValueStore&>()));

    FlattenedRangeType flattened_range_;
  };

  ValueStore() = default;
  explicit ValueStore(IdTag tag) : tag_(tag) {}
  template <typename Id>
  explicit ValueStore(Id id, int32_t initial_reserved_ids = 0)
      : tag_(id.index, initial_reserved_ids) {}

  // Stores the value and returns an ID to reference it.
  auto Add(ValueType value) -> IdType {
    // This routine is especially hot, and this check is relatively expensive
    // for the value it provides, so only do it in non-optimized builds to make
    // tracking down issues easier.
    CARBON_DCHECK(size_ < std::numeric_limits<int32_t>::max(), "Id overflow");

    IdType id(tag_.Apply(size_));
    auto [chunk_index, pos] = RawIndexToChunkIndices(size_);
    ++size_;
    CARBON_DCHECK(static_cast<size_t>(chunk_index) <= chunks_.size(),
                  "{0} <= {1}", chunk_index, chunks_.size());
    if (static_cast<size_t>(chunk_index) == chunks_.size()) {
      chunks_.emplace_back();
    }
    CARBON_DCHECK(pos == chunks_[chunk_index].size());
    chunks_[chunk_index].Add(std::move(value));
    return id;
  }

  // Returns a mutable value for an ID.
  auto Get(IdType id) -> RefType {
    CARBON_DCHECK(id.index >= 0, "{0}", id);
    auto [chunk_index, pos] = IdToChunkIndices(id);
    return chunks_[chunk_index].Get(pos);
  }

  // Returns the value for an ID.
  auto Get(IdType id) const -> ConstRefType {
    CARBON_DCHECK(id.index >= 0, "{0}", id);
    auto [chunk_index, pos] = IdToChunkIndices(id);
    return chunks_[chunk_index].Get(pos);
  }

  // Returns the value for an ID, or a specified default value if a value has
  // not yet been added for this ID.
  auto GetWithDefault(IdType id,
                      ConstRefType default_value
                      [[clang::lifetimebound]]) const -> ConstRefType {
    CARBON_DCHECK(id.index >= 0, "{0}", id);
    auto index = tag_.Remove(id.index);
    if (index >= size_) {
      return default_value;
    }
    auto [chunk_index, pos] = RawIndexToChunkIndices(index);
    return chunks_[chunk_index].Get(pos);
  }

  // Reserves space.
  auto Reserve(int32_t size) -> void {
    if (size <= size_) {
      return;
    }
    auto [final_chunk_index, _] = RawIndexToChunkIndices(size - 1);
    chunks_.resize(final_chunk_index + 1);
  }

  // Grows the ValueStore to `size`, filling new entries with `default_value`.
  auto Resize(int32_t size, ConstRefType default_value) -> void {
    if (size <= size_) {
      return;
    }
    auto [begin_chunk_index, begin_pos] = RawIndexToChunkIndices(size_);
    // Use an inclusive range so that if `size` would be the start of the next
    // chunk, we don't try doing anything with that chunk.
    auto [end_chunk_index, end_pos] = RawIndexToChunkIndices(size - 1);
    chunks_.resize(end_chunk_index + 1);
    if (begin_chunk_index == end_chunk_index) {
      // The begin and end chunks are the same, so only fill from begin to end.
      chunks_[begin_chunk_index].UninitializedFill(end_pos - begin_pos + 1,
                                                   default_value);
    } else {
      // Otherwise, do partial fills on the begin and end chunks, and full
      // fills on intermediate chunks.
      chunks_[begin_chunk_index].UninitializedFill(
          Chunk::Capacity() - begin_pos, default_value);
      for (auto i = begin_chunk_index + 1; i < end_chunk_index; ++i) {
        chunks_[i].UninitializedFill(Chunk::Capacity(), default_value);
      }
      chunks_[end_chunk_index].UninitializedFill(end_pos + 1, default_value);
    }
    // Update size.
    size_ = size;
  }

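  // As a worked example of the fills in Resize above (the sizes are
  // illustrative): with `Chunk::Capacity()` == 512, growing from `size_` ==
  // 500 to `size` == 1100 fills positions 500..511 of chunk 0, all 512 slots
  // of chunk 1, and positions 0..75 of chunk 2.
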
  // These are to support printable structures, and are not guaranteed.
  auto OutputYaml() const -> Yaml::OutputMapping {
    return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
      for (auto [id, value] : enumerate()) {
        map.Add(PrintToString(id), Yaml::OutputScalar(value));
      }
    });
  }

  // Collects memory usage of the values.
  auto CollectMemUsage(MemUsage& mem_usage, llvm::StringRef label) const
      -> void {
    mem_usage.Add(label.str(), size_ * sizeof(ValueType),
                  Chunk::CapacityBytes * chunks_.size());
  }

  auto size() const -> size_t { return size_; }

  // Makes an iterable range over references to all values in the ValueStore.
  auto values() [[clang::lifetimebound]] -> auto {
    return llvm::map_range(llvm::seq(size_), [&](int32_t i) -> RefType {
      return Get(IdType(tag_.Apply(i)));
    });
  }
  auto values() const [[clang::lifetimebound]] -> Range { return Range(*this); }
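  // For example, iterating the values of the hypothetical store sketched in
  // the class comment above:
  //
  //   for (const auto& value : store.values()) { ... }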

  // Makes an iterable range over pairs of the index and a reference to the
  // value for each value in the store.
  //
  // The range is over references to the values in the store, even if used with
  // `auto` to destructure the pair. In this example, the `value` is a
  // `ConstRefType`:
  // ```
  // for (auto [id, value] : store.enumerate()) { ... }
  // ```
  auto enumerate() const [[clang::lifetimebound]] -> auto {
    // For `it->val`, writing `const std::pair` is required; otherwise
    // `mapped_iterator` incorrectly infers the pointer type for
    // `PointerProxy`.
    // NOLINTNEXTLINE(readability-const-return-type)
    auto index_to_id = [&](int32_t i) -> const std::pair<IdType, ConstRefType> {
      IdType id(tag_.Apply(i));
      return std::pair<IdType, ConstRefType>(id, Get(id));
    };
    // Because indices into `ValueStore` are all sequential values from 0, we
    // can use llvm::seq to walk all indices in the store.
    return llvm::map_range(llvm::seq(size_), index_to_id);
  }

  auto GetIdTag() const -> IdTag { return tag_; }

  auto GetRawIndex(IdT id) const -> int32_t {
    CARBON_DCHECK(id.index >= 0, "{0}", id);
    auto index = tag_.Remove(id.index);
#ifndef NDEBUG
    if (index >= size_) {
      // Attempt to decompose id.index to include extra detail in the check
      // here.
      //
      // TODO: Teach ValueStore the type of the tag id with a template, then we
      // can print it with proper formatting instead of just as an integer.
      auto [id_tag, id_untagged_index] =
          IdTag::DecomposeWithBestEffort<int32_t>(id.index);
      CARBON_DCHECK(
          index < size_,
          "Untagged index was outside of container range. Tagged index {0}. "
          "Best-effort decomposition: Tag: {1}, Index: {2}. "
          "Container size: {3}. "
          "Expected Tag for this container: {4}.",
          id.index, id_tag, id_untagged_index, size_, tag_.GetContainerTag());
    }
#endif
    return index;
  }

 private:
  // A chunk of `ValueType`s which has a fixed capacity but variable size.
  // Tracks the size internally for verifying bounds.
  struct Chunk {
   public:
    // The max size of each chunk allocation for `ValueStore`. This is based on
    // TLB page sizes for the target platform.
    //
    // See https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
    //
    // A 4K chunk size outperforms a 1M chunk size on Linux-x64 and MacOS-arm64
    // in benchmarks and when running file_test.
    //
    // Linux-x64: x64 CPUs support 4K and 2M page sizes, but we see 1M is
    // slower than 4K with tcmalloc in opt builds for our tests.
    //
    // Mac-arm64: arm64 CPUs support 4K, 8K, 64K, 256K, 1M, 4M and up. Like for
    // Linux-x64, 4K outperformed 1M. We didn't try other sizes yet.
    //
    // TODO: Is there a more optimal size for Mac-arm64? What should
    // Linux-arm64 and Mac-x64 use? What should Windows use?
    //
    // TODO: The previous SmallVector<ValueType> seems to outperform 4K chunks
    // (they may be slower by up to 5%) in benchmarks. Find ways to make
    // chunking faster. Should successive chunks get larger in size? That would
    // greatly complicate the math for choosing a chunk, though.
    static constexpr auto MaxAllocationBytes() -> int32_t {
#if !defined(NDEBUG) || LLVM_ADDRESS_SANITIZER_BUILD
      // Use a small size in unoptimized builds to ensure multiple chunks get
      // used. Do the same in ASan builds to reduce bookkeeping overheads:
      // using large allocations (e.g. 1M+) incurs a 10x runtime cost for our
      // tests under ASan.
      return sizeof(ValueType) * 5;
#else
      return 4 * 1024;
#endif
    }

    // The number of elements stored in each chunk allocation.
    //
    // The number must be a power of two so that there are no unused values in
    // bits indexing into the allocation.
    static constexpr auto Capacity() -> int32_t {
      constexpr auto MaxElements = MaxAllocationBytes() / sizeof(ValueType);
      return std::bit_floor(MaxElements);
    }

    // The number of bits needed to index each element in a chunk allocation.
    static constexpr auto IndexBits() -> int32_t {
      static_assert(Capacity() > 0);
      return std::bit_width(uint32_t{Capacity() - 1});
    }

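    // A worked example of the two functions above (the numbers are
    // illustrative, not guaranteed): with an 8-byte `ValueType` and the
    // 4096-byte optimized-build allocation, `MaxElements` is 512, already a
    // power of two, so `Capacity()` == 512 and `IndexBits()` == 9. With a
    // 24-byte `ValueType`, `MaxElements` is 170, `Capacity()` rounds down to
    // 128, and `IndexBits()` == 7.
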
    static constexpr auto CapacityBytes = Capacity() * sizeof(ValueType);

    explicit Chunk()
        : buf_(reinterpret_cast<ValueType*>(
              llvm::allocate_buffer(CapacityBytes, alignof(ValueType)))) {}

    // Moving leaves nullptr behind in the moved-from object so that its
    // destructor is a no-op (preventing a double free).
    Chunk(Chunk&& rhs) noexcept
        : buf_(std::exchange(rhs.buf_, nullptr)), num_(rhs.num_) {}
    auto operator=(Chunk&& rhs) noexcept -> Chunk& {
      // Swap so that `rhs`'s destructor releases any buffer this chunk
      // previously owned, rather than leaking it.
      std::swap(buf_, rhs.buf_);
      std::swap(num_, rhs.num_);
      return *this;
    }

    ~Chunk() {
      if (buf_) {
        if constexpr (!std::is_trivially_destructible_v<ValueType>) {
          std::destroy_n(buf_, num_);
        }
        llvm::deallocate_buffer(buf_, CapacityBytes, alignof(ValueType));
      }
    }

    auto Get(int32_t i) -> ValueType& {
      CARBON_DCHECK(i < num_, "{0}", i);
      return buf_[i];
    }
    auto Get(int32_t i) const -> const ValueType& {
      CARBON_DCHECK(i < num_, "{0}", i);
      return buf_[i];
    }

    auto Add(ValueType&& value) -> void {
      CARBON_DCHECK(num_ < Capacity());
      std::construct_at(buf_ + num_, std::move(value));
      ++num_;
    }

    // Fills `fill_count` entries with `default_value`, increasing the size
    // accordingly.
    auto UninitializedFill(int32_t fill_count, ConstRefType default_value)
        -> void {
      CARBON_DCHECK(num_ + fill_count <= Capacity());
      std::uninitialized_fill_n(buf_ + num_, fill_count, default_value);
      num_ += fill_count;
    }

    auto size() const -> int32_t { return num_; }

   private:
    // Verify that using an `int32_t` for `num_` is sound.
    static_assert(Capacity() <= std::numeric_limits<int32_t>::max());

    ValueType* buf_;
    int32_t num_ = 0;
  };

  // Converts a raw index into an index into the set of chunks, and an offset
  // into that specific chunk. Looks for index overflow in non-optimized
  // builds.
  static auto RawIndexToChunkIndices(int32_t index)
      -> std::pair<int32_t, int32_t> {
    constexpr auto LowBits = Chunk::IndexBits();
    // Verify there are no unused bits when indexing up to the `Capacity`. This
    // ensures that ids are contiguous values from 0, as if the values were all
    // stored in a single array, and allows using the ids to index into other
    // arrays.
    static_assert((1 << LowBits) == Chunk::Capacity());
    // Simple check to make sure nothing went wildly wrong with the `Capacity`,
    // that we have some room for a chunk index, and that shifting by this
    // number of bits won't be UB on an int32_t.
    static_assert(LowBits < 30);
    // The index of the chunk is the high bits.
    auto chunk = index >> LowBits;
    // The index into the chunk is the low bits.
    auto pos = index & ((1 << LowBits) - 1);
    return {chunk, pos};
  }

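  // As a worked example of the decomposition above, assuming `Capacity()` ==
  // 512 (so `LowBits` == 9): index 1000 yields chunk = 1000 >> 9 == 1 and
  // pos = 1000 & 511 == 488, i.e. the 489th slot of the second chunk.
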
  // Converts an id into an index into the set of chunks, and an offset into
  // that specific chunk.
  auto IdToChunkIndices(IdType id) const -> std::pair<int32_t, int32_t> {
    return RawIndexToChunkIndices(GetRawIndex(id));
  }

  // The number of elements added to the store. This should never exceed what
  // fits in an `int32_t`, which is checked in non-optimized builds in Add().
  int32_t size_ = 0;

  IdTag tag_;

  // Storage for the `ValueType` objects, indexed by the id. We use a vector of
  // chunks of `ValueType` instead of just a vector of `ValueType` so that
  // addresses of `ValueType` objects are stable. This allows the rest of the
  // toolchain to hold references into `ValueStore` without having to worry
  // about invalidation and use-after-free. We ensure at least one Chunk is
  // held inline so that in the case where there is only a single Chunk (i.e.,
  // small files) we can avoid one indirection.
  llvm::SmallVector<Chunk, 1> chunks_;
};

}  // namespace Carbon

#endif  // CARBON_TOOLCHAIN_BASE_VALUE_STORE_H_