
Add assignment support to the hashtables. (#4066)

This lets copy and move assignment work. While it's a bit suboptimal to
do assignment with these tables, it still seems like an unreasonable
burden to not allow the basics to work. Even the toolchain ended up
doing this in a few places.
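
For illustration, a minimal sketch of what this enables, using the Map API as
exercised in common/map_test.cpp below (the exact template parameters here are
an assumption, not part of this change):

    Carbon::Map<int, int> a;
    a.Insert(1, 100);

    Carbon::Map<int, int> b;
    b = a;             // Copy assignment: `b` now also maps 1 -> 100.
    b = std::move(a);  // Move assignment: `b` takes over `a`'s storage and
                       // `a` is left in a valid moved-from state.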

---------

Co-authored-by: Richard Smith <richard@metafoo.co.uk>
Chandler Carruth, 1 year ago
Commit af6312a3aa
5 changed files with 303 additions and 56 deletions:

  common/map.h            (+2, -0)
  common/map_test.cpp     (+58, -0)
  common/raw_hashtable.h  (+192, -56)
  common/set.h            (+2, -0)
  common/set_test.cpp     (+49, -0)

common/map.h (+2, -0)

@@ -385,6 +385,8 @@ class Map : public RawHashtable::TableImpl<
   Map() = default;
   Map(const Map& arg) = default;
   Map(Map&& arg) noexcept = default;
+  auto operator=(const Map& arg) -> Map& = default;
+  auto operator=(Map&& arg) noexcept -> Map& = default;
 
   // Reset the entire state of the hashtable to as it was when constructed,
   // throwing away any intervening allocations.

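These members can be `= default` because the real work happens in the
`TableImpl` assignment operators added in common/raw_hashtable.h below; the
defaulted operators simply invoke the base class's. Roughly, the generated
copy assignment behaves like this illustrative expansion (not actual generated
code, and eliding the template parameters):

    auto Map::operator=(const Map& arg) -> Map& {
      TableImpl::operator=(arg);  // Base-class assignment copies the table.
      return *this;
    }
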
common/map_test.cpp (+58, -0)

@@ -168,6 +168,26 @@ TYPED_TEST(MapTest, Copy) {
   MapT other_m2 = m;
   ExpectMapElementsAre(
       other_m2, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 32)));
+
+  // Copy-assign updates.
+  other_m1 = m;
+  ExpectMapElementsAre(
+      other_m1, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 32)));
+
+  // Self-assign is a no-op.
+  other_m1 = const_cast<const MapT&>(other_m1);
+  ExpectMapElementsAre(
+      other_m1, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 32)));
+
+  // But mutating original still doesn't change copies.
+  for (int i : llvm::seq(32, 48)) {
+    SCOPED_TRACE(llvm::formatv("Key: {0}", i).str());
+    ASSERT_TRUE(m.Insert(i, i * 100).is_inserted());
+  }
+  ExpectMapElementsAre(
+      other_m1, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 32)));
+  ExpectMapElementsAre(
+      other_m2, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 32)));
 }
 
 TYPED_TEST(MapTest, Move) {
@@ -193,6 +213,44 @@ TYPED_TEST(MapTest, Move) {
   }
   ExpectMapElementsAre(
       other_m1, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 32)));
+
+  // Move back over a moved-from.
+  m = std::move(other_m1);
+  ExpectMapElementsAre(
+      m, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 32)));
+
+  // Copy over moved-from state also works.
+  other_m1 = m;
+  ExpectMapElementsAre(
+      other_m1, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 32)));
+
+  // Now add still more elements.
+  for (int i : llvm::seq(32, 48)) {
+    SCOPED_TRACE(llvm::formatv("Key: {0}", i).str());
+    ASSERT_TRUE(other_m1.Insert(i, i * 100).is_inserted());
+  }
+  ExpectMapElementsAre(
+      other_m1, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 48)));
+
+  // And move-assign over the copy looks like the moved-from table not the copy.
+  other_m1 = std::move(m);
+  ExpectMapElementsAre(
+      other_m1, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 32)));
+
+  // Self-swap (which does a self-move) works and is a no-op.
+  std::swap(other_m1, other_m1);
+  ExpectMapElementsAre(
+      other_m1, MakeKeyValues([](int k) { return k * 100; }, llvm::seq(1, 32)));
+
+  // Test copying of a moved-from table over a valid table and self-move-assign.
+  // The former is required to be valid, and the latter is in at least the case
+  // of self-move-assign-when-moved-from, but the result can be in any state so
+  // just do them and ensure we don't crash.
+  MapT other_m2 = other_m1;
+  // NOLINTNEXTLINE(bugprone-use-after-move): Testing required use-after-move.
+  other_m2 = m;
+  other_m1 = std::move(other_m1);
+  m = std::move(m);
 }
 
 TYPED_TEST(MapTest, Conversions) {

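A note on the self-swap coverage above: std::swap on a single object performs
a self-move-assignment of an already moved-from value, which is why the tests
only require that case not to crash. A rough sketch of the classic three-move
formulation (illustrative, not the actual standard-library source):

    #include <utility>

    template <typename T>
    auto toy_swap(T& a, T& b) -> void {
      T tmp = std::move(a);
      a = std::move(b);  // When `&a == &b`, `b` was just moved from, so this
                         // is a self-move-assignment of a moved-from object.
      b = std::move(tmp);
    }
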
common/raw_hashtable.h (+192, -56)

@@ -337,6 +337,8 @@ class ViewImpl {
   using MetricsT = Metrics;
 
   friend class BaseImpl<KeyT, ValueT, KeyContextT>;
+  template <typename InputBaseT, ssize_t SmallSize>
+  friend class TableImpl;
 
   // Make more-`const` types friends to enable conversions that add `const`.
   friend class ViewImpl<const KeyT, ValueT, KeyContextT>;
@@ -515,10 +517,15 @@ class BaseImpl {
   auto small_alloc_size() const -> ssize_t {
     return static_cast<unsigned>(small_alloc_size_);
   }
-  auto is_small() const -> bool { return alloc_size() <= small_alloc_size(); }
+  auto is_small() const -> bool {
+    CARBON_DCHECK(alloc_size() >= small_alloc_size());
+    return alloc_size() == small_alloc_size();
+  }
 
   auto Construct(Storage* small_storage) -> void;
   auto Destroy() -> void;
+  auto CopySlotsFrom(const BaseImpl& arg) -> void;
+  auto MoveFrom(BaseImpl&& arg, Storage* small_storage) -> void;
 
   template <typename LookupKeyT>
   auto InsertIntoEmpty(LookupKeyT lookup_key, KeyContextT key_context)
@@ -566,6 +573,8 @@ class TableImpl : public InputBaseT {
   TableImpl() : BaseT(SmallSize, small_storage()) {}
   TableImpl(const TableImpl& arg);
   TableImpl(TableImpl&& arg) noexcept;
+  auto operator=(const TableImpl& arg) -> TableImpl&;
+  auto operator=(TableImpl&& arg) noexcept -> TableImpl&;
 
   // Resets the hashtable to its initial state, clearing all entries and
   // releasing all memory. If the hashtable had an SSO buffer, that is restored
@@ -580,6 +589,8 @@ class TableImpl : public InputBaseT {
 
   auto small_storage() const -> Storage*;
 
+  auto SetUpStorage() -> void;
+
   [[no_unique_address]] mutable SmallStorage small_storage_;
 };
 
@@ -1130,6 +1141,99 @@ auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Destroy() -> void {
   Deallocate(storage(), alloc_size());
 }
 
+// Copy all of the slots over from another table that is exactly the same
+// allocation size.
+//
+// This requires the current table to already have storage allocated and set up
+// but not initialized (or already cleared). It directly overwrites the storage
+// allocation of the table to match the incoming argument.
+//
+// Despite being used in construction, this shouldn't be called for a moved-from
+// `arg` -- in practice it is better for callers to handle this when setting up
+// storage.
+template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
+auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::CopySlotsFrom(
+    const BaseImpl& arg) -> void {
+  CARBON_DCHECK(alloc_size() == arg.alloc_size());
+  ssize_t local_size = alloc_size();
+
+  // Preserve which slot every entry is in, including tombstones in the
+  // metadata, in order to copy into the new table's storage without rehashing
+  // all of the keys. This is especially important as we don't have an easy way
+  // to access the key context needed for rehashing here.
+  uint8_t* local_metadata = metadata();
+  EntryT* local_entries = entries();
+  const uint8_t* local_arg_metadata = arg.metadata();
+  const EntryT* local_arg_entries = arg.entries();
+  memcpy(local_metadata, local_arg_metadata, local_size);
+
+  for (ssize_t group_index = 0; group_index < local_size;
+       group_index += GroupSize) {
+    auto g = MetadataGroup::Load(local_arg_metadata, group_index);
+    for (ssize_t byte_index : g.MatchPresent()) {
+      local_entries[group_index + byte_index].CopyFrom(
+          local_arg_entries[group_index + byte_index]);
+    }
+  }
+}
+
+// Move from another table to this one.
+//
+// Note that the `small_storage` is *this* table's small storage pointer,
+// provided from the `TableImpl` to this `BaseImpl` method as an argument.
+//
+// Requires the table to have size and growth already set up, but the table
+// otherwise not yet initialized. Notably, storage should either not yet be
+// constructed or already destroyed. This both sets up the storage and handles
+// any needed moving of slots.
+//
+// Note that because this is used in construction it needs to handle a
+// moved-from `arg`.
+template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
+auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::MoveFrom(
+    BaseImpl&& arg, Storage* small_storage) -> void {
+  ssize_t local_size = alloc_size();
+  CARBON_DCHECK(local_size == arg.alloc_size());
+  // If `arg` is moved-from, skip the rest as the local size is all we need.
+  if (local_size == 0) {
+    return;
+  }
+
+  if (arg.is_small()) {
+    CARBON_DCHECK(local_size == small_alloc_size_);
+    this->storage() = small_storage;
+
+    // For small tables, we have to move the entries as we can't move the tables
+    // themselves. We do this preserving their slots and even tombstones to
+    // avoid rehashing.
+    uint8_t* local_metadata = this->metadata();
+    EntryT* local_entries = this->entries();
+    uint8_t* local_arg_metadata = arg.metadata();
+    EntryT* local_arg_entries = arg.entries();
+    memcpy(local_metadata, local_arg_metadata, local_size);
+    if (EntryT::IsTriviallyRelocatable) {
+      memcpy(local_entries, local_arg_entries, local_size * sizeof(EntryT));
+    } else {
+      for (ssize_t group_index = 0; group_index < local_size;
+           group_index += GroupSize) {
+        auto g = MetadataGroup::Load(local_arg_metadata, group_index);
+        for (ssize_t byte_index : g.MatchPresent()) {
+          local_entries[group_index + byte_index].MoveFrom(
+              std::move(local_arg_entries[group_index + byte_index]));
+        }
+      }
+    }
+  } else {
+    // Just point to the allocated storage.
+    storage() = arg.storage();
+  }
+
+  // Finally, put the incoming table into a moved-from state.
+  arg.alloc_size() = 0;
+  // Replace the pointer with null to ease debugging.
+  arg.storage() = nullptr;
+}
+
 // Optimized routine to insert a key into a table when that key *definitely*
 // isn't present in the table and the table *definitely* has a viable empty slot
 // (and growth space) to insert into before any deleted slots. When both of
@@ -1399,35 +1503,59 @@ BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowAndInsert(
 template <typename InputBaseT, ssize_t SmallSize>
 TableImpl<InputBaseT, SmallSize>::TableImpl(const TableImpl& arg)
     : BaseT(arg.alloc_size(), arg.growth_budget_, SmallSize) {
+  // Check for completely broken objects. These invariants should be true even
+  // in a moved-from state.
+  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
+                arg.alloc_size() == SmallSize);
   CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
+  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);
 
-  ssize_t local_size = arg.alloc_size();
-
-  if (SmallSize > 0 && arg.is_small()) {
-    CARBON_DCHECK(local_size == SmallSize);
-    this->storage() = small_storage();
-  } else {
-    this->storage() = BaseT::Allocate(local_size);
+  if (this->alloc_size() != 0) {
+    SetUpStorage();
+    this->CopySlotsFrom(arg);
   }
+}
 
-  // Preserve which slot every entry is in, including tombstones in the
-  // metadata, in order to copy into the new table's storage without rehashing
-  // all of the keys. This is especially important as we don't have an easy way
-  // to access the key context needed for rehashing here.
-  uint8_t* local_metadata = this->metadata();
-  EntryT* local_entries = this->entries();
-  const uint8_t* local_arg_metadata = arg.metadata();
-  const EntryT* local_arg_entries = arg.entries();
-  memcpy(local_metadata, local_arg_metadata, local_size);
-
-  for (ssize_t group_index = 0; group_index < local_size;
-       group_index += GroupSize) {
-    auto g = MetadataGroup::Load(local_arg_metadata, group_index);
-    for (ssize_t byte_index : g.MatchPresent()) {
-      local_entries[group_index + byte_index].CopyFrom(
-          local_arg_entries[group_index + byte_index]);
+template <typename InputBaseT, ssize_t SmallSize>
+auto TableImpl<InputBaseT, SmallSize>::operator=(const TableImpl& arg)
+    -> TableImpl& {
+  // Check for completely broken objects. These invariants should be true even
+  // in a moved-from state.
+  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
+                arg.alloc_size() == SmallSize);
+  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
+  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);
+
+  // We have to end up with an allocation size exactly equivalent to the
+  // incoming argument to avoid re-hashing every entry in the table, which isn't
+  // possible without key context.
+  if (arg.alloc_size() == this->alloc_size()) {
+    // There's no effective way for self-assignment to fall out of an
+    // efficient implementation, so detect and bypass it here. Similarly, if
+    // both are in a moved-from state, there is nothing to do.
+    if (&arg == this || this->alloc_size() == 0) {
+      return *this;
+    }
+    CARBON_DCHECK(arg.storage() != this->storage());
+    if constexpr (!EntryT::IsTriviallyDestructible) {
+      this->view_impl_.ForEachEntry([](EntryT& entry) { entry.Destroy(); },
+                                    [](auto...) {});
+    }
+  } else {
+    // The sizes don't match, so destroy everything and re-set up the table
+    // storage.
+    this->Destroy();
+    this->alloc_size() = arg.alloc_size();
+    // If `arg` is moved-from, we've cleared out our elements and put ourselves
+    // into a moved-from state. We're done.
+    if (this->alloc_size() == 0) {
+      return *this;
     }
+    SetUpStorage();
   }
+  this->growth_budget_ = arg.growth_budget_;
+  this->CopySlotsFrom(arg);
+  return *this;
 }
 
 // Puts the incoming table into a moved-from state that can be destroyed or
@@ -1435,43 +1563,37 @@ TableImpl<InputBaseT, SmallSize>::TableImpl(const TableImpl& arg)
 template <typename InputBaseT, ssize_t SmallSize>
 TableImpl<InputBaseT, SmallSize>::TableImpl(TableImpl&& arg) noexcept
     : BaseT(arg.alloc_size(), arg.growth_budget_, SmallSize) {
+  // Check for completely broken objects. These invariants should be true even
+  // in a moved-from state.
+  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
+                arg.alloc_size() == SmallSize);
   CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
+  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);
+  this->MoveFrom(std::move(arg), small_storage());
+}
 
-  ssize_t local_size = arg.alloc_size();
+template <typename InputBaseT, ssize_t SmallSize>
+auto TableImpl<InputBaseT, SmallSize>::operator=(TableImpl&& arg) noexcept
+    -> TableImpl& {
+  // Check for completely broken objects. These invariants should be true even
+  // in a moved-from state.
+  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
+                arg.alloc_size() == SmallSize);
+  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
+  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);
 
-  if (SmallSize > 0 && arg.is_small()) {
-    CARBON_DCHECK(local_size == SmallSize);
-    this->storage() = small_storage();
+  // Destroy and deallocate our table.
+  this->Destroy();
 
-    // For small tables, we have to move the entries as we can't move the tables
-    // themselves. We do this preserving their slots and even tombstones to
-    // avoid rehashing.
-    uint8_t* local_metadata = this->metadata();
-    EntryT* local_entries = this->entries();
-    uint8_t* local_arg_metadata = arg.metadata();
-    EntryT* local_arg_entries = arg.entries();
-    memcpy(local_metadata, local_arg_metadata, local_size);
-    if (EntryT::IsTriviallyRelocatable) {
-      memcpy(local_entries, local_arg_entries, SmallSize * sizeof(EntryT));
-    } else {
-      for (ssize_t group_index = 0; group_index < local_size;
-           group_index += GroupSize) {
-        auto g = MetadataGroup::Load(local_arg_metadata, group_index);
-        for (ssize_t byte_index : g.MatchPresent()) {
-          local_entries[group_index + byte_index].MoveFrom(
-              std::move(local_arg_entries[group_index + byte_index]));
-        }
-      }
-    }
-  } else {
-    // Just point to the allocated storage.
-    this->storage() = arg.storage();
-  }
+  // Defend against self-move by zeroing the size here before we start moving
+  // out of `arg`.
+  this->alloc_size() = 0;
 
-  // Finally, put the incoming table into a moved-from state.
-  arg.alloc_size() = 0;
-  // Replace the pointer with null to ease debugging.
-  arg.storage() = nullptr;
+  // Set up to match the argument and then finish the move.
+  this->alloc_size() = arg.alloc_size();
+  this->growth_budget_ = arg.growth_budget_;
+  this->MoveFrom(std::move(arg), small_storage());
+  return *this;
 }
 
 // Reset a table to its original state, including releasing any allocated
@@ -1524,6 +1646,20 @@ auto TableImpl<InputBaseT, SmallSize>::small_storage() const -> Storage* {
   }
 }
 
+// Helper to set up the storage of a table whose size has already been set. If
+// possible, uses any small storage, otherwise allocates.
+template <typename InputBaseT, ssize_t SmallSize>
+auto TableImpl<InputBaseT, SmallSize>::SetUpStorage() -> void {
+  CARBON_DCHECK(this->small_alloc_size() == SmallSize);
+  ssize_t local_size = this->alloc_size();
+  CARBON_DCHECK(local_size != 0);
+  if (local_size == SmallSize) {
+    this->storage() = small_storage();
+  } else {
+    this->storage() = BaseT::Allocate(local_size);
+  }
+}
+
 }  // namespace Carbon::RawHashtable
 
 #endif  // CARBON_COMMON_RAW_HASHTABLE_H_

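To see why the move-assignment above zeroes `alloc_size()` before reading from
`arg`, here is a standalone toy analogue of the pattern (a hypothetical
ToyTable with `data`/`size` standing in for `storage()`/`alloc_size()`; not
Carbon's types):

    #include <cstddef>
    #include <utility>

    struct ToyTable {
      int* data = nullptr;
      std::size_t size = 0;

      auto operator=(ToyTable&& arg) noexcept -> ToyTable& {
        delete[] data;  // Analogue of `Destroy()`.
        // Zero our state *before* reading from `arg`. If `arg` is `*this`,
        // the reads below then see an empty, moved-from source rather than
        // the just-freed storage.
        size = 0;
        data = nullptr;
        size = arg.size;
        data = arg.data;
        // Put the source into a moved-from state (a no-op on self-move).
        arg.size = 0;
        arg.data = nullptr;
        return *this;
      }
    };

On self-move, the allocation is freed exactly once and the table ends up empty
but valid, matching the "result can be in any state so just ensure we don't
crash" expectation in the tests.
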
common/set.h (+2, -0)

@@ -281,6 +281,8 @@ class Set : public RawHashtable::TableImpl<SetBase<InputKeyT, InputKeyContextT>,
   Set() = default;
   Set(const Set& arg) = default;
   Set(Set&& arg) noexcept = default;
+  auto operator=(const Set& arg) -> Set& = default;
+  auto operator=(Set&& arg) noexcept -> Set& = default;
 
   // Reset the entire state of the hashtable to as it was when constructed,
   // throwing away any intervening allocations.

common/set_test.cpp (+49, -0)

@@ -134,6 +134,22 @@ TYPED_TEST(SetTest, Copy) {
   // A new copy does.
   SetT other_s2 = s;
   ExpectSetElementsAre(other_s2, MakeElements(llvm::seq(1, 32)));
+
+  // Copy-assign updates.
+  other_s1 = s;
+  ExpectSetElementsAre(other_s1, MakeElements(llvm::seq(1, 32)));
+
+  // Self-assign is a no-op.
+  other_s1 = const_cast<const SetT&>(other_s1);
+  ExpectSetElementsAre(other_s1, MakeElements(llvm::seq(1, 32)));
+
+  // But mutating original still doesn't change copies.
+  for (int i : llvm::seq(32, 48)) {
+    SCOPED_TRACE(llvm::formatv("Key: {0}", i).str());
+    ASSERT_TRUE(s.Insert(i).is_inserted());
+  }
+  ExpectSetElementsAre(other_s1, MakeElements(llvm::seq(1, 32)));
+  ExpectSetElementsAre(other_s2, MakeElements(llvm::seq(1, 32)));
 }
 
 TYPED_TEST(SetTest, Move) {
@@ -157,6 +173,39 @@ TYPED_TEST(SetTest, Move) {
     ASSERT_TRUE(other_s1.Insert(i).is_inserted());
   }
   ExpectSetElementsAre(other_s1, MakeElements(llvm::seq(1, 32)));
+
+  // Move back over a moved-from.
+  s = std::move(other_s1);
+  ExpectSetElementsAre(s, MakeElements(llvm::seq(1, 32)));
+
+  // Copy over moved-from state also works.
+  other_s1 = s;
+  ExpectSetElementsAre(other_s1, MakeElements(llvm::seq(1, 32)));
+
+  // Now add still more elements.
+  for (int i : llvm::seq(32, 48)) {
+    SCOPED_TRACE(llvm::formatv("Key: {0}", i).str());
+    ASSERT_TRUE(other_s1.Insert(i).is_inserted());
+  }
+  ExpectSetElementsAre(other_s1, MakeElements(llvm::seq(1, 48)));
+
+  // Move-assign over the copy looks like the moved-from table not the copy.
+  other_s1 = std::move(s);
+  ExpectSetElementsAre(other_s1, MakeElements(llvm::seq(1, 32)));
+
+  // Self-swap (which does a self-move) works and is a no-op.
+  std::swap(other_s1, other_s1);
+  ExpectSetElementsAre(other_s1, MakeElements(llvm::seq(1, 32)));
+
+  // Test copying of a moved-from table over a valid table and self-move-assign.
+  // The former is required to be valid, and the latter is in at least the case
+  // of self-move-assign-when-moved-from, but the result can be in any state so
+  // just do them and ensure we don't crash.
+  SetT other_s2 = other_s1;
+  // NOLINTNEXTLINE(bugprone-use-after-move): Testing required use-after-move.
+  other_s2 = s;
+  other_s1 = std::move(other_s1);
+  s = std::move(s);
 }
 
 TYPED_TEST(SetTest, Conversions) {