Import broadcast_ring.h from GVR
author    Michael Spang <spang@google.com>  Sat, 8 Apr 2017 00:22:17 +0000 (20:22 -0400)
committer Michael Spang <spang@google.com>  Mon, 10 Apr 2017 23:14:47 +0000 (19:14 -0400)
This will be used as a transport by the pose plumbing.

Bug: 37102512
Test: m -j32 broadcast_ring_tests; broadcast_ring_tests

Change-Id: I8122b98d9cc5fc9f148bbe9a823b17ac9d7a5626

libs/vr/libbroadcastring/Android.bp [new file with mode: 0644]
libs/vr/libbroadcastring/broadcast_ring_test.cc [new file with mode: 0644]
libs/vr/libbroadcastring/include/libbroadcastring/broadcast_ring.h [new file with mode: 0644]

diff --git a/libs/vr/libbroadcastring/Android.bp b/libs/vr/libbroadcastring/Android.bp
new file mode 100644 (file)
index 0000000..008468a
--- /dev/null
+++ b/libs/vr/libbroadcastring/Android.bp
@@ -0,0 +1,37 @@
+cc_library_static {
+    name: "libbroadcastring",
+    host_supported: true,
+    clang: true,
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+    export_include_dirs: ["include"],
+    shared_libs: [
+        "libbase",
+    ],
+    export_shared_lib_headers: [
+        "libbase",
+    ],
+}
+
+cc_test {
+    name: "broadcast_ring_tests",
+    host_supported: true,
+    clang: true,
+    cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+    srcs: [
+        "broadcast_ring_test.cc",
+    ],
+    static_libs: [
+        "libbroadcastring",
+    ],
+    shared_libs: [
+        "libbase",
+    ],
+}
diff --git a/libs/vr/libbroadcastring/broadcast_ring_test.cc b/libs/vr/libbroadcastring/broadcast_ring_test.cc
new file mode 100644 (file)
index 0000000..dfdd4ef
--- /dev/null
+++ b/libs/vr/libbroadcastring/broadcast_ring_test.cc
@@ -0,0 +1,866 @@
+#include "libbroadcastring/broadcast_ring.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <memory>
+#include <thread>  // NOLINT
+
+#include <gtest/gtest.h>
+
+namespace android {
+namespace dvr {
+namespace {
+
+template <uint32_t N>
+struct alignas(8) Aligned {
+  char v[N];
+};
+
+template <uint32_t N>
+struct alignas(8) Sized {
+  Sized() { Clear(); }
+  explicit Sized(char c) { Fill(c); }
+  char v[sizeof(Aligned<N>)];
+  void Clear() { memset(v, 0, sizeof(v)); }
+  void Fill(char c) { memset(v, c, sizeof(v)); }
+  static Sized Pattern(uint8_t c) {
+    Sized sized;
+    for (size_t i = 0; i < sizeof(v); ++i) {
+      sized.v[i] = static_cast<char>(c + i);
+    }
+    return sized;
+  }
+  bool operator==(const Sized& right) const {
+    static_assert(sizeof(*this) == sizeof(v), "Size mismatch");
+    return !memcmp(v, right.v, sizeof(v));
+  }
+  template <typename SmallerSized>
+  SmallerSized Truncate() const {
+    SmallerSized val;
+    static_assert(sizeof(val.v) <= sizeof(v), "Cannot truncate to larger size");
+    memcpy(val.v, v, sizeof(val.v));
+    return val;
+  }
+};
+
+char FillChar(int val) { return static_cast<char>(val); }
+
+struct FakeMmap {
+  explicit FakeMmap(size_t size) : size(size), data(new char[size]) {}
+  size_t size;
+  std::unique_ptr<char[]> data;
+  void* mmap() { return static_cast<void*>(data.get()); }
+};
+
+template <typename Ring>
+FakeMmap CreateRing(Ring* ring, uint32_t count) {
+  FakeMmap mmap(Ring::MemorySize(count));
+  *ring = Ring::Create(mmap.mmap(), mmap.size, count);
+  return mmap;
+}
+
+template <typename RecordType, bool StaticSize = false,
+          uint32_t StaticCount = 0, uint32_t MaxReserved = 1,
+          uint32_t MinAvailable = 0>
+struct Traits {
+  using Record = RecordType;
+  static constexpr bool kUseStaticRecordSize = StaticSize;
+  static constexpr uint32_t kStaticRecordCount = StaticCount;
+  static constexpr uint32_t kMaxReservedRecords = MaxReserved;
+  static constexpr uint32_t kMinAvailableRecords = MinAvailable;
+  static constexpr uint32_t kMinRecordCount = MaxReserved + MinAvailable;
+};
+
+template <typename Record, bool StaticSize = false, uint32_t MaxReserved = 1,
+          uint32_t MinAvailable = 7>
+struct TraitsDynamic
+    : public Traits<Record, StaticSize, 0, MaxReserved, MinAvailable> {
+  using Ring = BroadcastRing<Record, TraitsDynamic>;
+  static uint32_t MinCount() { return MaxReserved + MinAvailable; }
+};
+
+template <typename Record, uint32_t StaticCount = 1, bool StaticSize = true,
+          uint32_t MaxReserved = 1, uint32_t MinAvailable = 0>
+struct TraitsStatic
+    : public Traits<Record, true, StaticCount, MaxReserved, MinAvailable> {
+  using Ring = BroadcastRing<Record, TraitsStatic>;
+  static uint32_t MinCount() { return StaticCount; }
+};
+
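+// Alias naming convention: <record bytes>_<record size>x<record count>, where
+// N or M mean the record size or count is chosen dynamically at runtime. An
+// optional _RplusA suffix gives kMaxReservedRecords plus kMinAvailableRecords.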
+using Dynamic_8_NxM = TraitsDynamic<Sized<8>>;
+using Dynamic_16_NxM = TraitsDynamic<Sized<16>>;
+using Dynamic_32_NxM = TraitsDynamic<Sized<32>>;
+using Dynamic_32_32xM = TraitsDynamic<Sized<32>, true>;
+using Dynamic_16_NxM_1plus0 = TraitsDynamic<Sized<16>, false, 1, 0>;
+using Dynamic_16_NxM_1plus1 = TraitsDynamic<Sized<16>, false, 1, 1>;
+using Dynamic_16_NxM_5plus11 = TraitsDynamic<Sized<16>, false, 5, 11>;
+using Dynamic_256_NxM_1plus0 = TraitsDynamic<Sized<256>, false, 1, 0>;
+
+using Static_8_8x1 = TraitsStatic<Sized<8>, 1>;
+using Static_8_8x16 = TraitsStatic<Sized<8>, 16>;
+using Static_16_16x8 = TraitsStatic<Sized<16>, 8>;
+using Static_16_16x16 = TraitsStatic<Sized<16>, 16>;
+using Static_16_16x32 = TraitsStatic<Sized<16>, 32>;
+using Static_32_Nx8 = TraitsStatic<Sized<32>, 8, false>;
+
+using TraitsList = ::testing::Types<Dynamic_8_NxM,           //
+                                    Dynamic_16_NxM,          //
+                                    Dynamic_32_NxM,          //
+                                    Dynamic_32_32xM,         //
+                                    Dynamic_16_NxM_1plus0,   //
+                                    Dynamic_16_NxM_1plus1,   //
+                                    Dynamic_16_NxM_5plus11,  //
+                                    Dynamic_256_NxM_1plus0,  //
+                                    Static_8_8x1,            //
+                                    Static_8_8x16,           //
+                                    Static_16_16x8,          //
+                                    Static_16_16x16,         //
+                                    Static_16_16x32,         //
+                                    Static_32_Nx8>;
+
+}  // namespace
+
+template <typename T>
+class BroadcastRingTest : public ::testing::Test {};
+
+TYPED_TEST_CASE(BroadcastRingTest, TraitsList);
+
+TYPED_TEST(BroadcastRingTest, Geometry) {
+  using Record = typename TypeParam::Record;
+  using Ring = typename TypeParam::Ring;
+  Ring ring;
+  auto mmap = CreateRing(&ring, Ring::Traits::MinCount());
+  EXPECT_EQ(Ring::Traits::MinCount(), ring.record_count());
+  EXPECT_EQ(sizeof(Record), ring.record_size());
+}
+
+TYPED_TEST(BroadcastRingTest, PutGet) {
+  using Record = typename TypeParam::Record;
+  using Ring = typename TypeParam::Ring;
+  Ring ring;
+  auto mmap = CreateRing(&ring, Ring::Traits::MinCount());
+  const uint32_t oldest_sequence_at_start = ring.GetOldestSequence();
+  const uint32_t next_sequence_at_start = ring.GetNextSequence();
+  {
+    uint32_t sequence = oldest_sequence_at_start;
+    Record record;
+    EXPECT_FALSE(ring.Get(&sequence, &record));
+    EXPECT_EQ(oldest_sequence_at_start, sequence);
+    EXPECT_EQ(Record(), record);
+  }
+  const Record original_record(0x1a);
+  ring.Put(original_record);
+  {
+    uint32_t sequence = next_sequence_at_start;
+    Record record;
+    EXPECT_TRUE(ring.Get(&sequence, &record));
+    EXPECT_EQ(next_sequence_at_start, sequence);
+    EXPECT_EQ(original_record, record);
+  }
+  {
+    uint32_t sequence = next_sequence_at_start + 1;
+    Record record;
+    EXPECT_FALSE(ring.Get(&sequence, &record));
+    EXPECT_EQ(next_sequence_at_start + 1, sequence);
+    EXPECT_EQ(Record(), record);
+  }
+}
+
+TYPED_TEST(BroadcastRingTest, FillOnce) {
+  using Record = typename TypeParam::Record;
+  using Ring = typename TypeParam::Ring;
+  Ring ring;
+  auto mmap = CreateRing(&ring, Ring::Traits::MinCount());
+  const uint32_t next_sequence_at_start = ring.GetNextSequence();
+  for (uint32_t i = 0; i < ring.record_count(); ++i)
+    ring.Put(Record(FillChar(i)));
+  for (uint32_t i = 0; i < ring.record_count(); ++i) {
+    const uint32_t expected_sequence = next_sequence_at_start + i;
+    const Record expected_record(FillChar(i));
+    {
+      uint32_t sequence = ring.GetOldestSequence() + i;
+      Record record;
+      EXPECT_TRUE(ring.Get(&sequence, &record));
+      EXPECT_EQ(expected_sequence, sequence);
+      EXPECT_EQ(expected_record, record);
+    }
+  }
+  {
+    uint32_t sequence = ring.GetOldestSequence() + ring.record_count();
+    Record record;
+    EXPECT_FALSE(ring.Get(&sequence, &record));
+  }
+}
+
+TYPED_TEST(BroadcastRingTest, FillTwice) {
+  using Record = typename TypeParam::Record;
+  using Ring = typename TypeParam::Ring;
+  Ring ring;
+  auto mmap = CreateRing(&ring, Ring::Traits::MinCount());
+  const uint32_t next_sequence_at_start = ring.GetNextSequence();
+  for (uint32_t i = 0; i < 2 * ring.record_count(); ++i) {
+    const Record newest_record(FillChar(i));
+    ring.Put(newest_record);
+
+    const uint32_t newest_sequence = next_sequence_at_start + i;
+    const uint32_t records_available = std::min(i + 1, ring.record_count());
+    const uint32_t oldest_sequence = newest_sequence - records_available + 1;
+    EXPECT_EQ(newest_sequence, ring.GetNewestSequence());
+    EXPECT_EQ(oldest_sequence, ring.GetOldestSequence());
+    EXPECT_EQ(newest_sequence + 1, ring.GetNextSequence());
+
+    for (uint32_t j = 0; j < records_available; ++j) {
+      const uint32_t sequence_jth_newest = newest_sequence - j;
+      const Record record_jth_newest(FillChar(i - j));
+
+      {
+        uint32_t sequence = sequence_jth_newest;
+        Record record;
+        EXPECT_TRUE(ring.Get(&sequence, &record));
+        EXPECT_EQ(sequence_jth_newest, sequence);
+        EXPECT_EQ(record_jth_newest, record);
+      }
+
+      {
+        uint32_t sequence = sequence_jth_newest;
+        Record record;
+        EXPECT_TRUE(ring.GetNewest(&sequence, &record));
+        EXPECT_EQ(newest_sequence, sequence);
+        EXPECT_EQ(newest_record, record);
+      }
+    }
+
+    const Record oldest_record(
+        FillChar(i + (oldest_sequence - newest_sequence)));
+    const uint32_t sequence_0th_overwritten = oldest_sequence - 1;
+    const uint32_t sequence_0th_future = newest_sequence + 1;
+    const uint32_t sequence_1st_future = newest_sequence + 2;
+
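+    // Reads of an already-overwritten sequence snap forward to the oldest
+    // available record; a read exactly one past the newest finds nothing, and
+    // reads even further ahead snap back to the oldest as well.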
+    {
+      uint32_t sequence = sequence_0th_overwritten;
+      Record record;
+      EXPECT_TRUE(ring.Get(&sequence, &record));
+      EXPECT_EQ(oldest_sequence, sequence);
+      EXPECT_EQ(oldest_record, record);
+    }
+
+    {
+      uint32_t sequence = sequence_0th_overwritten;
+      Record record;
+      EXPECT_TRUE(ring.GetNewest(&sequence, &record));
+      EXPECT_EQ(newest_sequence, sequence);
+      EXPECT_EQ(newest_record, record);
+    }
+
+    {
+      uint32_t sequence = sequence_0th_future;
+      Record record;
+      EXPECT_FALSE(ring.Get(&sequence, &record));
+      EXPECT_EQ(sequence_0th_future, sequence);
+      EXPECT_EQ(Record(), record);
+    }
+
+    {
+      uint32_t sequence = sequence_0th_future;
+      Record record;
+      EXPECT_FALSE(ring.GetNewest(&sequence, &record));
+      EXPECT_EQ(sequence_0th_future, sequence);
+      EXPECT_EQ(Record(), record);
+    }
+
+    {
+      uint32_t sequence = sequence_1st_future;
+      Record record;
+      EXPECT_TRUE(ring.Get(&sequence, &record));
+      EXPECT_EQ(oldest_sequence, sequence);
+      EXPECT_EQ(oldest_record, record);
+    }
+
+    {
+      uint32_t sequence = sequence_1st_future;
+      Record record;
+      EXPECT_TRUE(ring.GetNewest(&sequence, &record));
+      EXPECT_EQ(newest_sequence, sequence);
+      EXPECT_EQ(newest_record, record);
+    }
+  }
+}
+
+TYPED_TEST(BroadcastRingTest, Import) {
+  using Record = typename TypeParam::Record;
+  using Ring = typename TypeParam::Ring;
+  Ring ring;
+  auto mmap = CreateRing(&ring, Ring::Traits::MinCount());
+
+  const uint32_t sequence_0 = ring.GetNextSequence();
+  const uint32_t sequence_1 = ring.GetNextSequence() + 1;
+  const Record record_0 = Record::Pattern(0x00);
+  const Record record_1 = Record::Pattern(0x80);
+  ring.Put(record_0);
+  ring.Put(record_1);
+
+  {
+    Ring imported_ring;
+    bool import_ok;
+    std::tie(imported_ring, import_ok) = Ring::Import(mmap.mmap(), mmap.size);
+    EXPECT_TRUE(import_ok);
+    EXPECT_EQ(ring.record_size(), imported_ring.record_size());
+    EXPECT_EQ(ring.record_count(), imported_ring.record_count());
+
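+    // In a single-record ring, record_0 has already been overwritten by
+    // record_1, so only check for it when more than one record fits.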
+    if (ring.record_count() != 1) {
+      uint32_t sequence = sequence_0;
+      Record imported_record;
+      EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record));
+      EXPECT_EQ(sequence_0, sequence);
+      EXPECT_EQ(record_0, imported_record);
+    }
+
+    {
+      uint32_t sequence = sequence_1;
+      Record imported_record;
+      EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record));
+      EXPECT_EQ(sequence_1, sequence);
+      EXPECT_EQ(record_1, imported_record);
+    }
+  }
+}
+
+TEST(BroadcastRingTest, ShouldFailImportIfStaticSizeMismatch) {
+  using OriginalRing = typename Static_16_16x16::Ring;
+  using RecordSizeMismatchRing = typename Static_8_8x16::Ring;
+  using RecordCountMismatchRing = typename Static_16_16x8::Ring;
+
+  OriginalRing original_ring;
+  auto mmap = CreateRing(&original_ring, OriginalRing::Traits::MinCount());
+
+  {
+    using ImportedRing = RecordSizeMismatchRing;
+    ImportedRing imported_ring;
+    bool import_ok;
+    std::tie(imported_ring, import_ok) =
+        ImportedRing::Import(mmap.mmap(), mmap.size);
+    EXPECT_FALSE(import_ok);
+    auto mmap_imported =
+        CreateRing(&imported_ring, ImportedRing::Traits::MinCount());
+    EXPECT_NE(original_ring.record_size(), imported_ring.record_size());
+    EXPECT_EQ(original_ring.record_count(), imported_ring.record_count());
+  }
+
+  {
+    using ImportedRing = RecordCountMismatchRing;
+    ImportedRing imported_ring;
+    bool import_ok;
+    std::tie(imported_ring, import_ok) =
+        ImportedRing::Import(mmap.mmap(), mmap.size);
+    EXPECT_FALSE(import_ok);
+    auto mmap_imported =
+        CreateRing(&imported_ring, ImportedRing::Traits::MinCount());
+    EXPECT_EQ(original_ring.record_size(), imported_ring.record_size());
+    EXPECT_NE(original_ring.record_count(), imported_ring.record_count());
+  }
+}
+
+TEST(BroadcastRingTest, ShouldFailImportIfDynamicSizeGrows) {
+  using OriginalRing = typename Dynamic_8_NxM::Ring;
+  using RecordSizeGrowsRing = typename Dynamic_16_NxM::Ring;
+
+  OriginalRing original_ring;
+  auto mmap = CreateRing(&original_ring, OriginalRing::Traits::MinCount());
+
+  {
+    using ImportedRing = RecordSizeGrowsRing;
+    ImportedRing imported_ring;
+    bool import_ok;
+    std::tie(imported_ring, import_ok) =
+        ImportedRing::Import(mmap.mmap(), mmap.size);
+    EXPECT_FALSE(import_ok);
+    auto mmap_imported =
+        CreateRing(&imported_ring, ImportedRing::Traits::MinCount());
+    EXPECT_LT(original_ring.record_size(), imported_ring.record_size());
+    EXPECT_EQ(original_ring.record_count(), imported_ring.record_count());
+  }
+}
+
+TEST(BroadcastRingTest, ShouldFailImportIfCountTooSmall) {
+  using OriginalRing = typename Dynamic_16_NxM_1plus0::Ring;
+  using MinCountRing = typename Dynamic_16_NxM_1plus1::Ring;
+
+  OriginalRing original_ring;
+  auto mmap = CreateRing(&original_ring, OriginalRing::Traits::MinCount());
+
+  {
+    using ImportedRing = MinCountRing;
+    ImportedRing imported_ring;
+    bool import_ok;
+    std::tie(imported_ring, import_ok) =
+        ImportedRing::Import(mmap.mmap(), mmap.size);
+    EXPECT_FALSE(import_ok);
+    auto mmap_imported =
+        CreateRing(&imported_ring, ImportedRing::Traits::MinCount());
+    EXPECT_EQ(original_ring.record_size(), imported_ring.record_size());
+    EXPECT_LT(original_ring.record_count(), imported_ring.record_count());
+  }
+}
+
+TEST(BroadcastRingTest, ShouldFailImportIfMmapTooSmall) {
+  using OriginalRing = typename Dynamic_16_NxM::Ring;
+
+  OriginalRing original_ring;
+  auto mmap = CreateRing(&original_ring, OriginalRing::Traits::MinCount());
+
+  {
+    using ImportedRing = OriginalRing;
+    ImportedRing imported_ring;
+    bool import_ok;
+    const size_t kMinSize =
+        ImportedRing::MemorySize(original_ring.record_count());
+    std::tie(imported_ring, import_ok) = ImportedRing::Import(mmap.mmap(), 0);
+    EXPECT_FALSE(import_ok);
+    std::tie(imported_ring, import_ok) =
+        ImportedRing::Import(mmap.mmap(), kMinSize - 1);
+    EXPECT_FALSE(import_ok);
+    std::tie(imported_ring, import_ok) =
+        ImportedRing::Import(mmap.mmap(), kMinSize);
+    EXPECT_TRUE(import_ok);
+    EXPECT_EQ(original_ring.record_size(), imported_ring.record_size());
+    EXPECT_EQ(original_ring.record_count(), imported_ring.record_count());
+  }
+}
+
+TEST(BroadcastRingTest, ShouldImportIfDynamicSizeShrinks) {
+  using OriginalRing = typename Dynamic_16_NxM::Ring;
+  using RecordSizeShrinksRing = typename Dynamic_8_NxM::Ring;
+
+  OriginalRing original_ring;
+  auto mmap = CreateRing(&original_ring, OriginalRing::Traits::MinCount());
+
+  using OriginalRecord = typename OriginalRing::Record;
+  const uint32_t original_sequence_0 = original_ring.GetNextSequence();
+  const uint32_t original_sequence_1 = original_ring.GetNextSequence() + 1;
+  const OriginalRecord original_record_0 = OriginalRecord::Pattern(0x00);
+  const OriginalRecord original_record_1 = OriginalRecord::Pattern(0x80);
+  original_ring.Put(original_record_0);
+  original_ring.Put(original_record_1);
+
+  {
+    using ImportedRing = RecordSizeShrinksRing;
+    using ImportedRecord = typename ImportedRing::Record;
+    ImportedRing imported_ring;
+    bool import_ok;
+    std::tie(imported_ring, import_ok) =
+        ImportedRing::Import(mmap.mmap(), mmap.size);
+    EXPECT_TRUE(import_ok);
+    EXPECT_EQ(original_ring.record_size(), imported_ring.record_size());
+    EXPECT_EQ(original_ring.record_count(), imported_ring.record_count());
+    EXPECT_GT(sizeof(OriginalRecord), sizeof(ImportedRecord));
+
+    {
+      uint32_t sequence = original_sequence_0;
+      ImportedRecord shrunk_record;
+      EXPECT_TRUE(imported_ring.Get(&sequence, &shrunk_record));
+      EXPECT_EQ(original_sequence_0, sequence);
+      EXPECT_EQ(original_record_0.Truncate<ImportedRecord>(), shrunk_record);
+    }
+
+    {
+      uint32_t sequence = original_sequence_1;
+      ImportedRecord shrunk_record;
+      EXPECT_TRUE(imported_ring.Get(&sequence, &shrunk_record));
+      EXPECT_EQ(original_sequence_1, sequence);
+      EXPECT_EQ(original_record_1.Truncate<ImportedRecord>(), shrunk_record);
+    }
+  }
+}
+
+TEST(BroadcastRingTest, ShouldImportIfCompatibleDynamicToStatic) {
+  using OriginalRing = typename Dynamic_16_NxM::Ring;
+  using ImportedRing = typename Static_16_16x16::Ring;
+  using OriginalRecord = typename OriginalRing::Record;
+  using ImportedRecord = typename ImportedRing::Record;
+  using StaticRing = ImportedRing;
+
+  OriginalRing original_ring;
+  auto mmap = CreateRing(&original_ring, StaticRing::Traits::MinCount());
+
+  const uint32_t original_sequence_0 = original_ring.GetNextSequence();
+  const uint32_t original_sequence_1 = original_ring.GetNextSequence() + 1;
+  const OriginalRecord original_record_0 = OriginalRecord::Pattern(0x00);
+  const OriginalRecord original_record_1 = OriginalRecord::Pattern(0x80);
+  original_ring.Put(original_record_0);
+  original_ring.Put(original_record_1);
+
+  {
+    ImportedRing imported_ring;
+    bool import_ok;
+    std::tie(imported_ring, import_ok) =
+        ImportedRing::Import(mmap.mmap(), mmap.size);
+    EXPECT_TRUE(import_ok);
+    EXPECT_EQ(original_ring.record_size(), imported_ring.record_size());
+    EXPECT_EQ(original_ring.record_count(), imported_ring.record_count());
+
+    {
+      uint32_t sequence = original_sequence_0;
+      ImportedRecord imported_record;
+      EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record));
+      EXPECT_EQ(original_sequence_0, sequence);
+      EXPECT_EQ(original_record_0, imported_record);
+    }
+
+    {
+      uint32_t sequence = original_sequence_1;
+      ImportedRecord imported_record;
+      EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record));
+      EXPECT_EQ(original_sequence_1, sequence);
+      EXPECT_EQ(original_record_1, imported_record);
+    }
+  }
+}
+
+TEST(BroadcastRingTest, ShouldImportIfCompatibleStaticToDynamic) {
+  using OriginalRing = typename Static_16_16x16::Ring;
+  using ImportedRing = typename Dynamic_16_NxM::Ring;
+  using OriginalRecord = typename OriginalRing::Record;
+  using ImportedRecord = typename ImportedRing::Record;
+  using StaticRing = OriginalRing;
+
+  OriginalRing original_ring;
+  auto mmap = CreateRing(&original_ring, StaticRing::Traits::MinCount());
+
+  const uint32_t original_sequence_0 = original_ring.GetNextSequence();
+  const uint32_t original_sequence_1 = original_ring.GetNextSequence() + 1;
+  const OriginalRecord original_record_0 = OriginalRecord::Pattern(0x00);
+  const OriginalRecord original_record_1 = OriginalRecord::Pattern(0x80);
+  original_ring.Put(original_record_0);
+  original_ring.Put(original_record_1);
+
+  {
+    ImportedRing imported_ring;
+    bool import_ok;
+    std::tie(imported_ring, import_ok) =
+        ImportedRing::Import(mmap.mmap(), mmap.size);
+    EXPECT_TRUE(import_ok);
+    EXPECT_EQ(original_ring.record_size(), imported_ring.record_size());
+    EXPECT_EQ(original_ring.record_count(), imported_ring.record_count());
+
+    {
+      uint32_t sequence = original_sequence_0;
+      ImportedRecord imported_record;
+      EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record));
+      EXPECT_EQ(original_sequence_0, sequence);
+      EXPECT_EQ(original_record_0, imported_record);
+    }
+
+    {
+      uint32_t sequence = original_sequence_1;
+      ImportedRecord imported_record;
+      EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record));
+      EXPECT_EQ(original_sequence_1, sequence);
+      EXPECT_EQ(original_record_1, imported_record);
+    }
+  }
+}
+
+TEST(BroadcastRingTest, ShouldImportIfReadonlyMmap) {
+  using Ring = Dynamic_32_NxM::Ring;
+  using Record = Ring::Record;
+
+  uint32_t record_count = Ring::Traits::MinCount();
+  size_t ring_size = Ring::MemorySize(record_count);
+
+  size_t page_size = sysconf(_SC_PAGESIZE);
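+  // Round the ring size up to a whole number of pages for mmap.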
+  size_t mmap_size = (ring_size + (page_size - 1)) & ~(page_size - 1);
+  ASSERT_GE(mmap_size, ring_size);
+
+  void* mmap_base = mmap(nullptr, mmap_size, PROT_READ | PROT_WRITE,
+                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  ASSERT_NE(MAP_FAILED, mmap_base);
+
+  Ring ring = Ring::Create(mmap_base, mmap_size, record_count);
+  for (uint32_t i = 0; i < record_count; ++i) ring.Put(Record(FillChar(i)));
+
+  ASSERT_EQ(0, mprotect(mmap_base, mmap_size, PROT_READ));
+
+  {
+    Ring imported_ring;
+    bool import_ok;
+    std::tie(imported_ring, import_ok) = Ring::Import(mmap_base, mmap_size);
+    EXPECT_TRUE(import_ok);
+    EXPECT_EQ(ring.record_size(), imported_ring.record_size());
+    EXPECT_EQ(ring.record_count(), imported_ring.record_count());
+
+    uint32_t oldest_sequence = imported_ring.GetOldestSequence();
+    for (uint32_t i = 0; i < record_count; ++i) {
+      uint32_t sequence = oldest_sequence + i;
+      Record record;
+      EXPECT_TRUE(imported_ring.Get(&sequence, &record));
+      EXPECT_EQ(Record(FillChar(i)), record);
+    }
+  }
+
+  ASSERT_EQ(0, munmap(mmap_base, mmap_size));
+}
+
+TEST(BroadcastRingTest, ShouldDieIfPutReadonlyMmap) {
+  using Ring = Dynamic_32_NxM::Ring;
+  using Record = Ring::Record;
+
+  uint32_t record_count = Ring::Traits::MinCount();
+  size_t ring_size = Ring::MemorySize(record_count);
+
+  size_t page_size = sysconf(_SC_PAGESIZE);
+  size_t mmap_size = (ring_size + (page_size - 1)) & ~(page_size - 1);
+  ASSERT_GE(mmap_size, ring_size);
+
+  void* mmap_base = mmap(nullptr, mmap_size, PROT_READ | PROT_WRITE,
+                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  ASSERT_NE(MAP_FAILED, mmap_base);
+
+  Ring ring = Ring::Create(mmap_base, mmap_size, record_count);
+  for (uint32_t i = 0; i < record_count; ++i) ring.Put(Record(FillChar(i)));
+
+  ASSERT_EQ(0, mprotect(mmap_base, mmap_size, PROT_READ));
+
+  EXPECT_DEATH_IF_SUPPORTED({ ring.Put(Record(7)); }, "");
+
+  ASSERT_EQ(0, munmap(mmap_base, mmap_size));
+}
+
+TEST(BroadcastRingTest, ShouldDieIfCreationMmapTooSmall) {
+  using Ring = Dynamic_32_NxM::Ring;
+  using Record = Ring::Record;
+
+  uint32_t record_count = Ring::Traits::MinCount();
+  size_t ring_size = Ring::MemorySize(record_count);
+  FakeMmap mmap(ring_size);
+
+  EXPECT_DEATH_IF_SUPPORTED({
+    Ring ring = Ring::Create(mmap.mmap(), ring_size - 1, record_count);
+  }, "");
+
+  Ring ring = Ring::Create(mmap.mmap(), ring_size, record_count);
+
+  ring.Put(Record(3));
+
+  {
+    uint32_t sequence = ring.GetNewestSequence();
+    Record record;
+    EXPECT_TRUE(ring.Get(&sequence, &record));
+    EXPECT_EQ(Record(3), record);
+  }
+}
+
+TEST(BroadcastRingTest, ShouldDieIfCreationMmapMisaligned) {
+  using Ring = Static_8_8x1::Ring;
+  using Record = Ring::Record;
+
+  constexpr int kAlign = Ring::mmap_alignment();
+  constexpr int kMisalign = kAlign / 2;
+  size_t ring_size = Ring::MemorySize();
+  std::unique_ptr<char[]> buf(new char[ring_size + kMisalign]);
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      { Ring ring = Ring::Create(buf.get() + kMisalign, ring_size); }, "");
+
+  Ring ring = Ring::Create(buf.get(), ring_size);
+
+  ring.Put(Record(3));
+
+  {
+    uint32_t sequence = ring.GetNewestSequence();
+    Record record;
+    EXPECT_TRUE(ring.Get(&sequence, &record));
+    EXPECT_EQ(Record(3), record);
+  }
+}
+
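+// Returns a thread that imports the rings at |in_base| and |out_base| and
+// forwards every record it observes from the input ring to the output ring
+// until |quit| is set.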
+template <typename Ring>
+std::unique_ptr<std::thread> CopyTask(std::atomic<bool>* quit, void* in_base,
+                                      size_t in_size, void* out_base,
+                                      size_t out_size) {
+  return std::unique_ptr<std::thread>(
+      new std::thread([quit, in_base, in_size, out_base, out_size]() {
+        using Record = typename Ring::Record;
+
+        bool import_ok;
+        Ring in_ring;
+        Ring out_ring;
+        std::tie(in_ring, import_ok) = Ring::Import(in_base, in_size);
+        ASSERT_TRUE(import_ok);
+        std::tie(out_ring, import_ok) = Ring::Import(out_base, out_size);
+        ASSERT_TRUE(import_ok);
+
+        uint32_t sequence = in_ring.GetOldestSequence();
+        while (!std::atomic_load_explicit(quit, std::memory_order_relaxed)) {
+          Record record;
+          if (in_ring.Get(&sequence, &record)) {
+            out_ring.Put(record);
+            sequence++;
+          }
+        }
+      }));
+}
+
+TEST(BroadcastRingTest, ThreadedCopySingle) {
+  using Ring = Dynamic_32_NxM::Ring;
+  using Record = Ring::Record;
+  Ring in_ring;
+  auto in_mmap = CreateRing(&in_ring, Ring::Traits::MinCount());
+
+  Ring out_ring;
+  auto out_mmap = CreateRing(&out_ring, Ring::Traits::MinCount());
+
+  std::atomic<bool> quit(false);
+  std::unique_ptr<std::thread> copy_task = CopyTask<Ring>(
+      &quit, out_mmap.mmap(), out_mmap.size, in_mmap.mmap(), in_mmap.size);
+
+  const Record out_record(0x1c);
+  out_ring.Put(out_record);
+
+  uint32_t in_sequence = in_ring.GetOldestSequence();
+  Record in_record;
+  while (!in_ring.Get(&in_sequence, &in_record)) {
+    // Do nothing.
+  }
+
+  EXPECT_EQ(out_record, in_record);
+  std::atomic_store_explicit(&quit, true, std::memory_order_relaxed);
+  copy_task->join();
+}
+
+TEST(BroadcastRingTest, ThreadedCopyLossless) {
+  using Ring = Dynamic_32_NxM::Ring;
+  using Record = Ring::Record;
+  Ring in_ring;
+  auto in_mmap = CreateRing(&in_ring, Ring::Traits::MinCount());
+
+  Ring out_ring;
+  auto out_mmap = CreateRing(&out_ring, Ring::Traits::MinCount());
+
+  std::atomic<bool> quit(false);
+  std::unique_ptr<std::thread> copy_task = CopyTask<Ring>(
+      &quit, out_mmap.mmap(), out_mmap.size, in_mmap.mmap(), in_mmap.size);
+
+  constexpr uint32_t kRecordsToProcess = 10000;
+  uint32_t out_records = 0;
+  uint32_t in_records = 0;
+  uint32_t in_sequence = in_ring.GetNextSequence();
+  while (out_records < kRecordsToProcess || in_records < kRecordsToProcess) {
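+    // Throttle the producer so that at most record_count() records are in
+    // flight; this ensures the copy thread reads each record before it can be
+    // overwritten, keeping the transfer lossless.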
+    if (out_records < kRecordsToProcess &&
+        out_records - in_records < out_ring.record_count()) {
+      const Record out_record(FillChar(out_records));
+      out_ring.Put(out_record);
+      out_records++;
+    }
+
+    Record in_record;
+    while (in_ring.Get(&in_sequence, &in_record)) {
+      EXPECT_EQ(Record(FillChar(in_records)), in_record);
+      in_records++;
+      in_sequence++;
+    }
+  }
+
+  EXPECT_EQ(kRecordsToProcess, out_records);
+  EXPECT_EQ(kRecordsToProcess, in_records);
+
+  std::atomic_store_explicit(&quit, true, std::memory_order_relaxed);
+  copy_task->join();
+}
+
+TEST(BroadcastRingTest, ThreadedCopyLossy) {
+  using Ring = Dynamic_32_NxM::Ring;
+  using Record = Ring::Record;
+  Ring in_ring;
+  auto in_mmap = CreateRing(&in_ring, Ring::Traits::MinCount());
+
+  Ring out_ring;
+  auto out_mmap = CreateRing(&out_ring, Ring::Traits::MinCount());
+
+  std::atomic<bool> quit(false);
+  std::unique_ptr<std::thread> copy_task = CopyTask<Ring>(
+      &quit, out_mmap.mmap(), out_mmap.size, in_mmap.mmap(), in_mmap.size);
+
+  constexpr uint32_t kRecordsToProcess = 100000;
+  uint32_t out_records = 0;
+  uint32_t in_records = 0;
+  uint32_t in_sequence = in_ring.GetNextSequence();
+  while (out_records < kRecordsToProcess) {
+    const Record out_record(FillChar(out_records));
+    out_ring.Put(out_record);
+    out_records++;
+
+    Record in_record;
+    if (in_ring.GetNewest(&in_sequence, &in_record)) {
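+      // Each record is filled with a single repeated char, so this catches
+      // any torn read of a partially overwritten record.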
+      EXPECT_EQ(Record(in_record.v[0]), in_record);
+      in_records++;
+      in_sequence++;
+    }
+  }
+
+  EXPECT_EQ(kRecordsToProcess, out_records);
+  EXPECT_GE(kRecordsToProcess, in_records);
+
+  std::atomic_store_explicit(&quit, true, std::memory_order_relaxed);
+  copy_task->join();
+}
+
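+// Returns a thread that imports the ring at |in_base| and verifies that every
+// record it reads is internally consistent (all bytes equal), i.e. not torn.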
+template <typename Ring>
+std::unique_ptr<std::thread> CheckFillTask(std::atomic<bool>* quit,
+                                           void* in_base, size_t in_size) {
+  return std::unique_ptr<std::thread>(
+      new std::thread([quit, in_base, in_size]() {
+        using Record = typename Ring::Record;
+
+        bool import_ok;
+        Ring in_ring;
+        std::tie(in_ring, import_ok) = Ring::Import(in_base, in_size);
+        ASSERT_TRUE(import_ok);
+
+        uint32_t sequence = in_ring.GetOldestSequence();
+        while (!std::atomic_load_explicit(quit, std::memory_order_relaxed)) {
+          Record record;
+          if (in_ring.Get(&sequence, &record)) {
+            ASSERT_EQ(Record(record.v[0]), record);
+            sequence++;
+          }
+        }
+      }));
+}
+
+template <typename Ring>
+void ThreadedOverwriteTorture() {
+  using Record = typename Ring::Record;
+
+  // Maximize overwrites by having few records.
+  const int kMinRecordCount = 1;
+  const int kMaxRecordCount = 4;
+
+  for (int count = kMinRecordCount; count <= kMaxRecordCount; count *= 2) {
+    Ring out_ring;
+    auto out_mmap = CreateRing(&out_ring, count);
+
+    std::atomic<bool> quit(false);
+    std::unique_ptr<std::thread> check_task =
+        CheckFillTask<Ring>(&quit, out_mmap.mmap(), out_mmap.size);
+
+    constexpr int kIterations = 10000;
+    for (int i = 0; i < kIterations; ++i) {
+      const Record record(FillChar(i));
+      out_ring.Put(record);
+    }
+
+    std::atomic_store_explicit(&quit, true, std::memory_order_relaxed);
+    check_task->join();
+  }
+}
+
+TEST(BroadcastRingTest, ThreadedOverwriteTortureSmall) {
+  ThreadedOverwriteTorture<Dynamic_16_NxM_1plus0::Ring>();
+}
+
+TEST(BroadcastRingTest, ThreadedOverwriteTortureLarge) {
+  ThreadedOverwriteTorture<Dynamic_256_NxM_1plus0::Ring>();
+}
+
+}  // namespace dvr
+}  // namespace android
diff --git a/libs/vr/libbroadcastring/include/libbroadcastring/broadcast_ring.h b/libs/vr/libbroadcastring/include/libbroadcastring/broadcast_ring.h
new file mode 100644 (file)
index 0000000..69cb648
--- /dev/null
+++ b/libs/vr/libbroadcastring/include/libbroadcastring/broadcast_ring.h
@@ -0,0 +1,668 @@
+#ifndef ANDROID_DVR_BROADCAST_RING_H_
+#define ANDROID_DVR_BROADCAST_RING_H_
+
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <atomic>
+#include <limits>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "android-base/logging.h"
+
+#if ATOMIC_LONG_LOCK_FREE != 2 || ATOMIC_INT_LOCK_FREE != 2
+#error "This file requires lock free atomic uint32_t and long"
+#endif
+
+namespace android {
+namespace dvr {
+
+struct DefaultRingTraits {
+  // Set this to false to allow compatibly expanding the record size.
+  static constexpr bool kUseStaticRecordSize = false;
+
+  // Set this to a nonzero value to fix the number of records in the ring.
+  static constexpr uint32_t kStaticRecordCount = 0;
+
+  // Set this to the max number of records that can be written simultaneously.
+  static constexpr uint32_t kMaxReservedRecords = 1;
+
+  // Set this to the min number of records that must be readable.
+  static constexpr uint32_t kMinAvailableRecords = 1;
+};
+
+// Nonblocking ring suitable for concurrent single-writer, multi-reader access.
+//
+// Readers never block the writer and thus this is a nondeterministically lossy
+// transport in the absence of external synchronization. Don't use this as a
+// transport when deterministic behavior is required.
+//
+// Readers may have a read-only mapping; each reader's state is a single local
+// sequence number.
+//
+// The implementation takes care to avoid data races on record access.
+// Inconsistent data can only be returned if at least 2^32 records are written
+// during the read-side critical section.
+//
+// In addition, both readers and the writer are careful to avoid accesses
+// outside the bounds of the mmap area passed in during initialization even if
+// there is a misbehaving or malicious task with write access to the mmap area.
+//
+// When dynamic record size is enabled, readers use the record size in the ring
+// header when indexing the ring, so that it is possible to extend the record
+// type without breaking the read-side ABI.
+//
+// Avoid calling Put() in a tight loop; there should be significantly more time
+// between successive puts than it takes to read one record from memory to
+// ensure Get() completes quickly. This requirement should not be difficult to
+// achieve for most practical uses; 4kB puts at 10,000Hz is well below the
+// scaling limit on current mobile chips.
+//
+// Example Writer Usage:
+//
+//   using Record = MyRecordType;
+//   using Ring = BroadcastRing<Record>;
+//
+//   uint32_t record_count = kMyDesiredCount;
+//   uint32_t ring_size = Ring::MemorySize(record_count);
+//
+//   size_t page_size = sysconf(_SC_PAGESIZE);
+//   uint32_t mmap_size = (ring_size + (page_size - 1)) & ~(page_size - 1);
+//
+//   // Allocate & map via your preferred mechanism, e.g.
+//   int fd = open("/dev/shm/ring_test", O_CREAT|O_RDWR|O_CLOEXEC, 0600);
+//   CHECK(fd >= 0);
+//   CHECK(!ftruncate(fd, ring_size));
+//   void *mmap_base = mmap(nullptr, mmap_size, PROT_READ|PROT_WRITE,
+//                          MAP_SHARED, fd, 0);
+//   CHECK(mmap_base != MAP_FAILED);
+//   close(fd);
+//
+//   Ring ring = Ring::Create(mmap_base, mmap_size, record_count);
+//
+//   while (!done)
+//     ring.Put(BuildNextRecordBlocking());
+//
+//   CHECK(!munmap(mmap_base, mmap_size));
+//
+// Example Reader Usage:
+//
+//   using Record = MyRecordType;
+//   using Ring = BroadcastRing<Record>;
+//
+//   // Map via your preferred mechanism, e.g.
+//   int fd = open("/dev/shm/ring_test", O_RDONLY|O_CLOEXEC);
+//   CHECK(fd >= 0);
+//   struct stat st;
+//   CHECK(!fstat(fd, &st));
+//   size_t mmap_size = st.st_size;
+//   void *mmap_base = mmap(nullptr, mmap_size, PROT_READ,
+//                          MAP_SHARED, fd, 0);
+//   CHECK(mmap_base != MAP_FAILED);
+//   close(fd);
+//
+//   Ring ring;
+//   bool import_ok;
+//   std::tie(ring, import_ok) = Ring::Import(mmap_base, mmap_size);
+//   CHECK(import_ok);
+//
+//   uint32_t sequence;
+//
+//   // Choose starting point (using "0" is unpredictable but not dangerous)
+//   sequence = ring.GetOldestSequence();  // The oldest available
+//   sequence = ring.GetNewestSequence();  // The newest available
+//   sequence = ring.GetNextSequence();    // The next one produced
+//
+//   while (!done) {
+//     Record record;
+//
+//     if (you_want_to_process_all_available_records) {
+//       while (ring.Get(&sequence, &record)) {
+//         ProcessRecord(sequence, record);
+//         sequence++;
+//       }
+//     } else if (you_want_to_skip_to_the_newest_record) {
+//       if (ring.GetNewest(&sequence, &record)) {
+//         ProcessRecord(sequence, record);
+//         sequence++;
+//       }
+//     }
+//
+//     DoSomethingExpensiveOrBlocking();
+//   }
+//
+//   CHECK(!munmap(mmap_base, mmap_size));
+//
+template <typename RecordType, typename BaseTraits = DefaultRingTraits>
+class BroadcastRing {
+ public:
+  using Record = RecordType;
+  struct Traits : public BaseTraits {
+    // Must have enough space for writers, plus enough space for readers.
+    static constexpr int kMinRecordCount =
+        BaseTraits::kMaxReservedRecords + BaseTraits::kMinAvailableRecords;
+
+    // Count of zero means dynamic, non-zero means static.
+    static constexpr bool kUseStaticRecordCount =
+        (BaseTraits::kStaticRecordCount != 0);
+
+    // If both record size and count are static then the overall size is too.
+    static constexpr bool kIsStaticSize =
+        BaseTraits::kUseStaticRecordSize && kUseStaticRecordCount;
+  };
+
+  static constexpr bool IsPowerOfTwo(uint32_t size) {
+    return (size & (size - 1)) == 0;
+  }
+
+  // Sanity check the options provided in Traits.
+  static_assert(Traits::kMinRecordCount >= 1, "Min record count too small");
+  static_assert(!Traits::kUseStaticRecordCount ||
+                    Traits::kStaticRecordCount >= Traits::kMinRecordCount,
+                "Static record count is too small");
+  static_assert(!Traits::kStaticRecordCount ||
+                    IsPowerOfTwo(Traits::kStaticRecordCount),
+                "Static record count is not a power of two");
+  static_assert(std::is_standard_layout<Record>::value,
+                "Record type must be standard layout");
+
+  BroadcastRing() {}
+
+  // Creates a new ring at |mmap| with |record_count| records.
+  //
+  // There must be at least |MemorySize(record_count)| bytes of space already
+  // allocated at |mmap|. The ring does not take ownership.
+  //
+  // Use this function for dynamically sized rings.
+  static BroadcastRing Create(void* mmap, size_t mmap_size,
+                              uint32_t record_count) {
+    BroadcastRing ring(mmap);
+    CHECK(ring.ValidateGeometry(mmap_size, sizeof(Record), record_count));
+    ring.InitializeHeader(sizeof(Record), record_count);
+    return ring;
+  }
+
+  // Creates a new ring at |mmap|.
+  //
+  // There must be at least |MemorySize()| bytes of space already allocated at
+  // |mmap|. The ring does not take ownership.
+  //
+  // Use this function for statically sized rings.
+  static BroadcastRing Create(void* mmap, size_t mmap_size) {
+    static_assert(Traits::kUseStaticRecordCount,
+                  "Wrong Create() function called for dynamic record count");
+    return Create(mmap, mmap_size, Traits::kStaticRecordCount);
+  }
+
+  // Imports an existing ring at |mmap|.
+  //
+  // Import may fail if the ring parameters in the mmap header are not sensible.
+  // In this case the returned boolean is false; make sure to check this value.
+  static std::tuple<BroadcastRing, bool> Import(void* mmap, size_t mmap_size) {
+    BroadcastRing ring(mmap);
+    uint32_t record_size = 0;
+    uint32_t record_count = 0;
+    if (mmap_size >= sizeof(Header)) {
+      record_size = std::atomic_load_explicit(&ring.header_mmap()->record_size,
+                                              std::memory_order_relaxed);
+      record_count = std::atomic_load_explicit(
+          &ring.header_mmap()->record_count, std::memory_order_relaxed);
+    }
+    bool ok = ring.ValidateGeometry(mmap_size, record_size, record_count);
+    return std::make_tuple(ring, ok);
+  }
+
+  ~BroadcastRing() {}
+
+  // Calculates the space necessary for a ring of size |record_count|.
+  //
+  // Use this function for dynamically sized rings.
+  static constexpr size_t MemorySize(uint32_t record_count) {
+    return sizeof(Header) + sizeof(Record) * record_count;
+  }
+
+  // Calculates the space necessary for a statically sized ring.
+  //
+  // Use this function for statically sized rings.
+  static constexpr size_t MemorySize() {
+    static_assert(
+        Traits::kUseStaticRecordCount,
+        "Wrong MemorySize() function called for dynamic record count");
+    return MemorySize(Traits::kStaticRecordCount);
+  }
+
+  // Writes a record to the ring.
+  //
+  // The oldest record is overwritten if the ring is already full.
+  void Put(const Record& record) {
+    const int kRecordCount = 1;
+    Reserve(kRecordCount);
+    Geometry geometry = GetGeometry();
+    PutRecordInternal(&record, record_mmap_writer(geometry.tail_index));
+    Publish(kRecordCount);
+  }
+
+  // Gets sequence number of the oldest currently available record.
+  uint32_t GetOldestSequence() const {
+    return std::atomic_load_explicit(&header_mmap()->head,
+                                     std::memory_order_relaxed);
+  }
+
+  // Gets sequence number of the first future record.
+  //
+  // If the returned value is passed to Get() and there is no concurrent Put(),
+  // Get() will return false.
+  uint32_t GetNextSequence() const {
+    return std::atomic_load_explicit(&header_mmap()->tail,
+                                     std::memory_order_relaxed);
+  }
+
+  // Gets sequence number of the newest currently available record.
+  uint32_t GetNewestSequence() const { return GetNextSequence() - 1; }
+
+  // Copies the oldest available record with sequence at least |*sequence| to
+  // |record|.
+  //
+  // Returns false if there is no recent enough record available.
+  //
+  // Updates |*sequence| with the sequence number of the record returned. To get
+  // the following record, increment this number by one.
+  //
+  // This function synchronizes with two other operations:
+  //
+  //    (1) Load-Acquire of |tail|
+  //
+  //        Together with the store-release in Publish(), this load-acquire
+  //        ensures each store to a record in PutRecordInternal() happens-before
+  //        any corresponding load in GetRecordInternal().
+  //
+  //        i.e. the stores for the records with sequence numbers < |tail| have
+  //        completed from our perspective
+  //
+  //    (2) Acquire Fence between record access & final load of |head|
+  //
+  //        Together with the release fence in Reserve(), this ensures that if
+  //        GetRecordInternal() loads a value stored in some execution of
+  //        PutRecordInternal(), then the store of |head| in the Reserve() that
+  //        preceded it happens-before our final load of |head|.
+  //
+  //        i.e. if we read a record with sequence number >= |final_head| then
+  //        no later store to that record has completed from our perspective
+  bool Get(uint32_t* sequence /*inout*/, Record* record /*out*/) const {
+    for (;;) {
+      uint32_t tail = std::atomic_load_explicit(&header_mmap()->tail,
+                                                std::memory_order_acquire);
+      uint32_t head = std::atomic_load_explicit(&header_mmap()->head,
+                                                std::memory_order_relaxed);
+
+      if (tail - head > record_count())
+        continue;  // Concurrent modification; re-try.
+
+      if (*sequence - head > tail - head)
+        *sequence = head;  // Out of window, skip forward to first available.
+
+      if (*sequence == tail) return false;  // No new records available.
+
+      Geometry geometry =
+          CalculateGeometry(record_count(), record_size(), *sequence, tail);
+
+      // Compute address explicitly in case record_size > sizeof(Record).
+      RecordStorage* record_storage = record_mmap_reader(geometry.head_index);
+
+      GetRecordInternal(record_storage, record);
+
+      // NB: It is not sufficient to change this to a load-acquire of |head|.
+      std::atomic_thread_fence(std::memory_order_acquire);
+
+      uint32_t final_head = std::atomic_load_explicit(
+          &header_mmap()->head, std::memory_order_relaxed);
+
+      if (final_head - head > *sequence - head)
+        continue;  // Concurrent modification; re-try.
+
+      // Note: Combining the above 4 comparisons gives:
+      // 0 <= final_head - head <= sequence - head < tail - head <= record_count
+      //
+      // We can also write this as:
+      // head <=* final_head <=* sequence <* tail <=* head + record_count
+      //
+      // where <* orders by difference from head: x <* y if x - head < y - head.
+      // This agrees with the order of sequence updates during "put" operations.
+      return true;
+    }
+  }
+
+  // Copies the newest available record with sequence at least |*sequence| to
+  // |record|.
+  //
+  // Returns false if there is no recent enough record available.
+  //
+  // Updates |*sequence| with the sequence number of the record returned. To get
+  // the following record, increment this number by one.
+  bool GetNewest(uint32_t* sequence, Record* record) const {
+    uint32_t newest_sequence = GetNewestSequence();
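+    // |*sequence| already refers to the next future record, so there is
+    // nothing newer to return yet.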
+    if (*sequence == newest_sequence + 1) return false;
+    *sequence = newest_sequence;
+    return Get(sequence, record);
+  }
+
+  uint32_t record_count() const { return record_count_internal(); }
+  uint32_t record_size() const { return record_size_internal(); }
+  static constexpr uint32_t mmap_alignment() { return alignof(Mmap); }
+
+ private:
+  struct Header {
+    // Record size for reading out of the ring. Writers always write the full
+    // length; readers may need to read a prefix of each record.
+    std::atomic<uint32_t> record_size;
+
+    // Number of records in the ring.
+    std::atomic<uint32_t> record_count;
+
+    // Readable region is [head % record_count, tail % record_count).
+    //
+    // The region in [tail % record_count, head % record_count) was either never
+    // populated or is being updated.
+    //
+    // These are sequence numbers, not indexes - indexes should be computed
+    // with a modulus.
+    //
+    // To ensure consistency:
+    //
+    // (1) Writes advance |head| past any updated records before writing to
+    //     them, and advance |tail| after they are written.
+    // (2) Readers check |tail| before reading data and |head| after,
+    //     making sure to discard any data that was written to concurrently.
+    std::atomic<uint32_t> head;
+    std::atomic<uint32_t> tail;
+  };
+
+  // Store using the standard word size.
+  using StorageType = long;  // NOLINT
+
+  // Always require 8 byte alignment so that the same record sizes are legal on
+  // 32 and 64 bit builds.
+  static constexpr size_t kRecordAlignment = 8;
+  static_assert(kRecordAlignment % sizeof(StorageType) == 0,
+                "Bad record alignment");
+
+  struct RecordStorage {
+    // This is accessed with relaxed atomics to prevent data races on the
+    // contained data, which would be undefined behavior.
+    std::atomic<StorageType> data[sizeof(Record) / sizeof(StorageType)];
+  };
+
+  static_assert(sizeof(StorageType) *
+                        std::extent<decltype(RecordStorage::data)>() ==
+                    sizeof(Record),
+                "Record length must be a multiple of sizeof(StorageType)");
+
+  struct Geometry {
+    // Static geometry.
+    uint32_t record_count;
+    uint32_t record_size;
+
+    // Copy of atomic sequence counts.
+    uint32_t head;
+    uint32_t tail;
+
+    // First index of readable region.
+    uint32_t head_index;
+
+    // First index of writable region.
+    uint32_t tail_index;
+
+    // Number of records in readable region.
+    uint32_t count;
+
+    // Number of records in writable region.
+    uint32_t space;
+  };
+
+  // Mmap area layout.
+  //
+  // Readers should not index directly into |records| as this is not valid when
+  // dynamic record sizes are used; use record_mmap_reader() instead.
+  struct Mmap {
+    Header header;
+    RecordStorage records[];
+  };
+
+  static_assert(std::is_standard_layout<Mmap>::value,
+                "Mmap must be standard layout");
+  static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
+                "Lockless atomics contain extra state");
+  static_assert(sizeof(std::atomic<StorageType>) == sizeof(StorageType),
+                "Lockless atomics contain extra state");
+
+  explicit BroadcastRing(void* mmap) {
+    CHECK_EQ(0U, reinterpret_cast<uintptr_t>(mmap) % alignof(Mmap));
+    data_.mmap = reinterpret_cast<Mmap*>(mmap);
+  }
+
+  // Initializes the mmap area header for a new ring.
+  void InitializeHeader(uint32_t record_size, uint32_t record_count) {
+    constexpr uint32_t kInitialSequence = -256;  // Force an early wrap.
+    std::atomic_store_explicit(&header_mmap()->record_size, record_size,
+                               std::memory_order_relaxed);
+    std::atomic_store_explicit(&header_mmap()->record_count, record_count,
+                               std::memory_order_relaxed);
+    std::atomic_store_explicit(&header_mmap()->head, kInitialSequence,
+                               std::memory_order_relaxed);
+    std::atomic_store_explicit(&header_mmap()->tail, kInitialSequence,
+                               std::memory_order_relaxed);
+  }
+
+  // Validates ring geometry.
+  //
+  // Ring geometry is validated carefully on import and then cached. This allows
+  // us to avoid out-of-range accesses even if the parameters in the header are
+  // later changed.
+  bool ValidateGeometry(size_t mmap_size, uint32_t header_record_size,
+                        uint32_t header_record_count) {
+    set_record_size(header_record_size);
+    set_record_count(header_record_count);
+
+    if (record_size() != header_record_size) return false;
+    if (record_count() != header_record_count) return false;
+    if (record_count() < Traits::kMinRecordCount) return false;
+    if (record_size() < sizeof(Record)) return false;
+    if (record_size() % kRecordAlignment != 0) return false;
+    if (!IsPowerOfTwo(record_count())) return false;
+
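+    // Guard against overflow while computing the required size: the division
+    // check detects multiplication wraparound and the next check detects
+    // addition wraparound.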
+    size_t memory_size = record_count() * record_size();
+    if (memory_size / record_size() != record_count()) return false;
+    if (memory_size + sizeof(Header) < memory_size) return false;
+    if (memory_size + sizeof(Header) > mmap_size) return false;
+
+    return true;
+  }
+
+  // Copies a record into the ring.
+  //
+  // This is done with relaxed atomics because otherwise it is racy according to
+  // the C++ memory model. This is very low overhead once optimized.
+  static inline void PutRecordInternal(const Record* in, RecordStorage* out) {
+    StorageType data[sizeof(Record) / sizeof(StorageType)];
+    memcpy(data, in, sizeof(*in));
+    for (size_t i = 0; i < std::extent<decltype(data)>(); ++i) {
+      std::atomic_store_explicit(&out->data[i], data[i],
+                                 std::memory_order_relaxed);
+    }
+  }
+
+  // Copies a record out of the ring.
+  //
+  // This is done with relaxed atomics because otherwise it is racy according to
+  // the C++ memory model. This is very low overhead once optimized.
+  static inline void GetRecordInternal(RecordStorage* in, Record* out) {
+    StorageType data[sizeof(Record) / sizeof(StorageType)];
+    for (size_t i = 0; i < std::extent<decltype(data)>(); ++i) {
+      data[i] =
+          std::atomic_load_explicit(&in->data[i], std::memory_order_relaxed);
+    }
+    memcpy(out, &data, sizeof(*out));
+  }
+
+  // Converts a record's sequence number into a storage index.
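+  // |record_count| is validated to be a power of two, so the mask below is
+  // equivalent to "sequence % record_count".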
+  static uint32_t SequenceToIndex(uint32_t sequence, uint32_t record_count) {
+    return sequence & (record_count - 1);
+  }
+
+  // Computes readable & writable ranges from ring parameters.
+  static Geometry CalculateGeometry(uint32_t record_count, uint32_t record_size,
+                                    uint32_t head, uint32_t tail) {
+    Geometry geometry;
+    geometry.record_count = record_count;
+    geometry.record_size = record_size;
+    DCHECK_EQ(0U, geometry.record_size % kRecordAlignment);
+    geometry.head = head;
+    geometry.tail = tail;
+    geometry.head_index = SequenceToIndex(head, record_count);
+    geometry.tail_index = SequenceToIndex(tail, record_count);
+    geometry.count = geometry.tail - geometry.head;
+    DCHECK_LE(geometry.count, record_count);
+    geometry.space = geometry.record_count - geometry.count;
+    return geometry;
+  }
+
+  // Gets the current ring readable & writable regions.
+  //
+  // This is always safe from the writing thread since it is the only
+  // thread allowed to update the header.
+  Geometry GetGeometry() const {
+    return CalculateGeometry(
+        record_count(), record_size(),
+        std::atomic_load_explicit(&header_mmap()->head,
+                                  std::memory_order_relaxed),
+        std::atomic_load_explicit(&header_mmap()->tail,
+                                  std::memory_order_relaxed));
+  }
+
+  // Makes space for at least |reserve_count| records.
+  //
+  // There is nothing to prevent overwriting records that have concurrent
+  // readers. We do however ensure that this situation can be detected: the
+  // fence ensures the |head| update will be the first update seen by readers,
+  // and readers check this value after reading and discard data that may have
+  // been concurrently modified.
+  void Reserve(uint32_t reserve_count) {
+    Geometry geometry = GetGeometry();
+    DCHECK_LE(reserve_count, Traits::kMaxReservedRecords);
+    uint32_t needed =
+        (geometry.space >= reserve_count ? 0 : reserve_count - geometry.space);
+
+    std::atomic_store_explicit(&header_mmap()->head, geometry.head + needed,
+                               std::memory_order_relaxed);
+
+    // NB: It is not sufficient to change this to a store-release of |head|.
+    std::atomic_thread_fence(std::memory_order_release);
+  }
+
+  // Makes |publish_count| records visible to readers.
+  //
+  // Space must have been reserved by a previous call to Reserve().
+  void Publish(uint32_t publish_count) {
+    Geometry geometry = GetGeometry();
+    DCHECK_LE(publish_count, geometry.space);
+    std::atomic_store_explicit(&header_mmap()->tail,
+                               geometry.tail + publish_count,
+                               std::memory_order_release);
+  }
+
+  // Helpers to compute addresses in mmap area.
+  Mmap* mmap() const { return data_.mmap; }
+  Header* header_mmap() const { return &data_.mmap->header; }
+  RecordStorage* record_mmap_writer(uint32_t index) const {
+    DCHECK_EQ(sizeof(Record), record_size());
+    return &data_.mmap->records[index];
+  }
+  RecordStorage* record_mmap_reader(uint32_t index) const {
+    if (Traits::kUseStaticRecordSize) {
+      return &data_.mmap->records[index];
+    } else {
+      // Calculate the location of a record in the ring without assuming that
+      // sizeof(Record) == record_size.
+      return reinterpret_cast<RecordStorage*>(
+          reinterpret_cast<char*>(data_.mmap->records) + index * record_size());
+    }
+  }
+
+  // The following horrifying template gunk enables us to store just the mmap
+  // base pointer for compile-time statically sized rings. Dynamically sized
+  // rings also store the validated copy of the record size & count.
+  //
+  // This boils down to: use a compile time constant if available, and otherwise
+  // load the value that was validated on import from a member variable.
+  template <typename T = Traits>
+  typename std::enable_if<T::kUseStaticRecordSize, uint32_t>::type
+  record_size_internal() const {
+    return sizeof(Record);
+  }
+
+  template <typename T = Traits>
+  typename std::enable_if<!T::kUseStaticRecordSize, uint32_t>::type
+  record_size_internal() const {
+    return data_.record_size;
+  }
+
+  template <typename T = Traits>
+  typename std::enable_if<T::kUseStaticRecordSize, void>::type set_record_size(
+      uint32_t /*record_size*/) {}
+
+  template <typename T = Traits>
+  typename std::enable_if<!T::kUseStaticRecordSize, void>::type set_record_size(
+      uint32_t record_size) {
+    data_.record_size = record_size;
+  }
+
+  template <typename T = Traits>
+  typename std::enable_if<T::kUseStaticRecordCount, uint32_t>::type
+  record_count_internal() const {
+    return Traits::kStaticRecordCount;
+  }
+
+  template <typename T = Traits>
+  typename std::enable_if<!T::kUseStaticRecordCount, uint32_t>::type
+  record_count_internal() const {
+    return data_.record_count;
+  }
+
+  template <typename T = Traits>
+  typename std::enable_if<T::kUseStaticRecordCount, void>::type
+  set_record_count(uint32_t /*record_count*/) const {}
+
+  template <typename T = Traits>
+  typename std::enable_if<!T::kUseStaticRecordCount, void>::type
+  set_record_count(uint32_t record_count) {
+    data_.record_count = record_count;
+  }
+
+  // Data we need to store for statically sized rings.
+  struct DataStaticSize {
+    Mmap* mmap = nullptr;
+  };
+
+  // Data we need to store for dynamically sized rings.
+  struct DataDynamicSize {
+    Mmap* mmap = nullptr;
+
+    // These are cached to make sure misbehaving writers cannot cause
+    // out-of-bounds memory accesses by updating the values in the mmap header.
+    uint32_t record_size = 0;
+    uint32_t record_count = 0;
+  };
+
+  using DataStaticOrDynamic =
+      typename std::conditional<Traits::kIsStaticSize, DataStaticSize,
+                                DataDynamicSize>::type;
+
+  DataStaticOrDynamic data_;
+};
+
+}  // namespace dvr
+}  // namespace android
+
+#endif  // ANDROID_DVR_BROADCAST_RING_H_