From 575e78c41ece0dec969d31f46be563d4eb7ae43b Mon Sep 17 00:00:00 2001 From: Andreas Gampe <agampe@google.com> Date: Mon, 3 Nov 2014 23:41:03 -0800 Subject: [PATCH] ART: Replace COMPILE_ASSERT with static_assert (runtime) Replace all occurrences of COMPILE_ASSERT in the runtime tree. Change-Id: I01e420899c760094fb342cc6cb9e692dd670a0b2 --- runtime/atomic.h | 12 ++-- runtime/base/allocator.h | 2 +- runtime/base/bit_vector.cc | 4 +- runtime/base/casts.h | 2 +- runtime/base/logging.cc | 4 +- runtime/base/mutex.h | 6 +- runtime/class_linker.cc | 4 +- runtime/dex_file.h | 8 +-- .../quick/quick_trampoline_entrypoints.cc | 15 +++-- runtime/gc/accounting/card_table.cc | 4 +- runtime/gc/space/bump_pointer_space.h | 4 +- runtime/handle_scope-inl.h | 2 +- runtime/hprof/hprof.cc | 4 +- runtime/mirror/class-inl.h | 2 +- runtime/mirror/class.cc | 2 +- runtime/mirror/class.h | 2 +- runtime/quick/inline_method_analyser.cc | 76 +++++++++------------- runtime/quick/inline_method_analyser.h | 5 +- runtime/thread.h | 6 +- runtime/utils.h | 4 +- runtime/verifier/instruction_flags.h | 3 +- 21 files changed, 81 insertions(+), 90 deletions(-) diff --git a/runtime/atomic.h b/runtime/atomic.h index e57c0c03e..cf6127705 100644 --- a/runtime/atomic.h +++ b/runtime/atomic.h @@ -293,17 +293,17 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> { typedef Atomic<int32_t> AtomicInteger; -COMPILE_ASSERT(sizeof(AtomicInteger) == sizeof(int32_t), weird_atomic_int_size); -COMPILE_ASSERT(alignof(AtomicInteger) == alignof(int32_t), - atomic_int_alignment_differs_from_that_of_underlying_type); -COMPILE_ASSERT(sizeof(Atomic<int64_t>) == sizeof(int64_t), weird_atomic_int64_size); +static_assert(sizeof(AtomicInteger) == sizeof(int32_t), "Weird AtomicInteger size"); +static_assert(alignof(AtomicInteger) == alignof(int32_t), + "AtomicInteger alignment differs from that of underlying type"); +static_assert(sizeof(Atomic<int64_t>) == sizeof(int64_t), "Weird Atomic size"); // Assert the alignment of 64-bit integers is 64-bit.
This isn't true on certain 32-bit // architectures (e.g. x86-32) but we know that 64-bit integers here are arranged to be 8-byte // aligned. #if defined(__LP64__) - COMPILE_ASSERT(alignof(Atomic<int64_t>) == alignof(int64_t), - atomic_int64_alignment_differs_from_that_of_underlying_type); + static_assert(alignof(Atomic<int64_t>) == alignof(int64_t), + "Atomic alignment differs from that of underlying type"); #endif } // namespace art diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h index 3ca9ebbd5..30f7f128e 100644 --- a/runtime/base/allocator.h +++ b/runtime/base/allocator.h @@ -119,7 +119,7 @@ class TrackingAllocatorImpl { // Used internally by STL data structures. TrackingAllocatorImpl() throw() { - COMPILE_ASSERT(kTag < kAllocatorTagCount, must_be_less_than_count); + static_assert(kTag < kAllocatorTagCount, "kTag must be less than kAllocatorTagCount"); } // Enables an allocator for objects of one type to allocate storage for objects of another type. diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc index 63e9355d2..4390180cf 100644 --- a/runtime/base/bit_vector.cc +++ b/runtime/base/bit_vector.cc @@ -41,8 +41,8 @@ BitVector::BitVector(uint32_t start_bits, storage_size_(storage_size), allocator_(allocator), expandable_(expandable) { - COMPILE_ASSERT(sizeof(*storage_) == kWordBytes, check_word_bytes); - COMPILE_ASSERT(sizeof(*storage_) * 8u == kWordBits, check_word_bits); + static_assert(sizeof(*storage_) == kWordBytes, "word bytes"); + static_assert(sizeof(*storage_) * 8u == kWordBits, "word bits"); if (storage_ == nullptr) { storage_size_ = BitsToWords(start_bits); storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * kWordBytes)); diff --git a/runtime/base/casts.h b/runtime/base/casts.h index 138c2fda8..c7e39a29f 100644 --- a/runtime/base/casts.h +++ b/runtime/base/casts.h @@ -77,7 +77,7 @@ template <class Dest, class Source> inline Dest bit_cast(const Source& source) { // Compile time assertion: sizeof(Dest) == sizeof(Source) // A compile error here means your Dest
and Source have different sizes. - COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), verify_sizes_are_equal); + static_assert(sizeof(Dest) == sizeof(Source), "sizes should be equal"); Dest dest; memcpy(&dest, &source, sizeof(dest)); return dest; diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc index 46c3538ca..d3a2655c9 100644 --- a/runtime/base/logging.cc +++ b/runtime/base/logging.cc @@ -213,8 +213,8 @@ static const android_LogPriority kLogSeverityToAndroidLogPriority[] = { ANDROID_LOG_VERBOSE, ANDROID_LOG_DEBUG, ANDROID_LOG_INFO, ANDROID_LOG_WARN, ANDROID_LOG_ERROR, ANDROID_LOG_FATAL, ANDROID_LOG_FATAL }; -COMPILE_ASSERT(arraysize(kLogSeverityToAndroidLogPriority) == INTERNAL_FATAL + 1, - mismatch_in_size_of_kLogSeverityToAndroidLogPriority_and_values_in_LogSeverity); +static_assert(arraysize(kLogSeverityToAndroidLogPriority) == INTERNAL_FATAL + 1, + "Mismatch in size of kLogSeverityToAndroidLogPriority and values in LogSeverity"); #endif void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_severity, diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index 628231a27..d589eb69a 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -435,7 +435,7 @@ class SCOPED_LOCKABLE MutexLock { DISALLOW_COPY_AND_ASSIGN(MutexLock); }; // Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)". -#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name) +#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name") // Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon // construction and releases it upon destruction. @@ -457,7 +457,7 @@ class SCOPED_LOCKABLE ReaderMutexLock { }; // Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of // "ReaderMutexLock mu(lock)". 
-#define ReaderMutexLock(x) COMPILE_ASSERT(0, reader_mutex_lock_declaration_missing_variable_name) +#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name") // Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon // construction and releases it upon destruction. @@ -479,7 +479,7 @@ class SCOPED_LOCKABLE WriterMutexLock { }; // Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of // "WriterMutexLock mu(lock)". -#define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name) +#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name") // Global mutexes corresponding to the levels above. class Locks { diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index f5ac35074..08efb70ed 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -5863,8 +5863,8 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) { "[S", "[Ljava/lang/StackTraceElement;", }; - COMPILE_ASSERT(arraysize(class_roots_descriptors) == size_t(kClassRootsMax), - mismatch_between_class_descriptors_and_class_root_enum); + static_assert(arraysize(class_roots_descriptors) == size_t(kClassRootsMax), + "Mismatch between class descriptors and class-root enum"); const char* descriptor = class_roots_descriptors[class_root]; CHECK(descriptor != nullptr); diff --git a/runtime/dex_file.h b/runtime/dex_file.h index a07a5b6c9..8ced66463 100644 --- a/runtime/dex_file.h +++ b/runtime/dex_file.h @@ -206,10 +206,10 @@ class DexFile { // (class or interface). These are all in the lower 16b and do not contain runtime flags. uint32_t GetJavaAccessFlags() const { // Make sure that none of our runtime-only flags are set. 
- COMPILE_ASSERT((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags, - valid_class_flags_not_subset_of_java_flags); - COMPILE_ASSERT((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags, - valid_interface_flags_not_subset_of_java_flags); + static_assert((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags, + "Valid class flags not a subset of Java flags"); + static_assert((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags, + "Valid interface flags not a subset of Java flags"); if ((access_flags_ & kAccInterface) != 0) { // Interface. diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 5cb51789c..2cebd6e7a 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -232,13 +232,14 @@ class QuickArgumentVisitor { + sizeof(StackReference)), // Skip StackReference. gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) { - COMPILE_ASSERT(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), knum_of_quick_fpr_arg_unexpected); - COMPILE_ASSERT(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled), - kdouble_align_unexpected); + static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), + "Number of Quick FPR arguments unexpected"); + static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled), + "Double alignment unexpected"); // For register alignment, we want to assume that counters(fpr_double_index_) are even if the // next register is even. 
- COMPILE_ASSERT(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0, - knum_quick_fpr_args_not_even); + static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0, + "Number of Quick FPR arguments not even"); } virtual ~QuickArgumentVisitor() {} @@ -965,8 +966,8 @@ template class BuildNativeCallFrameStateMachine { delegate_(delegate) { // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff // the next register is even; counting down is just to make the compiler happy... - COMPILE_ASSERT(kNumNativeGprArgs % 2 == 0U, knum_native_gpr_args_not_even); - COMPILE_ASSERT(kNumNativeFprArgs % 2 == 0U, knum_native_fpr_args_not_even); + static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even"); + static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even"); } virtual ~BuildNativeCallFrameStateMachine() {} diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc index 9a6f2b20b..b7b6099e6 100644 --- a/runtime/gc/accounting/card_table.cc +++ b/runtime/gc/accounting/card_table.cc @@ -66,7 +66,7 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) { CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg; // All zeros is the correct initial value; all clean. 
Anonymous mmaps are initialized to zero, we // don't clear the card table to avoid unnecessary pages being allocated - COMPILE_ASSERT(kCardClean == 0, card_clean_must_be_0); + static_assert(kCardClean == 0, "kCardClean must be 0"); uint8_t* cardtable_begin = mem_map->Begin(); CHECK(cardtable_begin != NULL); @@ -98,7 +98,7 @@ void CardTable::ClearSpaceCards(space::ContinuousSpace* space) { } void CardTable::ClearCardTable() { - COMPILE_ASSERT(kCardClean == 0, clean_card_must_be_0); + static_assert(kCardClean == 0, "kCardClean must be 0"); mem_map_->MadviseDontNeedAndZero(); } diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h index 98a3189f1..089ede445 100644 --- a/runtime/gc/space/bump_pointer_space.h +++ b/runtime/gc/space/bump_pointer_space.h @@ -186,8 +186,8 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { size_t unused_; // Ensures alignment of kAlignment. }; - COMPILE_ASSERT(sizeof(BlockHeader) % kAlignment == 0, - continuous_block_must_be_kAlignment_aligned); + static_assert(sizeof(BlockHeader) % kAlignment == 0, + "continuous block must be kAlignment aligned"); friend class collector::MarkSweep; DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace); diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h index b0aadec0f..9ddaf61c9 100644 --- a/runtime/handle_scope-inl.h +++ b/runtime/handle_scope-inl.h @@ -27,7 +27,7 @@ namespace art { template inline StackHandleScope::StackHandleScope(Thread* self, mirror::Object* fill_value) : HandleScope(self->GetTopHandleScope(), kNumReferences), self_(self), pos_(0) { - COMPILE_ASSERT(kNumReferences >= 1, stack_handle_scope_must_contain_at_least_1_reference); + static_assert(kNumReferences >= 1, "StackHandleScope must contain at least 1 reference"); // TODO: Figure out how to use a compile assert. 
CHECK_EQ(&storage_[0], GetReferences()); for (size_t i = 0; i < kNumReferences; ++i) { diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc index a2d37b3de..14d743250 100644 --- a/runtime/hprof/hprof.cc +++ b/runtime/hprof/hprof.cc @@ -637,8 +637,8 @@ class Hprof { // U4: size of identifiers. We're using addresses as IDs and our heap references are stored // as uint32_t. // Note of warning: hprof-conv hard-codes the size of identifiers to 4. - COMPILE_ASSERT(sizeof(mirror::HeapReference) == sizeof(uint32_t), - UnexpectedHeapReferenceSize); + static_assert(sizeof(mirror::HeapReference) == sizeof(uint32_t), + "Unexpected HeapReference size"); U4_TO_BUF_BE(buf, 0, sizeof(uint32_t)); fwrite(buf, 1, sizeof(uint32_t), header_fp_); diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index 892bf444f..5f72dbe89 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -279,7 +279,7 @@ inline bool Class::ResolvedFieldAccessTest(Class* access_to, ArtField* field, template inline bool Class::ResolvedMethodAccessTest(Class* access_to, ArtMethod* method, uint32_t method_idx, DexCache* dex_cache) { - COMPILE_ASSERT(throw_on_failure || throw_invoke_type == kStatic, non_default_throw_invoke_type); + static_assert(throw_on_failure || throw_invoke_type == kStatic, "Non-default throw invoke type"); DCHECK_EQ(use_referrers_cache, dex_cache == nullptr); if (UNLIKELY(!this->CanAccess(access_to))) { // The referrer class can't access the method's declaring class but may still be able diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index 828d9861e..61bfe41ae 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -117,7 +117,7 @@ void Class::SetStatus(Status new_status, Thread* self) { self->SetException(gc_safe_throw_location, old_exception.Get()); self->SetExceptionReportedToInstrumentation(is_exception_reported); } - COMPILE_ASSERT(sizeof(Status) == sizeof(uint32_t), size_of_status_not_uint32); + 
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32"); if (Runtime::Current()->IsActiveTransaction()) { SetField32Volatile(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status); } else { diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index 68fbb8b4e..21cf53f8b 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -131,7 +131,7 @@ class MANAGED Class FINAL : public Object { template Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - COMPILE_ASSERT(sizeof(Status) == sizeof(uint32_t), size_of_status_not_uint32); + static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32"); return static_cast( GetField32Volatile(OFFSET_OF_OBJECT_MEMBER(Class, status_))); } diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc index d8fc2776d..3415e8f9b 100644 --- a/runtime/quick/inline_method_analyser.cc +++ b/runtime/quick/inline_method_analyser.cc @@ -35,50 +35,38 @@ namespace art { -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET), - check_iget_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE), - check_iget_wide_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT), - check_iget_object_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BOOLEAN), - check_iget_boolean_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BYTE), - check_iget_byte_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_CHAR), - check_iget_char_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_SHORT), - check_iget_short_type); - -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT), - check_iput_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_WIDE), - check_iput_wide_type); 
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_OBJECT), - check_iput_object_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BOOLEAN), - check_iput_boolean_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BYTE), - check_iput_byte_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_CHAR), - check_iput_char_type); -COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_SHORT), - check_iput_short_type); - -COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET) == - InlineMethodAnalyser::IPutVariant(Instruction::IPUT), check_iget_iput_variant); -COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE) == - InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE), check_iget_iput_wide_variant); -COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT) == - InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT), check_iget_iput_object_variant); -COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN) == - InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN), check_iget_iput_boolean_variant); -COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE) == - InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE), check_iget_iput_byte_variant); -COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) == - InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR), check_iget_iput_char_variant); -COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) == - InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), check_iget_iput_short_variant); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET), "iget type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE), "iget_wide type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT), 
+ "iget_object type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BOOLEAN), + "iget_boolean type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BYTE), "iget_byte type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_CHAR), "iget_char type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_SHORT), "iget_short type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT), "iput type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_WIDE), "iput_wide type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_OBJECT), + "iput_object type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BOOLEAN), + "iput_boolean type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BYTE), "iput_byte type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_CHAR), "iput_char type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_SHORT), "iput_short type"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT), "iget/iput variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE), "iget/iput_wide variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT), "iget/iput_object variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN), "iget/iput_boolean variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE), "iget/iput_byte variant"); 
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR), "iget/iput_char variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), "iget/iput_short variant"); // This is used by compiler and debugger. We look into the dex cache for resolved methods and // fields. However, in the context of the debugger, not all methods and fields are resolved. Since diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h index a2ae3970f..a8d430895 100644 --- a/runtime/quick/inline_method_analyser.h +++ b/runtime/quick/inline_method_analyser.h @@ -118,7 +118,7 @@ struct InlineIGetIPutData { uint32_t is_volatile : 1; uint32_t field_offset : 31; }; -COMPILE_ASSERT(sizeof(InlineIGetIPutData) == sizeof(uint64_t), InvalidSizeOfInlineIGetIPutData); +static_assert(sizeof(InlineIGetIPutData) == sizeof(uint64_t), "Invalid size of InlineIGetIPutData"); struct InlineReturnArgData { uint16_t arg; @@ -127,7 +127,8 @@ struct InlineReturnArgData { uint16_t reserved : 14; uint32_t reserved2; }; -COMPILE_ASSERT(sizeof(InlineReturnArgData) == sizeof(uint64_t), InvalidSizeOfInlineReturnArgData); +static_assert(sizeof(InlineReturnArgData) == sizeof(uint64_t), + "Invalid size of InlineReturnArgData"); struct InlineMethod { InlineMethodOpcode opcode; diff --git a/runtime/thread.h b/runtime/thread.h index 694dbda97..c243413fe 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -927,7 +927,7 @@ class Thread { // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409 DISALLOW_COPY_AND_ASSIGN(StateAndFlags); }; - COMPILE_ASSERT(sizeof(StateAndFlags) == sizeof(int32_t), weird_state_and_flags_size); + static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size"); static void ThreadExitCallback(void* arg); @@ -963,8 +963,8 @@ class Thread { } union StateAndFlags state_and_flags; 
- COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t), - sizeof_state_and_flags_and_int32_are_different); + static_assert(sizeof(union StateAndFlags) == sizeof(int32_t), + "Size of state_and_flags and int32 are different"); // A non-zero value is used to tell the current thread to enter a safe point // at the next poll. diff --git a/runtime/utils.h b/runtime/utils.h index 39011e29c..669fe6cd0 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -84,7 +84,7 @@ static constexpr bool IsPowerOfTwo(T x) { template static inline bool IsAligned(T x) { - COMPILE_ASSERT((n & (n - 1)) == 0, n_not_power_of_two); + static_assert((n & (n - 1)) == 0, "n is not a power of two"); return (x & (n - 1)) == 0; } @@ -222,7 +222,7 @@ static inline bool NeedsEscaping(uint16_t ch) { // of V >= size of U (compile-time checked). template static inline V bit_cast(U in) { - COMPILE_ASSERT(sizeof(U) <= sizeof(V), size_of_u_not_le_size_of_v); + static_assert(sizeof(U) <= sizeof(V), "Size of U not <= size of V"); union { U u; V v; diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h index 36a6e554d..e67067cdd 100644 --- a/runtime/verifier/instruction_flags.h +++ b/runtime/verifier/instruction_flags.h @@ -130,7 +130,8 @@ class InstructionFlags FINAL { uint8_t flags_; }; -COMPILE_ASSERT(sizeof(InstructionFlags) == sizeof(uint8_t), err); +static_assert(sizeof(InstructionFlags) == sizeof(uint8_t), + "Size of InstructionFlags not equal to uint8_t"); } // namespace verifier } // namespace art -- 2.11.0