From 14d90579f013b374638b599361970557ed4b3f09 Mon Sep 17 00:00:00 2001 From: Roland Levillain Date: Thu, 16 Jul 2015 10:52:26 +0100 Subject: [PATCH] Use (D)CHECK_ALIGNED more. Change-Id: I9d740f6a88d01e028d4ddc3e4e62b0a73ea050af --- compiler/dex/quick/arm/assemble_arm.cc | 2 +- compiler/dex/quick/arm/utility_arm.cc | 12 ++++---- compiler/dex/quick/arm64/assemble_arm64.cc | 6 ++-- compiler/dex/quick/mips/utility_mips.cc | 14 ++++----- compiler/dex/quick/x86/utility_x86.cc | 14 ++++----- .../linker/arm/relative_patcher_thumb2_test.cc | 2 +- compiler/linker/arm64/relative_patcher_arm64.cc | 4 +-- .../linker/arm64/relative_patcher_arm64_test.cc | 2 +- compiler/utils/arm/assembler_arm.cc | 4 +-- compiler/utils/arm/assembler_thumb2.cc | 36 +++++++++++----------- runtime/base/histogram-inl.h | 2 +- runtime/class_linker.cc | 6 ++-- runtime/exception_test.cc | 2 +- runtime/fault_handler.cc | 2 +- runtime/gc/accounting/read_barrier_table.h | 4 +-- runtime/gc/accounting/space_bitmap.cc | 2 +- runtime/gc/allocator/rosalloc.cc | 10 +++--- runtime/gc/collector/concurrent_copying.cc | 10 +++--- runtime/gc/collector/mark_sweep.cc | 4 +-- runtime/gc/collector/semi_space-inl.h | 2 +- runtime/gc/heap.cc | 4 +-- runtime/gc/space/bump_pointer_space-inl.h | 2 +- runtime/gc/space/large_object_space.cc | 2 +- runtime/gc/space/malloc_space.cc | 12 ++++---- runtime/gc/space/region_space-inl.h | 6 ++-- runtime/gc/space/region_space.cc | 4 +-- runtime/interpreter/interpreter_common.h | 8 ++--- runtime/lock_word.h | 2 +- runtime/mem_map.cc | 16 +++++----- runtime/oat.cc | 2 +- runtime/stack.cc | 2 +- 31 files changed, 100 insertions(+), 100 deletions(-) diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc index df4a9f204..5f911db38 100644 --- a/compiler/dex/quick/arm/assemble_arm.cc +++ b/compiler/dex/quick/arm/assemble_arm.cc @@ -1298,7 +1298,7 @@ void ArmMir2Lir::AssembleLIR() { */ delta &= ~0x3; } - DCHECK_EQ((delta & 0x3), 0); + DCHECK_ALIGNED(delta, 4); // First, a sanity check for cases we shouldn't see now if (kIsDebugBuild && (((lir->opcode == kThumbAddPcRel) && (delta > 1020)) || ((lir->opcode == kThumbLdrPcRel) && (delta > 1020)))) { diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index 2ef92f851..062f7aff6 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ b/compiler/dex/quick/arm/utility_arm.cc @@ -880,7 +880,7 @@ LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor LIR* ArmMir2Lir::LoadStoreUsingInsnWithOffsetImm8Shl2(ArmOpcode opcode, RegStorage r_base, int displacement, RegStorage r_src_dest, RegStorage r_work) { - DCHECK_EQ(displacement & 3, 0); + DCHECK_ALIGNED(displacement, 4); constexpr int kOffsetMask = 0xff << 2; int encoded_disp = (displacement & kOffsetMask) >> 2; // Within range of the instruction. 
RegStorage r_ptr = r_base; @@ -942,7 +942,7 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag already_generated = true; break; } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); scale = 2; if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) && (displacement >= 0)) { @@ -959,14 +959,14 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag } break; case kUnsignedHalf: - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); scale = 1; short_form = all_low && (displacement >> (5 + scale)) == 0; opcode16 = kThumbLdrhRRI5; opcode32 = kThumb2LdrhRRI12; break; case kSignedHalf: - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); scale = 1; DCHECK_EQ(opcode16, kThumbBkpt); // Not available. opcode32 = kThumb2LdrshRRI12; @@ -1096,7 +1096,7 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora already_generated = true; break; } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); scale = 2; if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) { short_form = true; @@ -1109,7 +1109,7 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora break; case kUnsignedHalf: case kSignedHalf: - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); scale = 1; short_form = all_low && (displacement >> (5 + scale)) == 0; opcode16 = kThumbStrhRRI5; diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc index b78fb80aa..25c69d19e 100644 --- a/compiler/dex/quick/arm64/assemble_arm64.cc +++ b/compiler/dex/quick/arm64/assemble_arm64.cc @@ -909,7 +909,7 @@ void Arm64Mir2Lir::AssembleLIR() { CodeOffset target = target_lir->offset + ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment); int32_t delta = target - pc; - DCHECK_EQ(delta & 0x3, 0); + DCHECK_ALIGNED(delta, 4); if (!IS_SIGNED_IMM26(delta >> 2)) { LOG(FATAL) << "Invalid jump range in kFixupT1Branch"; } @@ -933,7 +933,7 @@ void Arm64Mir2Lir::AssembleLIR() { CodeOffset target = target_lir->offset + ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment); int32_t delta = target - pc; - DCHECK_EQ(delta & 0x3, 0); + DCHECK_ALIGNED(delta, 4); if (!IS_SIGNED_IMM19(delta >> 2)) { LOG(FATAL) << "Invalid jump range in kFixupLoad"; } @@ -965,7 +965,7 @@ void Arm64Mir2Lir::AssembleLIR() { CodeOffset target = target_lir->offset + ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment); int32_t delta = target - pc; - DCHECK_EQ(delta & 0x3, 0); + DCHECK_ALIGNED(delta, 4); // Check if branch offset can be encoded in tbz/tbnz. 
if (!IS_SIGNED_IMM14(delta >> 2)) { DexOffset dalvik_offset = lir->dalvik_offset; diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc index 37e5804f1..ec2475a7f 100644 --- a/compiler/dex/quick/mips/utility_mips.cc +++ b/compiler/dex/quick/mips/utility_mips.cc @@ -714,7 +714,7 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora } else { opcode = kMipsFldc1; } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; } is64bit = true; @@ -736,15 +736,15 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora DCHECK(r_dest.IsDouble()); } } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; case kUnsignedHalf: opcode = kMipsLhu; - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); break; case kSignedHalf: opcode = kMipsLh; - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); break; case kUnsignedByte: opcode = kMipsLbu; @@ -891,7 +891,7 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStor } else { opcode = kMipsFsdc1; } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; } is64bit = true; @@ -913,12 +913,12 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStor DCHECK(r_src.IsDouble()); } } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; case kUnsignedHalf: case kSignedHalf: opcode = kMipsSh; - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); break; case kUnsignedByte: case kSignedByte: diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index 61a1becac..b16ae982f 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -659,7 +659,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int opcode = is_array ? kX86Mov32RA : kX86Mov32RM; } // TODO: double store is to unaligned address - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; case kWord: if (cu_->target64) { @@ -677,15 +677,15 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int opcode = is_array ? kX86MovssRA : kX86MovssRM; DCHECK(r_dest.IsFloat()); } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; case kUnsignedHalf: opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM; - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); break; case kSignedHalf: opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM; - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); break; case kUnsignedByte: opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM; @@ -812,7 +812,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int opcode = is_array ? kX86Mov32AR : kX86Mov32MR; } // TODO: double store is to unaligned address - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; case kWord: if (cu_->target64) { @@ -831,13 +831,13 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int opcode = is_array ? kX86MovssAR : kX86MovssMR; DCHECK(r_src.IsSingle()); } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); consider_non_temporal = true; break; case kUnsignedHalf: case kSignedHalf: opcode = is_array ? 
kX86Mov16AR : kX86Mov16MR; - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); break; case kUnsignedByte: case kSignedByte: diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc index b4aa286d2..13f67e6fd 100644 --- a/compiler/linker/arm/relative_patcher_thumb2_test.cc +++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc @@ -50,7 +50,7 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest { // We want to put the method3 at a very precise offset. const uint32_t method3_offset = method1_offset + distance_without_thunks; - CHECK(IsAligned<kArmAlignment>(method3_offset - sizeof(OatQuickMethodHeader))); + CHECK_ALIGNED(method3_offset - sizeof(OatQuickMethodHeader), kArmAlignment); // Calculate size of method2 so that we put method3 at the correct place. const uint32_t method2_offset = diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc index 29355d696..6b9c530d7 100644 --- a/compiler/linker/arm64/relative_patcher_arm64.cc +++ b/compiler/linker/arm64/relative_patcher_arm64.cc @@ -108,7 +108,7 @@ uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) { if (!current_method_thunks_.empty()) { uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64); if (kIsDebugBuild) { - CHECK(IsAligned<kAdrpThunkSize>(current_method_thunks_.size())); + CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize); size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize; CHECK_LE(num_thunks, processed_adrp_thunks_); for (size_t i = 0u; i != num_thunks; ++i) { @@ -203,7 +203,7 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, if ((adrp & 0x9f000000u) != 0x90000000u) { CHECK(fix_cortex_a53_843419_); CHECK_EQ(adrp & 0xfc000000u, 0x14000000u); // B - CHECK(IsAligned<kAdrpThunkSize>(current_method_thunks_.size())); + CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize); size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize; CHECK_LE(num_thunks, processed_adrp_thunks_); uint32_t b_offset = patch_offset - literal_offset + pc_insn_offset; diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc index 1bad8a929..b3af4c6a0 100644 --- a/compiler/linker/arm64/relative_patcher_arm64_test.cc +++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc @@ -66,7 +66,7 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { // We want to put the method3 at a very precise offset. const uint32_t last_method_offset = method1_offset + distance_without_thunks; const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader); - CHECK(IsAligned<kArm64Alignment>(gap_end)); + CHECK_ALIGNED(gap_end, kArm64Alignment); // Fill the gap with intermediate methods in chunks of 2MiB and the last in [2MiB, 4MiB). // (This allows deduplicating the small chunks to avoid using 256MiB of memory for +-128MiB diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index 09d22703f..0e3e08c2d 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -252,11 +252,11 @@ uint32_t Address::encodingThumbLdrdStrd() const { if (offset_ < 0) { int32_t off = -offset_; CHECK_LT(off, 1024); - CHECK_EQ((off & 3 /* 0b11 */), 0); // Must be multiple of 4. + CHECK_ALIGNED(off, 4); encoding = (am ^ (1 << kUShift)) | off >> 2; // Flip U to adjust sign.
} else { CHECK_LT(offset_, 1024); - CHECK_EQ((offset_ & 3 /* 0b11 */), 0); // Must be multiple of 4. + CHECK_ALIGNED(offset_, 4); encoding = am | offset_ >> 2; } encoding |= static_cast(rn_) << 16; diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc index 88b2f2cc4..584388672 100644 --- a/compiler/utils/arm/assembler_thumb2.cc +++ b/compiler/utils/arm/assembler_thumb2.cc @@ -101,7 +101,7 @@ uint32_t Thumb2Assembler::AdjustFixups() { } // Adjust literal pool labels for padding. - DCHECK_EQ(current_code_size & 1u, 0u); + DCHECK_ALIGNED(current_code_size, 2); uint32_t literals_adjustment = current_code_size + (current_code_size & 2) - buffer_.Size(); if (literals_adjustment != 0u) { for (Literal& literal : literals_) { @@ -152,7 +152,7 @@ void Thumb2Assembler::EmitLiterals() { // Load literal instructions (LDR, LDRD, VLDR) require 4-byte alignment. // We don't support byte and half-word literals. uint32_t code_size = buffer_.Size(); - DCHECK_EQ(code_size & 1u, 0u); + DCHECK_ALIGNED(code_size, 2); if ((code_size & 2u) != 0u) { Emit16(0); } @@ -168,7 +168,7 @@ void Thumb2Assembler::EmitLiterals() { } inline int16_t Thumb2Assembler::BEncoding16(int32_t offset, Condition cond) { - DCHECK_EQ(offset & 1, 0); + DCHECK_ALIGNED(offset, 2); int16_t encoding = B15 | B14; if (cond != AL) { DCHECK(IsInt<9>(offset)); @@ -181,7 +181,7 @@ inline int16_t Thumb2Assembler::BEncoding16(int32_t offset, Condition cond) { } inline int32_t Thumb2Assembler::BEncoding32(int32_t offset, Condition cond) { - DCHECK_EQ(offset & 1, 0); + DCHECK_ALIGNED(offset, 2); int32_t s = (offset >> 31) & 1; // Sign bit. int32_t encoding = B31 | B30 | B29 | B28 | B15 | (s << 26) | // Sign bit goes to bit 26. @@ -205,7 +205,7 @@ inline int32_t Thumb2Assembler::BEncoding32(int32_t offset, Condition cond) { inline int16_t Thumb2Assembler::CbxzEncoding16(Register rn, int32_t offset, Condition cond) { DCHECK(!IsHighRegister(rn)); - DCHECK_EQ(offset & 1, 0); + DCHECK_ALIGNED(offset, 2); DCHECK(IsUint<7>(offset)); DCHECK(cond == EQ || cond == NE); return B15 | B13 | B12 | B8 | (cond == NE ? 
B11 : 0) | static_cast(rn) | @@ -250,7 +250,7 @@ inline int32_t Thumb2Assembler::MovModImmEncoding32(Register rd, int32_t value) inline int16_t Thumb2Assembler::LdrLitEncoding16(Register rt, int32_t offset) { DCHECK(!IsHighRegister(rt)); - DCHECK_EQ(offset & 3, 0); + DCHECK_ALIGNED(offset, 4); DCHECK(IsUint<10>(offset)); return B14 | B11 | (static_cast(rt) << 8) | (offset >> 2); } @@ -261,7 +261,7 @@ inline int32_t Thumb2Assembler::LdrLitEncoding32(Register rt, int32_t offset) { } inline int32_t Thumb2Assembler::LdrdEncoding32(Register rt, Register rt2, Register rn, int32_t offset) { - DCHECK_EQ(offset & 3, 0); + DCHECK_ALIGNED(offset, 4); CHECK(IsUint<10>(offset)); return B31 | B30 | B29 | B27 | B24 /* P = 1 */ | B23 /* U = 1 */ | B22 | 0 /* W = 0 */ | B20 | @@ -270,7 +270,7 @@ inline int32_t Thumb2Assembler::LdrdEncoding32(Register rt, Register rt2, Regist } inline int32_t Thumb2Assembler::VldrsEncoding32(SRegister sd, Register rn, int32_t offset) { - DCHECK_EQ(offset & 3, 0); + DCHECK_ALIGNED(offset, 4); CHECK(IsUint<10>(offset)); return B31 | B30 | B29 | B27 | B26 | B24 | B23 /* U = 1 */ | B20 | B11 | B9 | @@ -281,7 +281,7 @@ inline int32_t Thumb2Assembler::VldrsEncoding32(SRegister sd, Register rn, int32 } inline int32_t Thumb2Assembler::VldrdEncoding32(DRegister dd, Register rn, int32_t offset) { - DCHECK_EQ(offset & 3, 0); + DCHECK_ALIGNED(offset, 4); CHECK(IsUint<10>(offset)); return B31 | B30 | B29 | B27 | B26 | B24 | B23 /* U = 1 */ | B20 | B11 | B9 | B8 | @@ -294,7 +294,7 @@ inline int32_t Thumb2Assembler::VldrdEncoding32(DRegister dd, Register rn, int32 inline int16_t Thumb2Assembler::LdrRtRnImm5Encoding16(Register rt, Register rn, int32_t offset) { DCHECK(!IsHighRegister(rt)); DCHECK(!IsHighRegister(rn)); - DCHECK_EQ(offset & 3, 0); + DCHECK_ALIGNED(offset, 4); DCHECK(IsUint<7>(offset)); return B14 | B13 | B11 | (static_cast(rn) << 3) | static_cast(rt) | @@ -1423,7 +1423,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED, thumb_opcode = 3U /* 0b11 */; opcode_shift = 12; CHECK_LT(immediate, (1u << 9)); - CHECK_EQ((immediate & 3u /* 0b11 */), 0u); + CHECK_ALIGNED(immediate, 4); // Remove rd and rn from instruction by orring it with immed and clearing bits. rn = R0; @@ -1437,7 +1437,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED, thumb_opcode = 5U /* 0b101 */; opcode_shift = 11; CHECK_LT(immediate, (1u << 10)); - CHECK_EQ((immediate & 3u /* 0b11 */), 0u); + CHECK_ALIGNED(immediate, 4); // Remove rn from instruction. rn = R0; @@ -1474,7 +1474,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED, thumb_opcode = 0x61 /* 0b1100001 */; opcode_shift = 7; CHECK_LT(immediate, (1u << 9)); - CHECK_EQ((immediate & 3u /* 0b11 */), 0u); + CHECK_ALIGNED(immediate, 4); // Remove rd and rn from instruction by orring it with immed and clearing bits. rn = R0; @@ -1652,7 +1652,7 @@ inline uint32_t Thumb2Assembler::Fixup::GetSizeInBytes() const { inline size_t Thumb2Assembler::Fixup::LiteralPoolPaddingSize(uint32_t current_code_size) { // The code size must be a multiple of 2. - DCHECK_EQ(current_code_size & 1u, 0u); + DCHECK_ALIGNED(current_code_size, 2); // If it isn't a multiple of 4, we need to add a 2-byte padding before the literal pool. return current_code_size & 2; } @@ -1697,7 +1697,7 @@ inline int32_t Thumb2Assembler::Fixup::GetOffset(uint32_t current_code_size) con // Load literal instructions round down the PC+4 to a multiple of 4, so if the PC // isn't a multiple of 2, we need to adjust. 
Since we already adjusted for the target // being aligned, current PC alignment can be inferred from diff. - DCHECK_EQ(diff & 1, 0); + DCHECK_ALIGNED(diff, 2); diff = diff + (diff & 2); DCHECK_GE(diff, 0); break; @@ -2045,7 +2045,7 @@ void Thumb2Assembler::EmitLoadStore(Condition cond, if (sp_relative) { // SP relative, 10 bit offset. CHECK_LT(offset, (1 << 10)); - CHECK_EQ((offset & 3 /* 0b11 */), 0); + CHECK_ALIGNED(offset, 4); encoding |= rd << 8 | offset >> 2; } else { // No SP relative. The offset is shifted right depending on @@ -2058,12 +2058,12 @@ void Thumb2Assembler::EmitLoadStore(Condition cond, } else if (half) { // 6 bit offset, shifted by 1. CHECK_LT(offset, (1 << 6)); - CHECK_EQ((offset & 1 /* 0b1 */), 0); + CHECK_ALIGNED(offset, 2); offset >>= 1; } else { // 7 bit offset, shifted by 2. CHECK_LT(offset, (1 << 7)); - CHECK_EQ((offset & 3 /* 0b11 */), 0); + CHECK_ALIGNED(offset, 4); offset >>= 2; } encoding |= rn << 3 | offset << 6; diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h index aba376287..03980e327 100644 --- a/runtime/base/histogram-inl.h +++ b/runtime/base/histogram-inl.h @@ -66,7 +66,7 @@ inline void Histogram<Value>::GrowBuckets(Value new_max) { while (max_ < new_max) { // If we have reached the maximum number of buckets, merge buckets together. if (frequency_.size() >= max_buckets_) { - CHECK(IsAligned<2>(frequency_.size())); + CHECK_ALIGNED(frequency_.size(), 2); // We double the width of each bucket to reduce the number of buckets by a factor of 2. bucket_width_ *= 2; const size_t limit = frequency_.size() / 2; diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 1ec02aa44..122c35fdc 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -249,13 +249,13 @@ static void ShuffleForward(size_t* current_field_idx, if (!gaps->empty() && gaps->top().size >= n) { FieldGap gap = gaps->top(); gaps->pop(); - DCHECK(IsAligned<n>(gap.start_offset)); + DCHECK_ALIGNED(gap.start_offset, n); field->SetOffset(MemberOffset(gap.start_offset)); if (gap.size > n) { AddFieldGap(gap.start_offset + n, gap.start_offset + gap.size, gaps); } } else { - DCHECK(IsAligned<n>(field_offset->Uint32Value())); + DCHECK_ALIGNED(field_offset->Uint32Value(), n); field->SetOffset(*field_offset); *field_offset = MemberOffset(field_offset->Uint32Value() + n); } @@ -5174,7 +5174,7 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_ field_offset = MemberOffset(RoundUp(field_offset.Uint32Value(), 4)); AddFieldGap(old_offset.Uint32Value(), field_offset.Uint32Value(), &gaps); } - DCHECK(IsAligned<sizeof(mirror::HeapReference<mirror::Object>)>(field_offset.Uint32Value())); + DCHECK_ALIGNED(field_offset.Uint32Value(), sizeof(mirror::HeapReference<mirror::Object>)); grouped_and_sorted_fields.pop_front(); num_reference_fields++; field->SetOffset(field_offset); diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc index bc3ba216e..de4b3f437 100644 --- a/runtime/exception_test.cc +++ b/runtime/exception_test.cc @@ -93,7 +93,7 @@ class ExceptionTest : public CommonRuntimeTest { // NOTE: Don't align the code (it will not be executed) but check that the Thumb2 // adjustment will be a NOP, see ArtMethod::EntryPointToCodePointer().
- CHECK_EQ(mapping_table_offset & 1u, 0u); + CHECK_ALIGNED(mapping_table_offset, 2); const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset]; method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*)); diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc index 5f915663c..47f9b1b88 100644 --- a/runtime/fault_handler.cc +++ b/runtime/fault_handler.cc @@ -331,7 +331,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che // If we don't have a potential method, we're outta here. VLOG(signals) << "potential method: " << method_obj; // TODO: Check linear alloc and image. - DCHECK(IsAligned<sizeof(void*)>(ArtMethod::ObjectSize(sizeof(void*)))) + DCHECK_ALIGNED(ArtMethod::ObjectSize(sizeof(void*)), sizeof(void*)) << "ArtMethod is not pointer aligned"; if (method_obj == nullptr || !IsAligned(method_obj)) { VLOG(signals) << "no method"; diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h index 436df923c..86266e250 100644 --- a/runtime/gc/accounting/read_barrier_table.h +++ b/runtime/gc/accounting/read_barrier_table.h @@ -51,8 +51,8 @@ class ReadBarrierTable { void Clear(uint8_t* start_addr, uint8_t* end_addr) { DCHECK(IsValidHeapAddr(start_addr)) << start_addr; DCHECK(IsValidHeapAddr(end_addr)) << end_addr; - DCHECK(IsAligned<kRegionSize>(start_addr)); - DCHECK(IsAligned<kRegionSize>(end_addr)); + DCHECK_ALIGNED(start_addr, kRegionSize); + DCHECK_ALIGNED(end_addr, kRegionSize); uint8_t* entry_start = EntryFromAddr(start_addr); uint8_t* entry_end = EntryFromAddr(end_addr); memset(reinterpret_cast(entry_start), 0, entry_end - entry_start); diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index 6546eb424..cdeaa50ce 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -79,7 +79,7 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create( template<size_t kAlignment> void SpaceBitmap<kAlignment>::SetHeapLimit(uintptr_t new_end) { - DCHECK(IsAligned<kBitsPerIntPtrT * kAlignment>(new_end)); + DCHECK_ALIGNED(new_end, kBitsPerIntPtrT * kAlignment); size_t new_size = OffsetToIndex(new_end - heap_begin_) * sizeof(intptr_t); if (new_size < bitmap_size_) { bitmap_size_ = new_size; diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc index bd10f7bbf..abaa97f2d 100644 --- a/runtime/gc/allocator/rosalloc.cc +++ b/runtime/gc/allocator/rosalloc.cc @@ -63,7 +63,7 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity, DCHECK_EQ(RoundUp(capacity, kPageSize), capacity); DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity); CHECK_LE(capacity, max_capacity); - CHECK(IsAligned<kPageSize>(page_release_size_threshold_)); + CHECK_ALIGNED(page_release_size_threshold_, kPageSize); if (!initialized_) { Initialize(); } @@ -349,7 +349,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) { fpr->magic_num_ = kMagicNumFree; } fpr->SetByteSize(this, byte_size); - DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this))); + DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize); DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end()); if (!free_page_runs_.empty()) { @@ -1567,7 +1567,7 @@ void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_by FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize); DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end()); size_t fpr_size = fpr->ByteSize(this); - DCHECK(IsAligned<kPageSize>(fpr_size)); + DCHECK_ALIGNED(fpr_size, kPageSize); void* start = fpr; if (kIsDebugBuild) { // In the debug build, the first page of a free page run @@ -1916,7 +1916,7 @@ void
RosAlloc::Verify() { CHECK(free_page_runs_.find(fpr) != free_page_runs_.end()) << "An empty page must belong to the free page run set"; size_t fpr_size = fpr->ByteSize(this); - CHECK(IsAligned<kPageSize>(fpr_size)) + CHECK_ALIGNED(fpr_size, kPageSize) << "A free page run size isn't page-aligned : " << fpr_size; size_t num_pages = fpr_size / kPageSize; CHECK_GT(num_pages, static_cast<size_t>(0)) @@ -2163,7 +2163,7 @@ size_t RosAlloc::ReleasePages() { // to the next page. if (free_page_runs_.find(fpr) != free_page_runs_.end()) { size_t fpr_size = fpr->ByteSize(this); - DCHECK(IsAligned<kPageSize>(fpr_size)); + DCHECK_ALIGNED(fpr_size, kPageSize); uint8_t* start = reinterpret_cast<uint8_t*>(fpr); reclaimed_bytes += ReleasePageRange(start, start + fpr_size); size_t pages = fpr_size / kPageSize; diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index b5d5c3484..8bbace956 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -1585,7 +1585,7 @@ void ConcurrentCopying::VisitRoots( // Fill the given memory block with a dummy object. Used to fill in a // copy of objects that was lost in race. void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) { - CHECK(IsAligned<kObjectAlignment>(byte_size)); + CHECK_ALIGNED(byte_size, kObjectAlignment); memset(dummy_obj, 0, byte_size); mirror::Class* int_array_class = mirror::IntArray::GetArrayClass(); CHECK(int_array_class != nullptr); @@ -1618,7 +1618,7 @@ void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t by // Reuse the memory blocks that were copy of objects that were lost in race. mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) { // Try to reuse the blocks that were unused due to CAS failures. - CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size)); + CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment); Thread* self = Thread::Current(); size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment); MutexLock mu(self, skipped_blocks_lock_); @@ -1637,7 +1637,7 @@ mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) { // Not found. return nullptr; } - CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size)); + CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment); CHECK_GE(it->first - alloc_size, min_object_size) << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size; } @@ -1648,7 +1648,7 @@ mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) { uint8_t* addr = it->second; CHECK_GE(byte_size, alloc_size); CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr))); - CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size)); + CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment); if (kVerboseMode) { LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast(addr) << ", " << byte_size; } @@ -1656,7 +1656,7 @@ mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) { memset(addr, 0, byte_size); if (byte_size > alloc_size) { // Return the remainder to the map.
- CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size)); + CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment); CHECK_GE(byte_size - alloc_size, min_object_size); FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size), byte_size - alloc_size); diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index e0d6d6b5c..4eb15e295 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -831,8 +831,8 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) { // Align up the end address. For example, the image space's end // may not be card-size-aligned. card_end = AlignUp(card_end, accounting::CardTable::kCardSize); - DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin)); - DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end)); + DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize); + DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize); // Calculate how many bytes of heap we will scan, const size_t address_range = card_end - card_begin; // Calculate how much address range each task gets. diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h index 7b19dc93a..a7de44fc9 100644 --- a/runtime/gc/collector/semi_space-inl.h +++ b/runtime/gc/collector/semi_space-inl.h @@ -34,7 +34,7 @@ class BitmapSetSlowPathVisitor { void operator()(const mirror::Object* obj) const { CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_"; // Marking a large object, make sure its aligned as a sanity check. - CHECK(IsAligned<kPageSize>(obj)); + CHECK_ALIGNED(obj, kPageSize); } private: diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 795d2a2f3..2b94cf169 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -1431,10 +1431,10 @@ void Heap::VerifyObjectBody(mirror::Object* obj) { if (UNLIKELY(static_cast(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) { return; } - CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj; + CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned"; mirror::Class* c = obj->GetFieldObject(mirror::Object::ClassOffset()); CHECK(c != nullptr) << "Null class in object " << obj; - CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj; + CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj; CHECK(VerifyClassClass(c)); if (verify_object_mode_ > kVerifyObjectModeFast) { diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h index d9ad9a38c..338a41eaa 100644 --- a/runtime/gc/space/bump_pointer_space-inl.h +++ b/runtime/gc/space/bump_pointer_space-inl.h @@ -63,7 +63,7 @@ inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t } inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) { - DCHECK(IsAligned<kAlignment>(num_bytes)); + DCHECK_ALIGNED(num_bytes, kAlignment); uint8_t* old_end; uint8_t* new_end; do { diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc index a913e596d..2798b21f9 100644 --- a/runtime/gc/space/large_object_space.cc +++ b/runtime/gc/space/large_object_space.cc @@ -440,7 +440,7 @@ size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) { AllocationInfo* next_next_info = next_info->GetNextInfo(); // Next next info can't be free since we always coalesce.
DCHECK(!next_next_info->IsFree()); - DCHECK(IsAligned<kAlignment>(next_next_info->ByteSize())); + DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment); new_free_info = next_next_info; new_free_size += next_next_info->GetPrevFreeBytes(); RemoveFreePrev(next_next_info); diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc index b014217fe..3a0d814a2 100644 --- a/runtime/gc/space/malloc_space.cc +++ b/runtime/gc/space/malloc_space.cc @@ -46,8 +46,8 @@ MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map, if (create_bitmaps) { size_t bitmap_index = bitmap_index_++; static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize); - CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin()))); - CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End()))); + CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->Begin()), kGcCardSize); + CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->End()), kGcCardSize); live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create( StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)), Begin(), NonGrowthLimitCapacity())); @@ -164,10 +164,10 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l // alloc spaces. RevokeAllThreadLocalBuffers(); SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize))); - DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_)); - DCHECK(IsAligned<accounting::CardTable::kCardSize>(End())); - DCHECK(IsAligned<kPageSize>(begin_)); - DCHECK(IsAligned<kPageSize>(End())); + DCHECK_ALIGNED(begin_, accounting::CardTable::kCardSize); + DCHECK_ALIGNED(End(), accounting::CardTable::kCardSize); + DCHECK_ALIGNED(begin_, kPageSize); + DCHECK_ALIGNED(End(), kPageSize); size_t size = RoundUp(Size(), kPageSize); // Trimming the heap should be done by the caller since we may have invalidated the accounting // stored in between objects. diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h index 1cdf69dbe..db005f755 100644 --- a/runtime/gc/space/region_space-inl.h +++ b/runtime/gc/space/region_space-inl.h @@ -43,7 +43,7 @@ template inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) { - DCHECK(IsAligned<kAlignment>(num_bytes)); + DCHECK_ALIGNED(num_bytes, kAlignment); mirror::Object* obj; if (LIKELY(num_bytes <= kRegionSize)) { // Non-large object.
@@ -115,7 +115,7 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* byte size_t* usable_size, size_t* bytes_tl_bulk_allocated) { DCHECK(IsAllocated() && IsInToSpace()); - DCHECK(IsAligned<kAlignment>(num_bytes)); + DCHECK_ALIGNED(num_bytes, kAlignment); Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_); uint8_t* old_top; uint8_t* new_top; @@ -266,7 +266,7 @@ template mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) { - DCHECK(IsAligned<kAlignment>(num_bytes)); + DCHECK_ALIGNED(num_bytes, kAlignment); DCHECK_GT(num_bytes, kRegionSize); size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize; DCHECK_GT(num_regs, 0U); diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc index 814ab6ce9..9a2d0c6d3 100644 --- a/runtime/gc/space/region_space.cc +++ b/runtime/gc/space/region_space.cc @@ -287,7 +287,7 @@ void RegionSpace::Dump(std::ostream& os) const { void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) { DCHECK(Contains(large_obj)); - DCHECK(IsAligned<kRegionSize>(large_obj)); + DCHECK_ALIGNED(large_obj, kRegionSize); MutexLock mu(Thread::Current(), region_lock_); uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj); uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize); @@ -366,7 +366,7 @@ void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) { uint8_t* tlab_start = thread->GetTlabStart(); DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr); if (tlab_start != nullptr) { - DCHECK(IsAligned<kRegionSize>(tlab_start)); + DCHECK_ALIGNED(tlab_start, kRegionSize); Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start)); DCHECK(r->IsAllocated()); DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize); diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index a12a58d48..776b6a352 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -446,10 +446,10 @@ static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& return 3; } const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]); - DCHECK(IsAligned<4>(keys)); + DCHECK_ALIGNED(keys, 4); int32_t first_key = keys[0]; const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]); - DCHECK(IsAligned<4>(targets)); + DCHECK_ALIGNED(targets, 4); int32_t index = test_val - first_key; if (index >= 0 && index < size) { return targets[index]; } @@ -474,9 +474,9 @@ static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& return 3; } const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]); - DCHECK(IsAligned<4>(keys)); + DCHECK_ALIGNED(keys, 4); const int32_t* entries = keys + size; - DCHECK(IsAligned<4>(entries)); + DCHECK_ALIGNED(entries, 4); int lo = 0; int hi = size - 1; while (lo <= hi) { diff --git a/runtime/lock_word.h b/runtime/lock_word.h index a290575ba..245f8b8e4 100644 --- a/runtime/lock_word.h +++ b/runtime/lock_word.h @@ -118,7 +118,7 @@ class LockWord { } static LockWord FromForwardingAddress(size_t target) { - DCHECK(IsAligned < 1 << kStateSize>(target)); + DCHECK_ALIGNED(target, (1 << kStateSize)); return LockWord((target >> kStateSize) | (kStateForwardingAddress << kStateShift)); } diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index dbae7f8d9..8df8f96ea 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -585,10 +585,10 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro DCHECK_GE(new_end,
Begin()); DCHECK_LE(new_end, End()); DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_); - DCHECK(IsAligned<kPageSize>(begin_)); - DCHECK(IsAligned<kPageSize>(base_begin_)); - DCHECK(IsAligned<kPageSize>(reinterpret_cast<uint8_t*>(base_begin_) + base_size_)); - DCHECK(IsAligned<kPageSize>(new_end)); + DCHECK_ALIGNED(begin_, kPageSize); + DCHECK_ALIGNED(base_begin_, kPageSize); + DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize); + DCHECK_ALIGNED(new_end, kPageSize); uint8_t* old_end = begin_ + size_; uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_; uint8_t* new_base_end = new_end; @@ -603,7 +603,7 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro uint8_t* tail_base_begin = new_base_end; size_t tail_base_size = old_base_end - new_base_end; DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end); - DCHECK(IsAligned<kPageSize>(tail_base_size)); + DCHECK_ALIGNED(tail_base_size, kPageSize); #ifdef USE_ASHMEM // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are @@ -726,7 +726,7 @@ void MemMap::DumpMapsLocked(std::ostream& os, bool terse) { size_t num_gaps = 0; size_t num = 1u; size_t size = map->BaseSize(); - CHECK(IsAligned<kPageSize>(size)); + CHECK_ALIGNED(size, kPageSize); void* end = map->BaseEnd(); while (it != maps_end && it->second->GetProtect() == map->GetProtect() && @@ -740,12 +740,12 @@ void MemMap::DumpMapsLocked(std::ostream& os, bool terse) { } size_t gap = reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end); - CHECK(IsAligned<kPageSize>(gap)); + CHECK_ALIGNED(gap, kPageSize); os << "~0x" << std::hex << (gap / kPageSize) << "P"; num = 0u; size = 0u; } - CHECK(IsAligned<kPageSize>(it->second->BaseSize())); + CHECK_ALIGNED(it->second->BaseSize(), kPageSize); ++num; size += it->second->BaseSize(); end = it->second->BaseEnd(); diff --git a/runtime/oat.cc b/runtime/oat.cc index 1dd2aad61..5725b6ff6 100644 --- a/runtime/oat.cc +++ b/runtime/oat.cc @@ -97,7 +97,7 @@ OatHeader::OatHeader(InstructionSet instruction_set, image_file_location_oat_checksum_ = image_file_location_oat_checksum; UpdateChecksum(&image_file_location_oat_checksum_, sizeof(image_file_location_oat_checksum_)); - CHECK(IsAligned<kPageSize>(image_file_location_oat_data_begin)); + CHECK_ALIGNED(image_file_location_oat_data_begin, kPageSize); image_file_location_oat_data_begin_ = image_file_location_oat_data_begin; UpdateChecksum(&image_file_location_oat_data_begin_, sizeof(image_file_location_oat_data_begin_)); diff --git a/runtime/stack.cc b/runtime/stack.cc index 6f3b0a3bf..fede91c94 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -904,7 +904,7 @@ int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item, CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size); } } - DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U); + DCHECK_ALIGNED(frame_size, kStackAlignment); DCHECK_NE(reg, -1); int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa) + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa) -- 2.11.0
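Note for readers who do not work in ART: CHECK_ALIGNED(x, n) and DCHECK_ALIGNED(x, n) wrap the same power-of-two test that the replaced CHECK(IsAligned<n>(x)) and hand-written DCHECK_EQ(x & (n - 1), 0) calls performed, with the alignment passed as an ordinary argument. The snippet below is a minimal, self-contained sketch of that behavior for illustration only; the real definitions live in ART's runtime/base/macros.h and runtime/base/logging.h, report failures through LOG(FATAL) with the offending address, and key off kIsDebugBuild rather than NDEBUG, so treat every name here as an approximation rather than the authoritative implementation.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Integral overload: true when x is a multiple of n (n must be a power of two).
template <int n, typename T>
inline bool IsAligned(T x) {
  static_assert((n & (n - 1)) == 0, "n is not a power of two");
  return (x & (n - 1)) == 0;
}

// Pointer overload: test the numeric value of the address.
template <int n, typename T>
inline bool IsAligned(T* x) {
  return IsAligned<n>(reinterpret_cast<uintptr_t>(x));
}

// CHECK_ALIGNED takes the alignment as a plain argument, so call sites read
// CHECK_ALIGNED(offset, 4) instead of CHECK(IsAligned<4>(offset)) or
// DCHECK_EQ(offset & 0x3, 0), making the intent explicit. A CHECK fires in
// every build type; the message formatting here is simplified.
#define CHECK_ALIGNED(value, alignment)                                   \
  do {                                                                    \
    if (!IsAligned<(alignment)>(value)) {                                 \
      std::fprintf(stderr, "Check failed: %s is not aligned to %ld\n",    \
                   #value, static_cast<long>(alignment));                 \
      std::abort();                                                       \
    }                                                                     \
  } while (false)

// DCHECK_ALIGNED is only active in debug builds (NDEBUG stands in for ART's
// kIsDebugBuild) and compiles away otherwise.
#ifdef NDEBUG
#define DCHECK_ALIGNED(value, alignment) do {} while (false)
#else
#define DCHECK_ALIGNED(value, alignment) CHECK_ALIGNED(value, alignment)
#endif

int main() {
  int displacement = 1020;
  DCHECK_ALIGNED(displacement, 4);    // Passes: 1020 is a multiple of 4.
  size_t region_bytes = 256 * 1024;
  CHECK_ALIGNED(region_bytes, 4096);  // Passes: 256 KiB is page aligned.
  return 0;
}

Built as an ordinary C++ program, both checks above pass; changing displacement to 1022 makes DCHECK_ALIGNED abort in a debug build and become a no-op when compiled with -DNDEBUG, which mirrors why the patch can use the D-variant freely on hot code paths.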