"CodeGen ",
"ParallelMove ",
"GraphChecker ",
+ "LSE ",
+ "Verifier ",
};
template <bool kCount>
kArenaAllocParallelMoveResolver,
kArenaAllocGraphChecker,
kArenaAllocLSE,
+ kArenaAllocVerifier,
kNumArenaAllocKinds
};
// Pointer to char data, not necessarily zero terminated.
const char* ptr_;
// Length of data.
- size_type length_;
+ size_type length_;
};
// This large function is defined inline so that in a fairly common case where
// VRegA
bool HasVRegA() const;
- int32_t VRegA() const;
+ ALWAYS_INLINE int32_t VRegA() const;
int8_t VRegA_10t() const {
return VRegA_10t(Fetch16(0));
return insn_flags_[index];
}
+// Non-const overload: returns a mutable reference so verification passes can
+// set/clear per-instruction flags (opcode, branch target, visited, changed).
+inline InstructionFlags& MethodVerifier::GetInstructionFlags(size_t index) {
+ return insn_flags_[index];
+}
+
+// Returns the current value of the class-loader handle; requires the mutator
+// lock (declared SHARED_REQUIRES on the header declaration).
inline mirror::ClassLoader* MethodVerifier::GetClassLoader() {
return class_loader_.Get();
}
// On VLOG(verifier), should we dump the whole state when we run into a hard failure?
static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
+// Backing vector storage comes from the verifier's scoped arena; allocations
+// are tagged kArenaAllocVerifier for arena accounting.
+PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& arena)
+ : register_lines_(arena.Adapter(kArenaAllocVerifier)) {}
+
void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
uint32_t insns_size, uint16_t registers_size,
MethodVerifier* verifier) {
DCHECK_GT(insns_size, 0U);
- register_lines_.reset(new RegisterLine*[insns_size]());
- size_ = insns_size;
+ register_lines_.resize(insns_size);
for (uint32_t i = 0; i < insns_size; i++) {
bool interesting = false;
switch (mode) {
break;
}
if (interesting) {
- register_lines_[i] = RegisterLine::Create(registers_size, verifier);
+ register_lines_[i].reset(RegisterLine::Create(registers_size, verifier));
}
}
}
-PcToRegisterLineTable::~PcToRegisterLineTable() {
- for (size_t i = 0; i < size_; i++) {
- delete register_lines_[i];
- if (kIsDebugBuild) {
- register_lines_[i] = nullptr;
- }
- }
-}
+// Empty: register_lines_ now holds ArenaUniquePtr entries, so destruction of
+// the vector releases each RegisterLine; no manual delete loop is needed.
+PcToRegisterLineTable::~PcToRegisterLineTable() {}
// Note: returns true on failure.
ALWAYS_INLINE static inline bool FailOrAbort(MethodVerifier* verifier, bool condition,
bool need_precise_constants, bool verify_to_dump,
bool allow_thread_suspension)
: self_(self),
- reg_types_(can_load_classes),
+ arena_stack_(Runtime::Current()->GetArenaPool()),
+ arena_(&arena_stack_),
+ reg_types_(can_load_classes, arena_),
+ reg_table_(arena_),
work_insn_idx_(DexFile::kDexNoIndex),
dex_method_idx_(dex_method_idx),
mirror_method_(method),
}
// Allocate and initialize an array to hold instruction data.
- insn_flags_.reset(new InstructionFlags[code_item_->insns_size_in_code_units_]());
+ insn_flags_.reset(arena_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
+ DCHECK(insn_flags_ != nullptr);
+ std::uninitialized_fill_n(insn_flags_.get(),
+ code_item_->insns_size_in_code_units_,
+ InstructionFlags());
// Run through the instructions and see if the width checks out.
bool result = ComputeWidthsAndCountOps();
// Flag instructions guarded by a "try" block and check exception handlers.
break;
}
size_t inst_size = inst->SizeInCodeUnits();
- insn_flags_[dex_pc].SetIsOpcode();
+ GetInstructionFlags(dex_pc).SetIsOpcode();
dex_pc += inst_size;
inst = inst->RelativeAt(inst_size);
}
<< " endAddr=" << end << " (size=" << insns_size << ")";
return false;
}
- if (!insn_flags_[start].IsOpcode()) {
+ if (!GetInstructionFlags(start).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "'try' block starts inside an instruction (" << start << ")";
return false;
uint32_t dex_pc = start;
const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
while (dex_pc < end) {
- insn_flags_[dex_pc].SetInTry();
+ GetInstructionFlags(dex_pc).SetInTry();
size_t insn_size = inst->SizeInCodeUnits();
dex_pc += insn_size;
inst = inst->RelativeAt(insn_size);
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t dex_pc= iterator.GetHandlerAddress();
- if (!insn_flags_[dex_pc].IsOpcode()) {
+ if (!GetInstructionFlags(dex_pc).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "exception handler starts at bad address (" << dex_pc << ")";
return false;
<< "exception handler begins with move-result* (" << dex_pc << ")";
return false;
}
- insn_flags_[dex_pc].SetBranchTarget();
+ GetInstructionFlags(dex_pc).SetBranchTarget();
// Ensure exception types are resolved so that they don't need resolution to be delivered,
// unresolved exception types will be ignored by exception delivery
if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
const Instruction* inst = Instruction::At(code_item_->insns_);
/* Flag the start of the method as a branch target, and a GC point due to stack overflow errors */
- insn_flags_[0].SetBranchTarget();
- insn_flags_[0].SetCompileTimeInfoPoint();
+ GetInstructionFlags(0).SetBranchTarget();
+ GetInstructionFlags(0).SetCompileTimeInfoPoint();
uint32_t insns_size = code_item_->insns_size_in_code_units_;
for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
// All invoke points are marked as "Throw" points already.
// We are relying on this to also count all the invokes as interesting.
if (inst->IsBranch()) {
- insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+ GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
// The compiler also needs safepoints for fall-through to loop heads.
// Such a loop head must be a target of a branch.
int32_t offset = 0;
bool cond, self_ok;
bool target_ok = GetBranchOffset(dex_pc, &offset, &cond, &self_ok);
DCHECK(target_ok);
- insn_flags_[dex_pc + offset].SetCompileTimeInfoPoint();
+ GetInstructionFlags(dex_pc + offset).SetCompileTimeInfoPoint();
} else if (inst->IsSwitch() || inst->IsThrow()) {
- insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+ GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
} else if (inst->IsReturn()) {
- insn_flags_[dex_pc].SetCompileTimeInfoPointAndReturn();
+ GetInstructionFlags(dex_pc).SetCompileTimeInfoPointAndReturn();
}
dex_pc += inst->SizeInCodeUnits();
inst = inst->Next();
}
// Make sure the array-data is marked as an opcode. This ensures that it was reached when
// traversing the code item linearly. It is an approximation for a by-spec padding value.
- if (!insn_flags_[cur_offset + array_data_offset].IsOpcode()) {
+ if (!GetInstructionFlags(cur_offset + array_data_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array data table at " << cur_offset
<< ", data offset " << array_data_offset
<< " not correctly visited, probably bad padding.";
int32_t abs_offset = cur_offset + offset;
if (abs_offset < 0 ||
(uint32_t) abs_offset >= insn_count ||
- !insn_flags_[abs_offset].IsOpcode()) {
+ !GetInstructionFlags(abs_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> "
<< reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset);
return false;
}
- insn_flags_[abs_offset].SetBranchTarget();
+ GetInstructionFlags(abs_offset).SetBranchTarget();
return true;
}
}
// Make sure the switch data is marked as an opcode. This ensures that it was reached when
// traversing the code item linearly. It is an approximation for a by-spec padding value.
- if (!insn_flags_[cur_offset + switch_offset].IsOpcode()) {
+ if (!GetInstructionFlags(cur_offset + switch_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "switch table at " << cur_offset
<< ", switch offset " << switch_offset
<< " not correctly visited, probably bad padding.";
int32_t abs_offset = cur_offset + offset;
if (abs_offset < 0 ||
abs_offset >= static_cast<int32_t>(insn_count) ||
- !insn_flags_[abs_offset].IsOpcode()) {
+ !GetInstructionFlags(abs_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
<< " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset)
<< "[" << targ << "]";
return false;
}
- insn_flags_[abs_offset].SetBranchTarget();
+ GetInstructionFlags(abs_offset).SetBranchTarget();
}
return true;
}
registers_size,
this);
-
work_line_.reset(RegisterLine::Create(registers_size, this));
saved_line_.reset(RegisterLine::Create(registers_size, this));
vios->Stream() << reg_line->Dump(this) << "\n";
}
vios->Stream()
- << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
+ << StringPrintf("0x%04zx", dex_pc) << ": " << GetInstructionFlags(dex_pc).ToString() << " ";
const bool kDumpHexOfInstruction = false;
if (kDumpHexOfInstruction) {
vios->Stream() << inst->DumpHex(5) << " ";
const uint32_t insns_size = code_item_->insns_size_in_code_units_;
/* Begin by marking the first instruction as "changed". */
- insn_flags_[0].SetChanged();
+ GetInstructionFlags(0).SetChanged();
uint32_t start_guess = 0;
/* Continue until no instructions are marked "changed". */
// Find the first marked one. Use "start_guess" as a way to find one quickly.
uint32_t insn_idx = start_guess;
for (; insn_idx < insns_size; insn_idx++) {
- if (insn_flags_[insn_idx].IsChanged())
+ if (GetInstructionFlags(insn_idx).IsChanged())
break;
}
if (insn_idx == insns_size) {
// situation where we have a stray "changed" flag set on an instruction that isn't a branch
// target.
work_insn_idx_ = insn_idx;
- if (insn_flags_[insn_idx].IsBranchTarget()) {
+ if (GetInstructionFlags(insn_idx).IsBranchTarget()) {
work_line_->CopyFromLine(reg_table_.GetLine(insn_idx));
} else if (kIsDebugBuild) {
/*
return false;
}
/* Clear "changed" and mark as visited. */
- insn_flags_[insn_idx].SetVisited();
- insn_flags_[insn_idx].ClearChanged();
+ GetInstructionFlags(insn_idx).SetVisited();
+ GetInstructionFlags(insn_idx).ClearChanged();
}
if (gDebugVerify) {
(insns[insn_idx + 1] == Instruction::kPackedSwitchSignature ||
insns[insn_idx + 1] == Instruction::kSparseSwitchSignature ||
insns[insn_idx + 1] == Instruction::kArrayDataSignature))) {
- insn_flags_[insn_idx].SetVisited();
+ GetInstructionFlags(insn_idx).SetVisited();
}
- if (!insn_flags_[insn_idx].IsVisited()) {
+ if (!GetInstructionFlags(insn_idx).IsVisited()) {
if (dead_start < 0)
dead_start = insn_idx;
} else if (dead_start >= 0) {
// We need to ensure the work line is consistent while performing validation. When we spot a
// peephole pattern we compute a new line for either the fallthrough instruction or the
// branch target.
- std::unique_ptr<RegisterLine> branch_line;
- std::unique_ptr<RegisterLine> fallthrough_line;
+ ArenaUniquePtr<RegisterLine> branch_line;
+ ArenaUniquePtr<RegisterLine> fallthrough_line;
switch (inst->Opcode()) {
case Instruction::NOP:
work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
// Check whether the previous instruction is a move-object with vAA as a source, creating
// untracked lock aliasing.
- if (0 != work_insn_idx_ && !insn_flags_[work_insn_idx_].IsBranchTarget()) {
+ if (0 != work_insn_idx_ && !GetInstructionFlags(work_insn_idx_).IsBranchTarget()) {
uint32_t prev_idx = work_insn_idx_ - 1;
- while (0 != prev_idx && !insn_flags_[prev_idx].IsOpcode()) {
+ while (0 != prev_idx && !GetInstructionFlags(prev_idx).IsOpcode()) {
prev_idx--;
}
const Instruction* prev_inst = Instruction::At(code_item_->insns_ + prev_idx);
uint32_t instance_of_idx = 0;
if (0 != work_insn_idx_) {
instance_of_idx = work_insn_idx_ - 1;
- while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
+ while (0 != instance_of_idx && !GetInstructionFlags(instance_of_idx).IsOpcode()) {
instance_of_idx--;
}
- if (FailOrAbort(this, insn_flags_[instance_of_idx].IsOpcode(),
+ if (FailOrAbort(this, GetInstructionFlags(instance_of_idx).IsOpcode(),
"Unable to get previous instruction of if-eqz/if-nez for work index ",
work_insn_idx_)) {
break;
update_line->SetRegisterType<LockOp::kKeep>(this,
instance_of_inst->VRegB_22c(),
cast_type);
- if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
+ if (!GetInstructionFlags(instance_of_idx).IsBranchTarget() && 0 != instance_of_idx) {
// See if instance-of was preceded by a move-object operation, common due to the small
// register encoding space of instance-of, and propagate type information to the source
// of the move-object.
uint32_t move_idx = instance_of_idx - 1;
- while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
+ while (0 != move_idx && !GetInstructionFlags(move_idx).IsOpcode()) {
move_idx--;
}
- if (FailOrAbort(this, insn_flags_[move_idx].IsOpcode(),
+ if (FailOrAbort(this, GetInstructionFlags(move_idx).IsOpcode(),
"Unable to get previous instruction of if-eqz/if-nez for work index ",
work_insn_idx_)) {
break;
work_line_->MarkRefsAsInitialized(this, this_type, this_reg, work_insn_idx_);
}
if (return_type == nullptr) {
- return_type = ®_types_.FromDescriptor(GetClassLoader(), return_type_descriptor,
- false);
+ return_type = ®_types_.FromDescriptor(GetClassLoader(), return_type_descriptor, false);
}
if (!return_type->IsLowHalf()) {
work_line_->SetResultRegisterType(this, *return_type);
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
- descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+ descriptor = dex_file_->StringByTypeIdx(return_type_idx);
} else {
descriptor = abs_method->GetReturnTypeDescriptor();
}
return false;
}
/* update branch target, set "changed" if appropriate */
- if (nullptr != branch_line.get()) {
+ if (nullptr != branch_line) {
if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get(), false)) {
return false;
}
* Handle instructions that can throw and that are sitting in a "try" block. (If they're not in a
* "try" block when they throw, control transfers out of the method.)
*/
- if ((opcode_flags & Instruction::kThrow) != 0 && insn_flags_[work_insn_idx_].IsInTry()) {
+ if ((opcode_flags & Instruction::kThrow) != 0 && GetInstructionFlags(work_insn_idx_).IsInTry()) {
bool has_catch_all_handler = false;
CatchHandlerIterator iterator(*code_item_, work_insn_idx_);
if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
return false;
}
- if (nullptr != fallthrough_line.get()) {
+ if (nullptr != fallthrough_line) {
// Make workline consistent with fallthrough computed from peephole optimization.
work_line_->CopyFromLine(fallthrough_line.get());
}
- if (insn_flags_[next_insn_idx].IsReturn()) {
+ if (GetInstructionFlags(next_insn_idx).IsReturn()) {
// For returns we only care about the operand to the return, all other registers are dead.
const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
AdjustReturnLine(this, ret_inst, work_line_.get());
* We're not recording register data for the next instruction, so we don't know what the
* prior state was. We have to assume that something has changed and re-evaluate it.
*/
- insn_flags_[next_insn_idx].SetChanged();
+ GetInstructionFlags(next_insn_idx).SetChanged();
}
}
}
DCHECK_LT(*start_guess, code_item_->insns_size_in_code_units_);
- DCHECK(insn_flags_[*start_guess].IsOpcode());
+ DCHECK(GetInstructionFlags(*start_guess).IsOpcode());
if (have_pending_runtime_throw_failure_) {
have_any_pending_runtime_throw_failure_ = true;
return true;
} // NOLINT(readability/fn_size)
+// Records a soft failure (VERIFY_ERROR_NO_CLASS) when a precise reference
+// would otherwise be created for a class that cannot be instantiated.
+void MethodVerifier::UninstantiableError(const char* descriptor) {
+ Fail(VerifyError::VERIFY_ERROR_NO_CLASS) << "Could not create precise reference for "
+ << "non-instantiable klass " << descriptor;
+}
+
+// True if |klass| can be instantiated or is a primitive type; used to decide
+// whether a resolved class may be treated as a precise reg type.
+inline bool MethodVerifier::IsInstantiableOrPrimitive(mirror::Class* klass) {
+ return klass->IsInstantiable() || klass->IsPrimitive();
+}
+
const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
- const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
- const RegType& referrer = GetDeclaringClass();
mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
- const RegType& result = klass != nullptr ?
- FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()) :
- reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
- if (result.IsConflict()) {
+ const RegType* result = nullptr;
+ if (klass != nullptr) {
+ bool precise = klass->CannotBeAssignedFromOtherTypes();
+ if (precise && !IsInstantiableOrPrimitive(klass)) {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ UninstantiableError(descriptor);
+ precise = false;
+ }
+ result = reg_types_.FindClass(klass, precise);
+ if (result == nullptr) {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ result = reg_types_.InsertClass(descriptor, klass, precise);
+ }
+ } else {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ result = ®_types_.FromDescriptor(GetClassLoader(), descriptor, false);
+ }
+ DCHECK(result != nullptr);
+ if (result->IsConflict()) {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
- << "' in " << referrer;
- return result;
+ << "' in " << GetDeclaringClass();
+ return *result;
}
- if (klass == nullptr && !result.IsUnresolvedTypes()) {
- dex_cache_->SetResolvedType(class_idx, result.GetClass());
+ if (klass == nullptr && !result->IsUnresolvedTypes()) {
+ dex_cache_->SetResolvedType(class_idx, result->GetClass());
}
// Check if access is allowed. Unresolved types use xxxWithAccessCheck to
// check at runtime if access is allowed and so pass here. If result is
// primitive, skip the access check.
- if (result.IsNonZeroReferenceTypes() && !result.IsUnresolvedTypes() &&
- !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
- Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
- << referrer << "' -> '" << result << "'";
+ if (result->IsNonZeroReferenceTypes() && !result->IsUnresolvedTypes()) {
+ const RegType& referrer = GetDeclaringClass();
+ if (!referrer.IsUnresolvedTypes() && !referrer.CanAccess(*result)) {
+ Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
+ << referrer << "' -> '" << result << "'";
+ }
}
- return result;
+ return *result;
}
const RegType& MethodVerifier::GetCaughtExceptionType() {
} else {
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const uint16_t class_idx = dex_file_->GetMethodId(method_idx).class_idx_;
- res_method_class = ®_types_.FromDescriptor(GetClassLoader(),
- dex_file_->StringByTypeIdx(class_idx),
- false);
+ res_method_class = ®_types_.FromDescriptor(
+ GetClassLoader(),
+ dex_file_->StringByTypeIdx(class_idx),
+ false);
}
if (!res_method_class->IsAssignableFrom(actual_arg_type)) {
Fail(actual_arg_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS:
field->GetType<false>();
if (field_type_class != nullptr) {
- field_type = &FromClass(field->GetTypeDescriptor(), field_type_class,
+ field_type = &FromClass(field->GetTypeDescriptor(),
+ field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
} else {
Thread* self = Thread::Current();
DCHECK(!can_load_classes_ || self->IsExceptionPending());
self->ClearException();
field_type = ®_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
- field->GetTypeDescriptor(), false);
+ field->GetTypeDescriptor(),
+ false);
}
if (field_type == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field type from " << inst->Name();
bool update_merge_line) {
bool changed = true;
RegisterLine* target_line = reg_table_.GetLine(next_insn);
- if (!insn_flags_[next_insn].IsVisitedOrChanged()) {
+ if (!GetInstructionFlags(next_insn).IsVisitedOrChanged()) {
/*
* We haven't processed this instruction before, and we haven't touched the registers here, so
* there's nothing to "merge". Copy the registers over and mark it as changed. (This is the
* only way a register can transition out of "unknown", so this is not just an optimization.)
*/
target_line->CopyFromLine(merge_line);
- if (insn_flags_[next_insn].IsReturn()) {
+ if (GetInstructionFlags(next_insn).IsReturn()) {
// Verify that the monitor stack is empty on return.
merge_line->VerifyMonitorStackEmpty(this);
AdjustReturnLine(this, ret_inst, target_line);
}
} else {
- std::unique_ptr<RegisterLine> copy(gDebugVerify ?
- RegisterLine::Create(target_line->NumRegs(), this) :
- nullptr);
+ ArenaUniquePtr<RegisterLine> copy;
if (gDebugVerify) {
+ copy.reset(RegisterLine::Create(target_line->NumRegs(), this));
copy->CopyFromLine(target_line);
}
changed = target_line->MergeRegisters(this, merge_line);
}
}
if (changed) {
- insn_flags_[next_insn].SetChanged();
+ GetInstructionFlags(next_insn).SetChanged();
}
return true;
}
+// Convenience accessor for the flags of the instruction currently being
+// verified (work_insn_idx_).
InstructionFlags* MethodVerifier::CurrentInsnFlags() {
- return &insn_flags_[work_insn_idx_];
+ return &GetInstructionFlags(work_insn_idx_);
}
const RegType& MethodVerifier::GetMethodReturnType() {
= dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
if (mirror_method_ != nullptr) {
mirror::Class* klass = mirror_method_->GetDeclaringClass();
- declaring_class_ = &FromClass(descriptor, klass,
- klass->CannotBeAssignedFromOtherTypes());
+ declaring_class_ = &FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
} else {
declaring_class_ = ®_types_.FromDescriptor(GetClassLoader(), descriptor, false);
}
#include <sstream>
#include <vector>
+#include "base/arena_allocator.h"
#include "base/macros.h"
+#include "base/scoped_arena_containers.h"
+#include "base/stl_util.h"
#include "dex_file.h"
#include "handle.h"
#include "instruction_flags.h"
// execution of that instruction.
class PcToRegisterLineTable {
public:
- PcToRegisterLineTable() : size_(0) {}
+ // All storage for the table is drawn from the per-method scoped arena.
+ explicit PcToRegisterLineTable(ScopedArenaAllocator& arena);
~PcToRegisterLineTable();
// Initialize the RegisterTable. Every instruction address can have a different set of information
void Init(RegisterTrackingMode mode, InstructionFlags* flags, uint32_t insns_size,
uint16_t registers_size, MethodVerifier* verifier);
- RegisterLine* GetLine(size_t idx) {
- DCHECK_LT(idx, size_);
- return register_lines_[idx];
+ // Returns the register line for |idx| (null for "uninteresting" addresses,
+ // see Init); ownership stays with the table.
+ // NOTE(review): the old DCHECK_LT bounds check was dropped here and
+ // vector::operator[] is unchecked — confirm all callers pass a valid index.
+ RegisterLine* GetLine(size_t idx) const {
+ return register_lines_[idx].get();
}
private:
- std::unique_ptr<RegisterLine*[]> register_lines_;
- size_t size_;
+ // One entry per code unit; the vector's size replaces the old size_ field.
+ ScopedArenaVector<ArenaUniquePtr<RegisterLine>> register_lines_;
DISALLOW_COPY_AND_ASSIGN(PcToRegisterLineTable);
};
// Accessors used by the compiler via CompilerCallback
const DexFile::CodeItem* CodeItem() const;
RegisterLine* GetRegLine(uint32_t dex_pc);
- const InstructionFlags& GetInstructionFlags(size_t index) const;
+ ALWAYS_INLINE const InstructionFlags& GetInstructionFlags(size_t index) const;
+ ALWAYS_INLINE InstructionFlags& GetInstructionFlags(size_t index);
mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
MethodReference GetMethodReference() const;
return IsConstructor() && !IsStatic();
}
+ ScopedArenaAllocator& GetArena() {
+ return arena_;
+ }
+
private:
+ void UninstantiableError(const char* descriptor);
+ static bool IsInstantiableOrPrimitive(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
// Is the method being verified a constructor? See the comment on the field.
bool IsConstructor() const {
return is_constructor_;
// The thread we're verifying on.
Thread* const self_;
+ // Arena allocator.
+ ArenaStack arena_stack_;
+ ScopedArenaAllocator arena_;
+
RegTypeCache reg_types_;
PcToRegisterLineTable reg_table_;
// Storage for the register status we're currently working on.
- std::unique_ptr<RegisterLine> work_line_;
+ ArenaUniquePtr<RegisterLine> work_line_;
// The address of the instruction we're currently working on, note that this is in 2 byte
// quantities
uint32_t work_insn_idx_;
// Storage for the register status we're saving for later.
- std::unique_ptr<RegisterLine> saved_line_;
+ ArenaUniquePtr<RegisterLine> saved_line_;
const uint32_t dex_method_idx_; // The method we're working on.
// Its object representation if known.
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
- std::unique_ptr<InstructionFlags[]> insn_flags_;
+ // Owned, but not unique_ptr since insn_flags_ are allocated in arenas.
+ ArenaUniquePtr<InstructionFlags[]> insn_flags_;
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
uint32_t interesting_dex_pc_;
// The container into which FindLocksAtDexPc should write the registers containing held locks,
#include "reg_type.h"
#include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
#include "mirror/class.h"
namespace art {
return instance_;
}
+// Placement-new support so RegType instances can live in the verifier's
+// scoped arena (tagged kArenaAllocMisc); the plain-ArenaAllocator overload is
+// deleted in the header to force use of the scoped arena.
+inline void* RegType::operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kArenaAllocMisc);
+}
+
} // namespace verifier
} // namespace art
#include "reg_type-inl.h"
+#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
#include "base/casts.h"
#include "class_linker-inl.h"
const DoubleHiType* DoubleHiType::instance_ = nullptr;
const IntegerType* IntegerType::instance_ = nullptr;
-PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+// Descriptor is now passed as a non-owning StringPiece: the backing
+// characters must outlive the type (they are stored in descriptor_).
+PrimitiveType::PrimitiveType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
: RegType(klass, descriptor, cache_id) {
CHECK(klass != nullptr);
CHECK(!descriptor.empty());
}
-Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+// Category-1 (single code unit) primitive; forwards straight to PrimitiveType.
+Cat1Type::Cat1Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
: PrimitiveType(klass, descriptor, cache_id) {
}
-Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+// Category-2 (wide, two code unit) primitive; forwards to PrimitiveType.
+Cat2Type::Cat2Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
: PrimitiveType(klass, descriptor, cache_id) {
}
}
+// Human-readable name used in verifier diagnostics and state dumps.
std::string IntegerType::Dump() const {
- return "Integer";
+ return "Integer";
}
const DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new DoubleHiType(klass, descriptor, cache_id);
}
const DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new DoubleLoType(klass, descriptor, cache_id);
}
}
-const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+// Installs the process-wide singleton; the CHECK enforces it is created
+// exactly once (instance_ must still be null).
+const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new LongLoType(klass, descriptor, cache_id);
return instance_;
}
-const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new LongHiType(klass, descriptor, cache_id);
}
}
-const FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const FloatType* FloatType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new FloatType(klass, descriptor, cache_id);
}
}
-const CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const CharType* CharType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new CharType(klass, descriptor, cache_id);
}
}
-const ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ShortType* ShortType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new ShortType(klass, descriptor, cache_id);
}
}
-const ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ByteType* ByteType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new ByteType(klass, descriptor, cache_id);
}
}
-const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new IntegerType(klass, descriptor, cache_id);
}
const ConflictType* ConflictType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new ConflictType(klass, descriptor, cache_id);
}
}
-const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(BooleanType::instance_ == nullptr);
instance_ = new BooleanType(klass, descriptor, cache_id);
}
const UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new UndefinedType(klass, descriptor, cache_id);
}
}
-PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id)
: RegType(klass, descriptor, cache_id) {
// Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError
std::string UnresolvedReferenceType::Dump() const {
std::stringstream result;
- result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().c_str());
+ // as_string() materializes the StringPiece; the temporary std::string lives
+ // until the end of the full expression, so c_str() here is safe.
+ result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().as_string().c_str());
return result.str();
}
std::string UnresolvedUninitializedRefType::Dump() const {
std::stringstream result;
result << "Unresolved And Uninitialized Reference" << ": "
- << PrettyDescriptor(GetDescriptor().c_str())
+ // as_string(): StringPiece no longer exposes c_str(); the temporary string
+ // outlives the PrettyDescriptor call within this full expression.
+ << PrettyDescriptor(GetDescriptor().as_string().c_str())
<< " Allocation PC: " << GetAllocationPc();
return result.str();
}
std::string UnresolvedUninitializedThisRefType::Dump() const {
std::stringstream result;
result << "Unresolved And Uninitialized This Reference"
- << PrettyDescriptor(GetDescriptor().c_str())
+ // Materialize the StringPiece before taking c_str(); safe within this
+ // full expression.
+ << PrettyDescriptor(GetDescriptor().as_string().c_str());
return result.str();
}
#include <set>
#include <string>
+#include "base/arena_object.h"
#include "base/bit_vector.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/stringpiece.h"
#include "gc_root.h"
#include "handle_scope.h"
#include "object_callbacks.h"
class Class;
} // namespace mirror
+class ArenaBitVector;
+class ScopedArenaAllocator;
+
namespace verifier {
class RegTypeCache;
bool IsJavaLangObjectArray() const
SHARED_REQUIRES(Locks::mutator_lock_);
bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
- const std::string& GetDescriptor() const {
+ const StringPiece& GetDescriptor() const {
DCHECK(HasClass() ||
(IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
!IsUnresolvedSuperClass()));
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
SHARED_REQUIRES(Locks::mutator_lock_);
+ static void* operator new(size_t size) noexcept {
+ return ::operator new(size);
+ }
+
+ static void* operator new(size_t size, ArenaAllocator* arena) = delete;
+ static void* operator new(size_t size, ScopedArenaAllocator* arena);
+
protected:
- RegType(mirror::Class* klass, const std::string& descriptor,
+ RegType(mirror::Class* klass,
+ const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
- : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+ : descriptor_(descriptor),
+ klass_(klass),
+ cache_id_(cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
- const std::string descriptor_;
- mutable GcRoot<mirror::Class>
- klass_; // Non-const only due to moving classes.
+ const StringPiece descriptor_;
+ mutable GcRoot<mirror::Class> klass_; // Non-const only due to moving classes.
const uint16_t cache_id_;
friend class RegTypeCache;
// Create the singleton instance.
static const ConflictType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static void Destroy();
private:
- ConflictType(mirror::Class* klass, const std::string& descriptor,
+ ConflictType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
// Create the singleton instance.
static const UndefinedType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static void Destroy();
private:
- UndefinedType(mirror::Class* klass, const std::string& descriptor,
+ UndefinedType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
class PrimitiveType : public RegType {
public:
- PrimitiveType(mirror::Class* klass, const std::string& descriptor,
+ PrimitiveType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
bool HasClassVirtual() const OVERRIDE { return true; }
class Cat1Type : public PrimitiveType {
public:
- Cat1Type(mirror::Class* klass, const std::string& descriptor,
+ Cat1Type(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
};
bool IsInteger() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const IntegerType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const IntegerType* GetInstance() PURE;
static void Destroy();
private:
- IntegerType(mirror::Class* klass, const std::string& descriptor,
+ IntegerType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const IntegerType* instance_;
bool IsBoolean() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const BooleanType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const BooleanType* GetInstance() PURE;
static void Destroy();
private:
- BooleanType(mirror::Class* klass, const std::string& descriptor,
+ BooleanType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
bool IsByte() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const ByteType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const ByteType* GetInstance() PURE;
static void Destroy();
private:
- ByteType(mirror::Class* klass, const std::string& descriptor,
+ ByteType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ByteType* instance_;
bool IsShort() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const ShortType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const ShortType* GetInstance() PURE;
static void Destroy();
private:
- ShortType(mirror::Class* klass, const std::string& descriptor,
+ ShortType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ShortType* instance_;
bool IsChar() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const CharType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const CharType* GetInstance() PURE;
static void Destroy();
private:
- CharType(mirror::Class* klass, const std::string& descriptor,
+ CharType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const CharType* instance_;
bool IsFloat() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const FloatType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const FloatType* GetInstance() PURE;
static void Destroy();
private:
- FloatType(mirror::Class* klass, const std::string& descriptor,
+ FloatType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const FloatType* instance_;
class Cat2Type : public PrimitiveType {
public:
- Cat2Type(mirror::Class* klass, const std::string& descriptor,
+ Cat2Type(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
};
bool IsLongLo() const OVERRIDE { return true; }
bool IsLong() const OVERRIDE { return true; }
static const LongLoType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const LongLoType* GetInstance() PURE;
static void Destroy();
private:
- LongLoType(mirror::Class* klass, const std::string& descriptor,
+ LongLoType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongLoType* instance_;
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
bool IsLongHi() const OVERRIDE { return true; }
static const LongHiType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const LongHiType* GetInstance() PURE;
static void Destroy();
private:
- LongHiType(mirror::Class* klass, const std::string& descriptor,
+ LongHiType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongHiType* instance_;
bool IsDoubleLo() const OVERRIDE { return true; }
bool IsDouble() const OVERRIDE { return true; }
static const DoubleLoType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const DoubleLoType* GetInstance() PURE;
static void Destroy();
private:
- DoubleLoType(mirror::Class* klass, const std::string& descriptor,
+ DoubleLoType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleLoType* instance_;
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
virtual bool IsDoubleHi() const OVERRIDE { return true; }
static const DoubleHiType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const DoubleHiType* GetInstance() PURE;
static void Destroy();
private:
- DoubleHiType(mirror::Class* klass, const std::string& descriptor,
+ DoubleHiType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleHiType* instance_;
// instructions and must be passed to a constructor.
class UninitializedType : public RegType {
public:
- UninitializedType(mirror::Class* klass, const std::string& descriptor,
+ UninitializedType(mirror::Class* klass, const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
: RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
class UninitializedReferenceType FINAL : public UninitializedType {
public:
UninitializedReferenceType(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
// constructor.
class UnresolvedUninitializedRefType FINAL : public UninitializedType {
public:
- UnresolvedUninitializedRefType(const std::string& descriptor,
+ UnresolvedUninitializedRefType(const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
class UninitializedThisReferenceType FINAL : public UninitializedType {
public:
UninitializedThisReferenceType(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, 0, cache_id) {
class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
public:
- UnresolvedUninitializedThisRefType(const std::string& descriptor,
+ UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, 0, cache_id) {
// sub-class.
class ReferenceType FINAL : public RegType {
public:
- ReferenceType(mirror::Class* klass, const std::string& descriptor,
+ ReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
// type.
class PreciseReferenceType FINAL : public RegType {
public:
- PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+ PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
// Common parent of unresolved types.
class UnresolvedType : public RegType {
public:
- UnresolvedType(const std::string& descriptor, uint16_t cache_id)
+ UnresolvedType(const StringPiece& descriptor, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(nullptr, descriptor, cache_id) {}
// of this type must be conservative.
class UnresolvedReferenceType FINAL : public UnresolvedType {
public:
- UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
+ UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UnresolvedType(descriptor, cache_id) {
if (kIsDebugBuild) {
class UnresolvedMergedType FINAL : public UnresolvedType {
public:
// Note: the constructor will copy the unresolved BitVector, not use it directly.
- UnresolvedMergedType(const RegType& resolved, const BitVector& unresolved,
- const RegTypeCache* reg_type_cache, uint16_t cache_id)
+ UnresolvedMergedType(const RegType& resolved,
+ const BitVector& unresolved,
+ const RegTypeCache* reg_type_cache,
+ uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
// The resolved part. See description below.
}
}
+template <class RegTypeType>
+inline RegTypeType& RegTypeCache::AddEntry(RegTypeType* new_entry) {
+ DCHECK(new_entry != nullptr);
+ entries_.push_back(new_entry);
+ if (new_entry->HasClass()) {
+ mirror::Class* klass = new_entry->GetClass();
+ DCHECK(!klass->IsPrimitive());
+ klass_entries_.push_back(std::make_pair(GcRoot<mirror::Class>(klass), new_entry));
+ }
+ return *new_entry;
+}
+
} // namespace verifier
} // namespace art
#endif // ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
#include "reg_type_cache-inl.h"
+#include "base/arena_bit_vector.h"
#include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
bool RegTypeCache::primitive_initialized_ = false;
uint16_t RegTypeCache::primitive_count_ = 0;
-const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant -
+ kMinSmallConstant + 1];
-static bool MatchingPrecisionForClass(const RegType* entry, bool precise)
+ALWAYS_INLINE static inline bool MatchingPrecisionForClass(const RegType* entry, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_) {
if (entry->IsPreciseReference() == precise) {
// We were or weren't looking for a precise reference and we found what we need.
DCHECK_EQ(entries_.size(), primitive_count_);
}
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader,
+ const char* descriptor,
bool precise) {
DCHECK(RegTypeCache::primitive_initialized_);
if (descriptor[1] == '\0') {
return klass;
}
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+StringPiece RegTypeCache::AddString(const StringPiece& string_piece) {
+ char* ptr = arena_.AllocArray<char>(string_piece.length());
+ memcpy(ptr, string_piece.data(), string_piece.length());
+ return StringPiece(ptr, string_piece.length());
+}
+
+const RegType& RegTypeCache::From(mirror::ClassLoader* loader,
+ const char* descriptor,
bool precise) {
+ StringPiece sp_descriptor(descriptor);
// Try looking up the class in the cache first. We use a StringPiece to avoid continual strlen
// operations on the descriptor.
- StringPiece descriptor_sp(descriptor);
for (size_t i = primitive_count_; i < entries_.size(); i++) {
- if (MatchDescriptor(i, descriptor_sp, precise)) {
+ if (MatchDescriptor(i, sp_descriptor, precise)) {
return *(entries_[i]);
}
}
if (klass->CannotBeAssignedFromOtherTypes() || precise) {
DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
DCHECK(!klass->IsInterface());
- entry = new PreciseReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+ entry = new (&arena_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
} else {
- entry = new ReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+ entry = new (&arena_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
} else { // Class not resolved.
// We tried loading the class and failed, this might get an exception raised
// so we want to clear it before we go on.
DCHECK(!Thread::Current()->IsExceptionPending());
}
if (IsValidDescriptor(descriptor)) {
- RegType* entry = new UnresolvedReferenceType(descriptor_sp.as_string(), entries_.size());
- AddEntry(entry);
- return *entry;
+ return AddEntry(
+ new (&arena_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
} else {
// The descriptor is broken return the unknown type as there's nothing sensible that
// could be done at runtime
}
}
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const {
DCHECK(klass != nullptr);
if (klass->IsPrimitive()) {
// Note: precise isn't used for primitive classes. A char is assignable to an int. All
// primitive classes are final.
- return RegTypeFromPrimitiveType(klass->GetPrimitiveType());
- } else {
- // Look for the reference in the list of entries to have.
- for (size_t i = primitive_count_; i < entries_.size(); i++) {
- const RegType* cur_entry = entries_[i];
- if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
- return *cur_entry;
+ return &RegTypeFromPrimitiveType(klass->GetPrimitiveType());
+ }
+ for (auto& pair : klass_entries_) {
+ mirror::Class* const reg_klass = pair.first.Read();
+ if (reg_klass == klass) {
+ const RegType* reg_type = pair.second;
+ if (MatchingPrecisionForClass(reg_type, precise)) {
+ return reg_type;
}
}
- // No reference to the class was found, create new reference.
- RegType* entry;
- if (precise) {
- entry = new PreciseReferenceType(klass, descriptor, entries_.size());
- } else {
- entry = new ReferenceType(klass, descriptor, entries_.size());
- }
- AddEntry(entry);
- return *entry;
}
+ return nullptr;
}
-RegTypeCache::RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
+const RegType* RegTypeCache::InsertClass(const StringPiece& descriptor,
+ mirror::Class* klass,
+ bool precise) {
+ // No reference to the class was found, create new reference.
+ DCHECK(FindClass(klass, precise) == nullptr);
+ RegType* const reg_type = precise
+ ? static_cast<RegType*>(
+ new (&arena_) PreciseReferenceType(klass, descriptor, entries_.size()))
+ : new (&arena_) ReferenceType(klass, descriptor, entries_.size());
+ return &AddEntry(reg_type);
+}
+
+const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+ DCHECK(klass != nullptr);
+ const RegType* reg_type = FindClass(klass, precise);
+ if (reg_type == nullptr) {
+ reg_type = InsertClass(AddString(StringPiece(descriptor)), klass, precise);
+ }
+ return *reg_type;
+}
+
+RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena)
+ : entries_(arena.Adapter(kArenaAllocVerifier)),
+ klass_entries_(arena.Adapter(kArenaAllocVerifier)),
+ can_load_classes_(can_load_classes),
+ arena_(arena) {
if (kIsDebugBuild) {
Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
}
- entries_.reserve(64);
+ // The klass_entries_ array does not have primitives or small constants.
+ static constexpr size_t kNumReserveEntries = 32;
+ klass_entries_.reserve(kNumReserveEntries);
+ // We want to have room for additional entries after inserting primitives and small
+ // constants.
+ entries_.reserve(kNumReserveEntries + kNumPrimitivesAndSmallConstants);
FillPrimitiveAndSmallConstantTypes();
}
RegTypeCache::~RegTypeCache() {
- CHECK_LE(primitive_count_, entries_.size());
- // Delete only the non primitive types.
- if (entries_.size() == kNumPrimitivesAndSmallConstants) {
- // All entries are from the global pool, nothing to delete.
- return;
- }
- std::vector<const RegType*>::iterator non_primitive_begin = entries_.begin();
- std::advance(non_primitive_begin, kNumPrimitivesAndSmallConstants);
- STLDeleteContainerPointers(non_primitive_begin, entries_.end());
+ DCHECK_LE(primitive_count_, entries_.size());
}
void RegTypeCache::ShutDown() {
}
const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
- BitVector types(1, // Allocate at least a word.
- true, // Is expandable.
- Allocator::GetMallocAllocator()); // TODO: Arenas in the verifier.
+ ArenaBitVector types(&arena_,
+ kDefaultArenaBitVectorBytes * kBitsPerByte, // Allocate at least 8 bytes.
+ true); // Is expandable.
const RegType* left_resolved;
if (left.IsUnresolvedMergedReference()) {
const UnresolvedMergedType* left_merge = down_cast<const UnresolvedMergedType*>(&left);
const BitVector& unresolved_part = cmp_type->GetUnresolvedTypes();
// Use SameBitsSet. "types" is expandable to allow merging in the components, but the
// BitVector in the final RegType will be made non-expandable.
- if (&resolved_part == &resolved_parts_merged &&
- types.SameBitsSet(&unresolved_part)) {
+ if (&resolved_part == &resolved_parts_merged && types.SameBitsSet(&unresolved_part)) {
return *cur_entry;
}
}
}
-
- // Create entry.
- RegType* entry = new UnresolvedMergedType(resolved_parts_merged,
- types,
- this,
- entries_.size());
- AddEntry(entry);
- return *entry;
+ return AddEntry(new (&arena_) UnresolvedMergedType(resolved_parts_merged,
+ types,
+ this,
+ entries_.size()));
}
const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
}
}
}
- RegType* entry = new UnresolvedSuperClass(child.GetId(), this, entries_.size());
- AddEntry(entry);
- return *entry;
+ return AddEntry(new (&arena_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
}
const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
UninitializedType* entry = nullptr;
- const std::string& descriptor(type.GetDescriptor());
+ const StringPiece& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
}
}
- entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
+ entry = new (&arena_) UnresolvedUninitializedRefType(descriptor,
+ allocation_pc,
+ entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
return *down_cast<const UninitializedReferenceType*>(cur_entry);
}
}
- entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
+ entry = new (&arena_) UninitializedReferenceType(klass,
+ descriptor,
+ allocation_pc,
+ entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
RegType* entry;
if (uninit_type.IsUnresolvedTypes()) {
- const std::string& descriptor(uninit_type.GetDescriptor());
+ const StringPiece& descriptor(uninit_type.GetDescriptor());
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
if (cur_entry->IsUnresolvedReference() &&
return *cur_entry;
}
}
- entry = new UnresolvedReferenceType(descriptor, entries_.size());
+ entry = new (&arena_) UnresolvedReferenceType(descriptor, entries_.size());
} else {
mirror::Class* klass = uninit_type.GetClass();
if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
return *cur_entry;
}
}
- entry = new ReferenceType(klass, "", entries_.size());
+ entry = new (&arena_) ReferenceType(klass, "", entries_.size());
} else if (!klass->IsPrimitive()) {
// We're uninitialized because of allocation, look or create a precise type as allocations
// may only create objects of that type.
return *cur_entry;
}
}
- entry = new PreciseReferenceType(klass, uninit_type.GetDescriptor(), entries_.size());
+ entry = new (&arena_) PreciseReferenceType(klass,
+ uninit_type.GetDescriptor(),
+ entries_.size());
} else {
return Conflict();
}
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
UninitializedType* entry;
- const std::string& descriptor(type.GetDescriptor());
+ const StringPiece& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
return *down_cast<const UninitializedType*>(cur_entry);
}
}
- entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
+ entry = new (&arena_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
return *down_cast<const UninitializedType*>(cur_entry);
}
}
- entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
+ entry = new (&arena_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
}
ConstantType* entry;
if (precise) {
- entry = new PreciseConstType(value, entries_.size());
+ entry = new (&arena_) PreciseConstType(value, entries_.size());
} else {
- entry = new ImpreciseConstType(value, entries_.size());
+ entry = new (&arena_) ImpreciseConstType(value, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
}
ConstantType* entry;
if (precise) {
- entry = new PreciseConstLoType(value, entries_.size());
+ entry = new (&arena_) PreciseConstLoType(value, entries_.size());
} else {
- entry = new ImpreciseConstLoType(value, entries_.size());
+ entry = new (&arena_) ImpreciseConstLoType(value, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
}
ConstantType* entry;
if (precise) {
- entry = new PreciseConstHiType(value, entries_.size());
+ entry = new (&arena_) PreciseConstHiType(value, entries_.size());
} else {
- entry = new ImpreciseConstHiType(value, entries_.size());
+ entry = new (&arena_) ImpreciseConstHiType(value, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
if (!array.IsArrayTypes()) {
return Conflict();
} else if (array.IsUnresolvedTypes()) {
- const std::string& descriptor(array.GetDescriptor());
- const std::string component(descriptor.substr(1, descriptor.size() - 1));
- return FromDescriptor(loader, component.c_str(), false);
+ const std::string descriptor(array.GetDescriptor().as_string());
+ return FromDescriptor(loader, descriptor.c_str() + 1, false);
} else {
mirror::Class* klass = array.GetClass()->GetComponentType();
std::string temp;
+ const char* descriptor = klass->GetDescriptor(&temp);
if (klass->IsErroneous()) {
// Arrays may have erroneous component types, use unresolved in that case.
// We assume that the primitive classes are not erroneous, so we know it is a
// reference type.
- return FromDescriptor(loader, klass->GetDescriptor(&temp), false);
+ return FromDescriptor(loader, descriptor, false);
} else {
- return FromClass(klass->GetDescriptor(&temp), klass,
- klass->CannotBeAssignedFromOtherTypes());
+ return FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
}
}
}
for (size_t i = primitive_count_; i < entries_.size(); ++i) {
entries_[i]->VisitRoots(visitor, root_info);
}
-}
-
-void RegTypeCache::AddEntry(RegType* new_entry) {
- entries_.push_back(new_entry);
+ for (auto& pair : klass_entries_) {
+ GcRoot<mirror::Class>& root = pair.first;
+ root.VisitRoot(visitor, root_info);
+ }
}
} // namespace verifier
#include "base/casts.h"
#include "base/macros.h"
+#include "base/scoped_arena_containers.h"
#include "object_callbacks.h"
#include "reg_type.h"
#include "runtime.h"
class Class;
class ClassLoader;
} // namespace mirror
+class ScopedArenaAllocator;
class StringPiece;
namespace verifier {
class RegType;
+// Use 8 bytes since that is the default arena allocator alignment.
+static constexpr size_t kDefaultArenaBitVectorBytes = 8;
+
class RegTypeCache {
public:
- explicit RegTypeCache(bool can_load_classes);
+ explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
~RegTypeCache();
static void Init() SHARED_REQUIRES(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
const art::verifier::RegType& GetFromId(uint16_t id) const;
const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Find a matching RegType for the given class and precision; returns null if not found.
+ const RegType* FindClass(mirror::Class* klass, bool precise) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ // Insert a new class with a specified descriptor; the class must not already be in the cache.
+ const RegType* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool precise)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ // Get or insert a reg type for a descriptor, klass, and precision.
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_);
const ConstantType& FromCat1Const(int32_t value, bool precise)
const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_);
- void AddEntry(RegType* new_entry);
+ // Returns the passed in RegType.
+ template <class RegTypeType>
+ RegTypeType& AddEntry(RegTypeType* new_entry) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Copy the string piece into the arena allocator so that it remains valid for the
+ // lifetime of the verifier.
+ StringPiece AddString(const StringPiece& string_piece);
template <class Type>
static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
// A quick look up for popular small constants.
static constexpr int32_t kMinSmallConstant = -1;
static constexpr int32_t kMaxSmallConstant = 4;
- static const PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+ static const PreciseConstType* small_precise_constants_[kMaxSmallConstant -
+ kMinSmallConstant + 1];
static constexpr size_t kNumPrimitivesAndSmallConstants =
12 + (kMaxSmallConstant - kMinSmallConstant + 1);
static uint16_t primitive_count_;
// The actual storage for the RegTypes.
- std::vector<const RegType*> entries_;
+ ScopedArenaVector<const RegType*> entries_;
+
+ // Fast lookup for quickly finding entries that have a matching class.
+ ScopedArenaVector<std::pair<GcRoot<mirror::Class>, const RegType*>> klass_entries_;
// Whether or not we're allowed to load classes.
const bool can_load_classes_;
+ // Arena allocator.
+ ScopedArenaAllocator& arena_;
+
DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
};
#include "base/bit_vector.h"
#include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
#include "common_runtime_test.h"
#include "reg_type_cache-inl.h"
#include "reg_type-inl.h"
namespace art {
namespace verifier {
-class RegTypeTest : public CommonRuntimeTest {};
+class BaseRegTypeTest : public CommonRuntimeTest {
+ public:
+ void PostRuntimeCreate() OVERRIDE {
+ stack.reset(new ArenaStack(Runtime::Current()->GetArenaPool()));
+ allocator.reset(new ScopedArenaAllocator(stack.get()));
+ }
+
+ std::unique_ptr<ArenaStack> stack;
+ std::unique_ptr<ScopedArenaAllocator> allocator;
+};
+
+class RegTypeTest : public BaseRegTypeTest {};
TEST_F(RegTypeTest, ConstLoHi) {
// Tests creating primitive types types.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
TEST_F(RegTypeTest, Pairs) {
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
int64_t val = static_cast<int32_t>(1234);
const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
TEST_F(RegTypeTest, Primitives) {
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& bool_reg_type = cache.Boolean();
EXPECT_FALSE(bool_reg_type.IsUndefined());
EXPECT_TRUE(double_reg_type.HasClass());
}
-class RegTypeReferenceTest : public CommonRuntimeTest {};
+class RegTypeReferenceTest : public BaseRegTypeTest {};
TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) {
// Tests matching precisions. A reference type that was created precise doesn't
// match the one that is imprecise.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& imprecise_obj = cache.JavaLangObject(false);
const RegType& precise_obj = cache.JavaLangObject(true);
const RegType& precise_obj_2 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
// Tests creating unresolved types: a cache miss the first time the cache is
// asked, and a hit the second time.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) {
// Tests creating uninitialized types from unresolved types.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
const RegType& ref_type = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
TEST_F(RegTypeReferenceTest, Dump) {
// Tests types for proper Dump messages.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& unresolved_ref = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
const RegType& unresolved_ref_another = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistEither;", true);
const RegType& resolved_ref = cache.JavaLangString();
// Hit the second time. Then check for the same effect when using
// The JavaLangObject method instead of FromDescriptor. String class is final.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type = cache.JavaLangString();
const RegType& ref_type_2 = cache.JavaLangString();
const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/String;", true);
// Hit the second time. Then check for the same effect when using
// the JavaLangObject method instead of FromDescriptor. The Object class is not final.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type = cache.JavaLangObject(true);
const RegType& ref_type_2 = cache.JavaLangObject(true);
const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
// Tests merging logic
// String and object , LUB is object.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
const RegType& string = cache_new.JavaLangString();
const RegType& Object = cache_new.JavaLangObject(true);
EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
TEST_F(RegTypeTest, MergingFloat) {
// Testing merging logic with float and float constants.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
constexpr int32_t kTestConstantValue = 10;
const RegType& float_type = cache_new.Float();
TEST_F(RegTypeTest, MergingLong) {
// Testing merging logic with long and long constants.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
constexpr int32_t kTestConstantValue = 10;
const RegType& long_lo_type = cache_new.LongLo();
TEST_F(RegTypeTest, MergingDouble) {
// Testing merging logic with double and double constants.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
constexpr int32_t kTestConstantValue = 10;
const RegType& double_lo_type = cache_new.DoubleLo();
TEST_F(RegTypeTest, ConstPrecision) {
// Tests that constants of the same value but different precision are distinct.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
const RegType& precise_const = cache_new.FromCat1Const(10, true);
}
}
+// Allocates a RegisterLine from the verifier's arena instead of the global
+// heap. The allocation is over-sized so that the trailing line_ array holds
+// num_regs 16-bit register-type ids: OFFSETOF_MEMBER gives the size of the
+// fixed header up to line_, and the array payload is appended after it.
+// Arena-allocated objects are freed wholesale with the arena; callers must
+// not delete the returned pointer.
+inline RegisterLine* RegisterLine::Create(size_t num_regs, MethodVerifier* verifier) {
+ void* memory = verifier->GetArena().Alloc(OFFSETOF_MEMBER(RegisterLine, line_) +
+ (num_regs * sizeof(uint16_t)));
+ return new (memory) RegisterLine(num_regs, verifier);
+}
+
+// Constructs a RegisterLine in storage obtained by Create(). The monitor
+// stack and lock-depth map are wired to the verifier's arena (tagged
+// kArenaAllocVerifier for allocation accounting). The trailing line_ array
+// is zero-filled via uninitialized_fill_n because the memory is raw arena
+// storage, not value-initialized.
+inline RegisterLine::RegisterLine(size_t num_regs, MethodVerifier* verifier)
+ : num_regs_(num_regs),
+ monitors_(verifier->GetArena().Adapter(kArenaAllocVerifier)),
+ reg_to_lock_depths_(std::less<uint32_t>(), verifier->GetArena().Adapter(kArenaAllocVerifier)),
+ this_initialized_(false) {
+ std::uninitialized_fill_n(line_, num_regs_, 0u);
+ SetResultTypeToUnknown(verifier);
+}
+
} // namespace verifier
} // namespace art
}
}
-// Check whether there is another register in the search map that is locked the same way as the
-// register in the src map. This establishes an alias.
-static bool FindLockAliasedRegister(
- uint32_t src,
- const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& src_map,
- const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& search_map) {
+bool FindLockAliasedRegister(uint32_t src,
+ const RegisterLine::RegToLockDepthsMap& src_map,
+ const RegisterLine::RegToLockDepthsMap& search_map) {
auto it = src_map.find(src);
if (it == src_map.end()) {
// "Not locked" is trivially aliased.
#include <memory>
#include <vector>
+#include "base/scoped_arena_containers.h"
#include "safe_map.h"
namespace art {
// stack of entered monitors (identified by code unit offset).
class RegisterLine {
public:
- static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier) {
- void* memory = operator new(sizeof(RegisterLine) + (num_regs * sizeof(uint16_t)));
- RegisterLine* rl = new (memory) RegisterLine(num_regs, verifier);
- return rl;
- }
+ // A map from register to a bit vector of indices into the monitors_ stack.
+ using RegToLockDepthsMap = ScopedArenaSafeMap<uint32_t, uint32_t>;
+
+ // Create a register line of num_regs registers.
+ // Defined in the -inl.h: the storage now comes from the verifier's arena
+ // (replacing the previous operator-new allocation), so callers must not
+ // delete the returned pointer.
+ static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier);
// Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
// Write a bit at each register location that holds a reference.
void WriteReferenceBitMap(MethodVerifier* verifier, std::vector<uint8_t>* data, size_t max_bytes);
+ // Number of monitor-enters currently outstanding (depth of the monitor stack).
- size_t GetMonitorEnterCount() {
+ size_t GetMonitorEnterCount() const {
return monitors_.size();
}
+ // Dex pc recorded for the i-th entry on the monitor stack.
- uint32_t GetMonitorEnterDexPc(size_t i) {
+ uint32_t GetMonitorEnterDexPc(size_t i) const {
return monitors_[i];
}
reg_to_lock_depths_.erase(reg);
}
- RegisterLine(size_t num_regs, MethodVerifier* verifier)
- : num_regs_(num_regs), this_initialized_(false) {
- memset(&line_, 0, num_regs_ * sizeof(uint16_t));
- SetResultTypeToUnknown(verifier);
- }
+ RegisterLine(size_t num_regs, MethodVerifier* verifier);
// Storage for the result register's type, valid after an invocation.
uint16_t result_[2];
const uint32_t num_regs_;
// A stack of monitor enter locations.
- std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
+ // Arena-backed (kArenaAllocVerifier): released wholesale with the
+ // verifier's arena rather than individually.
+ ScopedArenaVector<uint32_t> monitors_;
+
// A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
// stack we verify that monitor-enter/exit are correctly nested. That is, if there was a
// monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5.
- AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_;
+ RegToLockDepthsMap reg_to_lock_depths_;
// Whether "this" initialization (a constructor supercall) has happened.
bool this_initialized_;
// An array of RegType Ids associated with each dex register.
- uint16_t line_[0];
+ // Note: declared with size 1 (standard C++ forbids zero-length arrays);
+ // Create() over-allocates so line_ effectively holds num_regs_ entries.
+ // This member must remain last in the class.
+ uint16_t line_[1];
DISALLOW_COPY_AND_ASSIGN(RegisterLine);
};