
ARM: VIXL32: Implement InstanceOf, InvokeInterface and others.
author    Artem Serov <artem.serov@linaro.org>
          Fri, 14 Oct 2016 14:58:56 +0000 (15:58 +0100)
committer Artem Serov <artem.serov@linaro.org>
          Thu, 10 Nov 2016 14:54:09 +0000 (14:54 +0000)
More than 100 tests now pass.

Test: export ART_USE_VIXL_ARM_BACKEND=true && \
      mma test-art-host dist && mma test-art-target dist

Change-Id: I7a30241d42bf0beec254f6988646282f4ea46cef

compiler/optimizing/code_generator_arm_vixl.cc
compiler/optimizing/code_generator_arm_vixl.h
compiler/optimizing/common_arm.h
test/Android.arm_vixl.mk

diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index b9814b6..9e74a9d 100644
@@ -458,7 +458,7 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     __ Bind(GetEntryLabel());
 
     if (!is_fatal_) {
-      TODO_VIXL32(FATAL);
+      SaveLiveRegisters(codegen, locations);
     }
 
     // We're moving two locations to locations that could overlap, so we need a parallel
@@ -472,7 +472,13 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
                                LocationFrom(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
-      TODO_VIXL32(FATAL);
+      arm_codegen->InvokeRuntime(kQuickInstanceofNonTrivial,
+                                 instruction_,
+                                 instruction_->GetDexPc(),
+                                 this);
+      CheckEntrypointTypes<
+          kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
+      arm_codegen->Move32(locations->Out(), LocationFrom(r0));
     } else {
       DCHECK(instruction_->IsCheckCast());
       arm_codegen->InvokeRuntime(kQuickCheckInstanceOf,
@@ -483,7 +489,8 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
     }
 
     if (!is_fatal_) {
-      TODO_VIXL32(FATAL);
+      RestoreLiveRegisters(codegen, locations);
+      __ B(GetExitLabel());
     }
   }
 
@@ -850,9 +857,9 @@ void CodeGeneratorARMVIXL::Move32(Location destination, Location source) {
   }
 }
 
-void CodeGeneratorARMVIXL::MoveConstant(Location destination ATTRIBUTE_UNUSED,
-                                        int32_t value ATTRIBUTE_UNUSED) {
-  TODO_VIXL32(FATAL);
+void CodeGeneratorARMVIXL::MoveConstant(Location location, int32_t value) {
+  DCHECK(location.IsRegister());
+  __ Mov(RegisterFrom(location), value);
 }
 
 void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
@@ -863,9 +870,15 @@ void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, Primitive::T
   GetMoveResolver()->EmitNativeCode(&move);
 }
 
-void CodeGeneratorARMVIXL::AddLocationAsTemp(Location location ATTRIBUTE_UNUSED,
-                                             LocationSummary* locations ATTRIBUTE_UNUSED) {
-  TODO_VIXL32(FATAL);
+void CodeGeneratorARMVIXL::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else if (location.IsRegisterPair()) {
+    locations->AddTemp(LocationFrom(LowRegisterFrom(location)));
+    locations->AddTemp(LocationFrom(HighRegisterFrom(location)));
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
 }
 
 void CodeGeneratorARMVIXL::InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -1478,6 +1491,17 @@ void InstructionCodeGeneratorARMVIXL::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED)
   codegen_->GenerateFrameExit();
 }
 
+void LocationsBuilderARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+  // The trampoline uses the same calling convention as the dex calling convention,
+  // except that instead of the target Method*, arg0/r0 contains the method_idx.
+  HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
+}
+
 void LocationsBuilderARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
   // Explicit clinit checks triggered by static invokes must have been pruned by
   // art::PrepareForRegisterAllocation.
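
As context for the VisitInvokeUnresolved comment above: an unresolved invoke cannot embed a Method*, so the generated code funnels into a runtime trampoline that receives the dex method_idx in the slot (arg0/r0) a resolved call would use for the target method. A toy model of that idea in plain C++ — the names and the resolution step are illustrative, not ART's:

    #include <cstdint>
    #include <cstdio>

    struct Method { const char* name; };

    // Resolved call: arg0 is the target method itself.
    void InvokeResolved(Method* target) { std::printf("calling %s\n", target->name); }

    // Unresolved call: arg0 is an index the runtime resolves before dispatching.
    void InvokeUnresolvedTrampoline(uint32_t method_idx, Method* method_table) {
      InvokeResolved(&method_table[method_idx]);  // resolve, then forward the call
    }
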
@@ -1548,6 +1572,63 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke)
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
+void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
+  HandleInvoke(invoke);
+  // Add the hidden argument.
+  invoke->GetLocations()->AddTemp(LocationFrom(r12));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
+  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+  LocationSummary* locations = invoke->GetLocations();
+  vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+  vixl32::Register hidden_reg = RegisterFrom(locations->GetTemp(1));
+  Location receiver = locations->InAt(0);
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+  DCHECK(!receiver.IsStackSlot());
+
+  // /* HeapReference<Class> */ temp = receiver->klass_
+  GetAssembler()->LoadFromOffset(kLoadWord, temp, RegisterFrom(receiver), class_offset);
+
+  codegen_->MaybeRecordImplicitNullCheck(invoke);
+  // Instead of simply (possibly) unpoisoning `temp` here, we should
+  // emit a read barrier for the previous class reference load.
+  // However, this is not required in practice, as this is an
+  // intermediate/temporary reference and because the current
+  // concurrent copying collector keeps the from-space memory
+  // intact/accessible until the end of the marking phase (though
+  // this may not hold in the future).
+  GetAssembler()->MaybeUnpoisonHeapReference(temp);
+  GetAssembler()->LoadFromOffset(kLoadWord,
+                                 temp,
+                                 temp,
+                                 mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex(), kArmPointerSize));
+  // temp = temp->GetImtEntryAt(method_offset);
+  GetAssembler()->LoadFromOffset(kLoadWord, temp, temp, method_offset);
+  uint32_t entry_point =
+      ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value();
+  // LR = temp->GetEntryPoint();
+  GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, entry_point);
+
+  // Set the hidden (in r12) argument. It is done here, right before the BLX, to prevent
+  // other instructions from clobbering it, as they might use r12 as a scratch register.
+  DCHECK(hidden_reg.Is(r12));
+  __ Mov(hidden_reg, invoke->GetDexMethodIndex());
+
+  {
+    AssemblerAccurateScope aas(GetVIXLAssembler(),
+                               kArmInstrMaxSizeInBytes,
+                               CodeBufferCheckScope::kMaximumSize);
+    // LR();  (call through the entry point loaded above)
+    __ blx(lr);
+    DCHECK(!codegen_->IsLeafMethod());
+    codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  }
+}
+
 void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
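
The VisitInvokeInterface sequence above implements interface dispatch through the Interface Method Table (IMT): load the receiver's class, load the class's IMT, index it, and branch through the loaded entry point, with the dex method index passed as a hidden argument in r12 so a conflict stub can disambiguate colliding entries. A minimal sketch of the shape of that dispatch in plain C++ (the table size and names are illustrative, not ART's):

    #include <cstdint>

    constexpr uint32_t kImtSize = 64;  // illustrative; not ART's actual constant

    struct Method { void (*entry_point)(void* receiver, uint32_t hidden_method_idx); };
    struct Class  { Method* imt[kImtSize]; };
    struct Object { Class* klass; };

    // Conceptual equivalent of the emitted sequence:
    //   temp = receiver->klass_; temp = temp->imt_[imt_index]; LR = entry point; blx LR
    void InvokeInterface(Object* receiver, uint32_t imt_index, uint32_t method_idx) {
      Method* m = receiver->klass->imt[imt_index % kImtSize];
      m->entry_point(receiver, method_idx);  // method_idx plays the role of r12
    }
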
@@ -3592,6 +3673,74 @@ void InstructionCodeGeneratorARMVIXL::VisitStaticFieldSet(HStaticFieldSet* instr
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
+void LocationsBuilderARMVIXL::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARMVIXL::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARMVIXL::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARMVIXL::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderARMVIXL::VisitNullCheck(HNullCheck* instruction) {
   // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/
   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
@@ -4762,6 +4911,196 @@ static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
        type_check_kind == TypeCheckKind::kArrayObjectCheck);
 }
 
+
+void LocationsBuilderARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
+  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+  bool baker_read_barrier_slow_path = false;
+  switch (type_check_kind) {
+    case TypeCheckKind::kExactCheck:
+    case TypeCheckKind::kAbstractClassCheck:
+    case TypeCheckKind::kClassHierarchyCheck:
+    case TypeCheckKind::kArrayObjectCheck:
+      call_kind =
+          kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+      baker_read_barrier_slow_path = kUseBakerReadBarrier;
+      break;
+    case TypeCheckKind::kArrayCheck:
+    case TypeCheckKind::kUnresolvedCheck:
+    case TypeCheckKind::kInterfaceCheck:
+      call_kind = LocationSummary::kCallOnSlowPath;
+      break;
+  }
+
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+  if (baker_read_barrier_slow_path) {
+    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
+  }
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  // The "out" register is used as a temporary, so it overlaps with the inputs.
+  // Note that TypeCheckSlowPathARMVIXL uses this register too.
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+  // When read barriers are enabled, we need a temporary register for
+  // some cases.
+  if (TypeCheckNeedsATemporary(type_check_kind)) {
+    locations->AddTemp(Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
+  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+  LocationSummary* locations = instruction->GetLocations();
+  Location obj_loc = locations->InAt(0);
+  vixl32::Register obj = InputRegisterAt(instruction, 0);
+  vixl32::Register cls = InputRegisterAt(instruction, 1);
+  Location out_loc = locations->Out();
+  vixl32::Register out = OutputRegister(instruction);
+  Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+      locations->GetTemp(0) :
+      Location::NoLocation();
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  vixl32::Label done, zero;
+  SlowPathCodeARMVIXL* slow_path = nullptr;
+
+  // Return 0 if `obj` is null.
+  // Avoid the null check if we know `obj` is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ Cbz(obj, &zero);
+  }
+
+  // /* HeapReference<Class> */ out = obj->klass_
+  GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, maybe_temp_loc);
+
+  switch (type_check_kind) {
+    case TypeCheckKind::kExactCheck: {
+      __ Cmp(out, cls);
+      // Classes must be equal for the instanceof to succeed.
+      __ B(ne, &zero);
+      __ Mov(out, 1);
+      __ B(&done);
+      break;
+    }
+
+    case TypeCheckKind::kAbstractClassCheck: {
+      // If the class is abstract, we eagerly fetch the super class of the
+      // object to avoid doing a comparison we know will fail.
+      vixl32::Label loop;
+      __ Bind(&loop);
+      // /* HeapReference<Class> */ out = out->super_class_
+      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      // If `out` is null, we use it for the result, and jump to `done`.
+      __ Cbz(out, &done);
+      __ Cmp(out, cls);
+      __ B(ne, &loop);
+      __ Mov(out, 1);
+      if (zero.IsReferenced()) {
+        __ B(&done);
+      }
+      break;
+    }
+
+    case TypeCheckKind::kClassHierarchyCheck: {
+      // Walk over the class hierarchy to find a match.
+      vixl32::Label loop, success;
+      __ Bind(&loop);
+      __ Cmp(out, cls);
+      __ B(eq, &success);
+      // /* HeapReference<Class> */ out = out->super_class_
+      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      __ Cbnz(out, &loop);
+      // If `out` is null, we use it for the result, and jump to `done`.
+      __ B(&done);
+      __ Bind(&success);
+      __ Mov(out, 1);
+      if (zero.IsReferenced()) {
+        __ B(&done);
+      }
+      break;
+    }
+
+    case TypeCheckKind::kArrayObjectCheck: {
+      // Do an exact check.
+      vixl32::Label exact_check;
+      __ Cmp(out, cls);
+      __ B(eq, &exact_check);
+      // Otherwise, we need to check that the object's class is a non-primitive array.
+      // /* HeapReference<Class> */ out = out->component_type_
+      GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, maybe_temp_loc);
+      // If `out` is null, we use it for the result, and jump to `done`.
+      __ Cbz(out, &done);
+      GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
+      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+      __ Cbnz(out, &zero);
+      __ Bind(&exact_check);
+      __ Mov(out, 1);
+      __ B(&done);
+      break;
+    }
+
+    case TypeCheckKind::kArrayCheck: {
+      __ Cmp(out, cls);
+      DCHECK(locations->OnlyCallsOnSlowPath());
+      slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
+                                                                        /* is_fatal */ false);
+      codegen_->AddSlowPath(slow_path);
+      __ B(ne, slow_path->GetEntryLabel());
+      __ Mov(out, 1);
+      if (zero.IsReferenced()) {
+        __ B(&done);
+      }
+      break;
+    }
+
+    case TypeCheckKind::kUnresolvedCheck:
+    case TypeCheckKind::kInterfaceCheck: {
+      // Note that we indeed only call on slow path, but we always go
+      // into the slow path for the unresolved and interface check
+      // cases.
+      //
+      // We cannot directly call the InstanceofNonTrivial runtime
+      // entry point without resorting to a type checking slow path
+      // here (i.e. by calling InvokeRuntime directly), as it would
+      // require assigning fixed registers for the inputs of this
+      // HInstanceOf instruction (following the runtime calling
+      // convention), which might be cluttered by the potential first
+      // read barrier emission at the beginning of this method.
+      //
+      // TODO: Introduce a new runtime entry point taking the object
+      // to test (instead of its class) as argument, and let it deal
+      // with the read barrier issues. This will let us refactor this
+      // case of the `switch` code as it was previously (with a direct
+      // call to the runtime not using a type checking slow path).
+      // This should also be beneficial for the other cases above.
+      DCHECK(locations->OnlyCallsOnSlowPath());
+      slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
+                                                                        /* is_fatal */ false);
+      codegen_->AddSlowPath(slow_path);
+      __ B(slow_path->GetEntryLabel());
+      if (zero.IsReferenced()) {
+        __ B(&done);
+      }
+      break;
+    }
+  }
+
+  if (zero.IsReferenced()) {
+    __ Bind(&zero);
+    __ Mov(out, 0);
+  }
+
+  if (done.IsReferenced()) {
+    __ Bind(&done);
+  }
+
+  if (slow_path != nullptr) {
+    __ Bind(slow_path->GetExitLabel());
+  }
+}
+
 void LocationsBuilderARMVIXL::VisitCheckCast(HCheckCast* instruction) {
   LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
   bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
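
The hierarchy-walking cases in VisitInstanceOf above (and the VisitCheckCast cases below) all emit variants of the same loop over super_class_ links. What the generated loop computes, modeled in plain C++ as a sketch rather than ART code:

    struct Class { const Class* super_class; };

    // kAbstractClassCheck / kClassHierarchyCheck: walk the superclass chain until
    // the target class is found (instanceof -> true) or the chain ends (-> false).
    bool IsSubclassOf(const Class* klass, const Class* target) {
      for (const Class* k = klass; k != nullptr; k = k->super_class) {
        if (k == target) return true;
      }
      return false;
    }
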
@@ -4807,6 +5146,9 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
       locations->GetTemp(1) :
       Location::NoLocation();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
 
   bool is_type_check_slow_path_fatal =
       (type_check_kind == TypeCheckKind::kExactCheck ||
@@ -4839,23 +5181,72 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
-      TODO_VIXL32(FATAL);
+      // If the class is abstract, we eagerly fetch the super class of the
+      // object to avoid doing a comparison we know will fail.
+      vixl32::Label loop;
+      __ Bind(&loop);
+      // /* HeapReference<Class> */ temp = temp->super_class_
+      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+
+      // If the class reference currently in `temp` is null, jump to the slow path to throw the
+      // exception.
+      __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+
+      // Otherwise, compare the classes.
+      __ Cmp(temp, cls);
+      __ B(ne, &loop);
       break;
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
-      TODO_VIXL32(FATAL);
+      // Walk over the class hierarchy to find a match.
+      vixl32::Label loop;
+      __ Bind(&loop);
+      __ Cmp(temp, cls);
+      __ B(eq, &done);
+
+      // /* HeapReference<Class> */ temp = temp->super_class_
+      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+
+      // If the class reference currently in `temp` is null, jump to the slow path to throw the
+      // exception.
+      __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+      // Otherwise, jump to the beginning of the loop.
+      __ B(&loop);
       break;
     }
 
-    case TypeCheckKind::kArrayObjectCheck: {
-      TODO_VIXL32(FATAL);
+    case TypeCheckKind::kArrayObjectCheck: {
+      // Do an exact check.
+      __ Cmp(temp, cls);
+      __ B(eq, &done);
+
+      // Otherwise, we need to check that the object's class is a non-primitive array.
+      // /* HeapReference<Class> */ temp = temp->component_type_
+      GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
+      // If the component type is null, jump to the slow path to throw the exception.
+      __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+      // Otherwise, the object is indeed an array; further check that its
+      // component type is not a primitive type.
+      GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
+      static_assert(Primitive::kPrimNot == 0, "Expected 0 for art::Primitive::kPrimNot");
+      __ Cbnz(temp, type_check_slow_path->GetEntryLabel());
       break;
     }
 
     case TypeCheckKind::kUnresolvedCheck:
     case TypeCheckKind::kInterfaceCheck:
-      TODO_VIXL32(FATAL);
+      // We always go into the type check slow path for the unresolved
+      // and interface check cases.
+      //
+      // We cannot directly call the CheckCast runtime entry point
+      // without resorting to a type checking slow path here (i.e. by
+      // calling InvokeRuntime directly), as it would require
+      // assigning fixed registers for the inputs of this HCheckCast
+      // instruction (following the runtime calling convention), which
+      // might be cluttered by the potential first read barrier
+      // emission at the beginning of this method.
+      __ B(type_check_slow_path->GetEntryLabel());
       break;
   }
   __ Bind(&done);
@@ -5034,6 +5425,22 @@ void InstructionCodeGeneratorARMVIXL::HandleBitwiseOperation(HBinaryOperation* i
   }
 }
 
+void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadOneRegister(
+    HInstruction* instruction ATTRIBUTE_UNUSED,
+    Location out,
+    uint32_t offset,
+    Location maybe_temp ATTRIBUTE_UNUSED) {
+  vixl32::Register out_reg = RegisterFrom(out);
+  if (kEmitCompilerReadBarrier) {
+    TODO_VIXL32(FATAL);
+  } else {
+    // Plain load with no read barrier.
+    // /* HeapReference<Object> */ out = *(out + offset)
+    GetAssembler()->LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
+    GetAssembler()->MaybeUnpoisonHeapReference(out_reg);
+  }
+}
+
 void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters(
     HInstruction* instruction ATTRIBUTE_UNUSED,
     Location out,
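
GenerateReferenceLoadOneRegister only implements the no-read-barrier path for now; the MaybeUnpoisonHeapReference step it does emit matters when heap poisoning is enabled, where 32-bit heap references are stored in negated form so that accidental dereferences of poisoned values fault. A sketch of the transformation, assuming the negation scheme (simplified; not the runtime's actual helpers):

    #include <cstdint>

    // Poisoning and unpoisoning are the same involution: two's-complement negation.
    inline uint32_t PoisonHeapReference(uint32_t ref)      { return 0u - ref; }
    inline uint32_t UnpoisonHeapReference(uint32_t poison) { return 0u - poison; }
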
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index c583a44..2ccc30f 100644
@@ -18,6 +18,7 @@
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
 
 #include "code_generator_arm.h"
+#include "common_arm.h"
 #include "utils/arm/assembler_arm_vixl.h"
 
 // TODO(VIXL): make vixl clean wrt -Wshadow.
@@ -131,8 +132,11 @@ class LoadClassSlowPathARMVIXL;
   M(If)                                         \
   M(InstanceFieldGet)                           \
   M(InstanceFieldSet)                           \
+  M(InstanceOf)                                 \
   M(IntConstant)                                \
+  M(InvokeInterface)                            \
   M(InvokeStaticOrDirect)                       \
+  M(InvokeUnresolved)                           \
   M(InvokeVirtual)                              \
   M(LessThan)                                   \
   M(LessThanOrEqual)                            \
@@ -166,24 +170,25 @@ class LoadClassSlowPathARMVIXL;
   M(Throw)                                      \
   M(TryBoundary)                                \
   M(TypeConversion)                             \
+  M(UnresolvedInstanceFieldGet)                 \
+  M(UnresolvedInstanceFieldSet)                 \
+  M(UnresolvedStaticFieldGet)                   \
+  M(UnresolvedStaticFieldSet)                   \
   M(UShr)                                       \
   M(Xor)                                        \
 
 // TODO: Remove once the VIXL32 backend is implemented completely.
 #define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)   \
+  M(ArmDexCacheArraysBase)                      \
+  M(BitwiseNegatedRight)                        \
   M(BoundType)                                  \
   M(ClassTableGet)                              \
-  M(InstanceOf)                                 \
-  M(InvokeInterface)                            \
-  M(InvokeUnresolved)                           \
+  M(IntermediateAddress)                        \
   M(MonitorOperation)                           \
+  M(MultiplyAccumulate)                         \
   M(NativeDebugInfo)                            \
   M(PackedSwitch)                               \
   M(Rem)                                        \
-  M(UnresolvedInstanceFieldGet)                 \
-  M(UnresolvedInstanceFieldSet)                 \
-  M(UnresolvedStaticFieldGet)                   \
-  M(UnresolvedStaticFieldSet)                   \
 
 class CodeGeneratorARMVIXL;
 
@@ -215,6 +220,38 @@ class InvokeDexCallingConventionARMVIXL
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionARMVIXL);
 };
 
+class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionARMVIXL() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return helpers::LocationFrom(vixl::aarch32::r1);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return helpers::LocationFrom(vixl::aarch32::r0);
+  }
+  Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1)
+        : helpers::LocationFrom(vixl::aarch32::r0);
+  }
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
+        : (is_instance
+            ? helpers::LocationFrom(vixl::aarch32::r2)
+            : helpers::LocationFrom(vixl::aarch32::r1));
+  }
+  Location GetFpuLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1)
+        : helpers::LocationFrom(vixl::aarch32::s0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARMVIXL);
+};
+
 class SlowPathCodeARMVIXL : public SlowPathCode {
  public:
   explicit SlowPathCodeARMVIXL(HInstruction* instruction)
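
The FieldAccessCallingConventionARMVIXL added above pins unresolved field accesses to the ARM runtime calling convention. Reading the overrides together for one concrete case — an unresolved instance set of a 64-bit field — gives (a worked illustration of the code above, not additional API):

    // r0     <- field index       (GetFieldIndexLocation)
    // r1     <- object reference  (GetObjectLocation)
    // r2/r3  <- 64-bit value      (GetSetValueLocation(kPrimLong, /* is_instance */ true))
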
@@ -344,6 +381,19 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
                       bool value_can_be_null);
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
+  // Generate a heap reference load using one register `out`:
+  //
+  //   out <- *(out + offset)
+  //
+  // while honoring heap poisoning and/or read barriers (if any).
+  //
+  // Location `maybe_temp` is used when generating a read barrier and
+  // shall be a register in that case; it may be an invalid location
+  // otherwise.
+  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
+                                        Location out,
+                                        uint32_t offset,
+                                        Location maybe_temp);
   // Generate a heap reference load using two different registers
   // `out` and `obj`:
   //
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index 13824ad..5129daf 100644
@@ -39,22 +39,22 @@ inline dwarf::Reg DWARFReg(vixl::aarch32::SRegister reg) {
 
 inline vixl::aarch32::Register HighRegisterFrom(Location location) {
   DCHECK(location.IsRegisterPair()) << location;
-  return vixl::aarch32::Register(location.AsRegisterPairHigh<vixl32::Register>());
+  return vixl::aarch32::Register(location.AsRegisterPairHigh<vixl::aarch32::Register>());
 }
 
 inline vixl::aarch32::DRegister HighDRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegisterPair()) << location;
-  return vixl::aarch32::DRegister(location.AsFpuRegisterPairHigh<vixl32::DRegister>());
+  return vixl::aarch32::DRegister(location.AsFpuRegisterPairHigh<vixl::aarch32::DRegister>());
 }
 
 inline vixl::aarch32::Register LowRegisterFrom(Location location) {
   DCHECK(location.IsRegisterPair()) << location;
-  return vixl::aarch32::Register(location.AsRegisterPairLow<vixl32::Register>());
+  return vixl::aarch32::Register(location.AsRegisterPairLow<vixl::aarch32::Register>());
 }
 
 inline vixl::aarch32::SRegister LowSRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegisterPair()) << location;
-  return vixl::aarch32::SRegister(location.AsFpuRegisterPairLow<vixl32::SRegister>());
+  return vixl::aarch32::SRegister(location.AsFpuRegisterPairLow<vixl::aarch32::SRegister>());
 }
 
 inline vixl::aarch32::Register RegisterFrom(Location location) {
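
The common_arm.h changes above replace `vixl32::` with fully qualified `vixl::aarch32::` names, presumably because this header is now included (see the code_generator_arm_vixl.h hunk) before any `namespace vixl32 = vixl::aarch32;` alias is in scope. Typical use of the pair helpers, as in AddLocationAsTemp earlier in this commit:

    // Splitting a 64-bit value held in a core-register pair:
    //   vixl::aarch32::Register lo = LowRegisterFrom(location);   // bits 0..31
    //   vixl::aarch32::Register hi = HighRegisterFrom(location);  // bits 32..63
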
diff --git a/test/Android.arm_vixl.mk b/test/Android.arm_vixl.mk
index e562812..a167847 100644
 
 # Known broken tests for the ARM VIXL backend.
 TEST_ART_BROKEN_OPTIMIZING_ARM_VIXL_RUN_TESTS := \
-  002-sleep \
   003-omnibus-opcodes \
-  004-InterfaceTest \
-  004-JniTest \
   004-NativeAllocations \
   004-ThreadStress \
-  004-checker-UnsafeTest18 \
-  005-annotations \
-  009-instanceof \
   012-math \
   015-switch \
   021-string2 \
-  022-interface \
-  023-many-interfaces \
-  024-illegal-access \
-  025-access-controller \
   028-array-write \
-  031-class-attributes \
-  035-enum \
   036-finalizer \
   037-inherit \
   042-new-instance \
   044-proxy \
-  045-reflect-array \
-  046-reflect \
-  047-returns \
-  048-reflect-v8 \
   050-sync-test \
   051-thread \
-  052-verifier-fun \
-  053-wait-some \
-  054-uncaught \
-  055-enum-performance \
-  058-enum-order \
-  061-out-of-memory \
-  062-character-encodings \
-  063-process-manager \
-  064-field-access \
-  065-mismatched-implements \
-  066-mismatched-super \
-  067-preemptive-unpark \
   068-classloader \
-  069-field-type \
-  071-dexfile \
   074-gc-thrash \
-  075-verification-error \
   079-phantom \
   080-oom-throw \
   082-inline-execute \
   083-compiler-regressions \
-  086-null-super \
-  087-gc-after-link \
   088-monitor-verification \
-  091-override-package-private-method \
-  093-serialization \
   096-array-copy-concurrent-gc \
-  098-ddmc \
   099-vmdebug \
   103-string-append \
-  104-growth-limit \
   106-exceptions2 \
   107-int-math2 \
-  108-check-cast \
-  109-suspend-check \
-  113-multidex \
   114-ParallelGC \
   120-hashcode \
   121-modifiers \
   122-npe \
   123-compiler-regressions-mt \
   123-inline-execute2 \
-  127-checker-secondarydex \
   129-ThreadGetId \
   132-daemon-locks-shutdown \
-  133-static-invoke-super \
-  134-reg-promotion \
-  135-MirandaDispatch \
-  136-daemon-jni-shutdown \
   137-cfi \
   138-duplicate-classes-check2 \
-  140-field-packing \
   141-class-unload \
-  142-classloader2 \
   144-static-field-sigquit \
-  146-bad-interface \
-  150-loadlibrary \
-  151-OpenFileLimit \
   201-built-in-except-detail-messages \
-  304-method-tracing \
   412-new-array \
-  416-optimizing-arith-not \
   417-optimizing-arith-div \
-  422-instanceof \
   422-type-conversion \
-  423-invoke-interface \
-  424-checkcast \
-  425-invoke-super \
   426-monitor \
   428-optimizing-arith-rem \
-  430-live-register-slow-path \
-  431-type-propagation \
-  432-optimizing-cmp \
-  434-invoke-direct \
   436-rem-float \
   437-inline \
   439-npe \
@@ -126,40 +66,15 @@ TEST_ART_BROKEN_OPTIMIZING_ARM_VIXL_RUN_TESTS := \
   444-checker-nce \
   445-checker-licm \
   447-checker-inliner3 \
-  448-multiple-returns \
   449-checker-bce \
   450-checker-types \
-  452-multiple-returns2 \
-  453-not-byte \
   458-checker-instruct-simplification \
   458-long-to-fpu \
-  460-multiple-returns3 \
-  463-checker-boolean-simplifier \
-  467-regalloc-pair \
-  468-checker-bool-simplif-regression \
-  475-regression-inliner-ids \
-  477-checker-bound-type \
-  478-checker-clinit-check-pruning \
-  483-dce-block \
   485-checker-dce-switch \
-  486-checker-must-do-null-check \
   488-checker-inline-recursive-calls \
-  490-checker-inline \
-  492-checker-inline-invoke-interface \
-  493-checker-inline-invoke-interface \
-  494-checker-instanceof-tests \
-  495-checker-checkcast-tests \
-  496-checker-inlining-class-loader \
-  497-inlining-and-class-loader \
-  500-instanceof \
-  501-regression-packed-switch \
-  504-regression-baseline-entry \
   508-checker-disassembly \
   510-checker-try-catch \
   515-dce-dominator \
-  517-checker-builder-fallthrough \
-  518-null-array-get \
-  519-bound-load-class \
   520-equivalent-phi \
   522-checker-regression-monitor-exit \
   523-checker-can-throw-regression \
@@ -167,43 +82,24 @@ TEST_ART_BROKEN_OPTIMIZING_ARM_VIXL_RUN_TESTS := \
   525-checker-arrays-fields2 \
   526-checker-caller-callee-regs \
   527-checker-array-access-split \
-  528-long-hint \
-  529-checker-unresolved \
   530-checker-loops1 \
   530-checker-loops2 \
   530-checker-lse \
-  530-checker-regression-reftyp-final \
-  530-instanceof-checkcast \
-  534-checker-bce-deoptimization \
+  530-checker-lse2 \
   535-regression-const-val \
   536-checker-intrinsic-optimization \
-  536-checker-needs-access-check \
-  537-checker-inline-and-unverified \
   538-checker-embed-constants \
-  540-checker-rtp-bug \
-  541-regression-inlined-deopt \
-  542-unresolved-access-check \
   543-checker-dce-trycatch \
-  543-env-long-ref \
-  545-tracing-and-jit \
   546-regression-simplify-catch \
   550-checker-multiply-accumulate \
-  550-checker-regression-wide-store \
-  552-checker-sharpening \
-  551-invoke-super \
   552-checker-primitive-typeprop \
-  552-invoke-non-existent-super \
-  553-invoke-super \
-  554-checker-rtp-checkcast \
+  552-checker-sharpening \
   555-UnsafeGetLong-regression \
-  556-invoke-super \
   558-switch \
   560-packed-switch \
   561-divrem \
-  562-bce-preheader \
-  563-checker-fakestring \
+  562-checker-no-intermediate \
   564-checker-negbitwise \
-  566-polymorphic-inlining \
   570-checker-osr \
   570-checker-select \
   573-checker-checkcast-regression \
@@ -211,39 +107,11 @@ TEST_ART_BROKEN_OPTIMIZING_ARM_VIXL_RUN_TESTS := \
   575-checker-string-init-alias \
   580-checker-round \
   584-checker-div-bool \
-  586-checker-null-array-get \
-  587-inline-class-error \
   588-checker-irreducib-lifetime-hole \
-  589-super-imt \
-  592-checker-regression-bool-input \
   594-checker-array-alias \
-  594-invoke-super \
-  594-load-string-regression \
   597-deopt-new-string \
-  600-verifier-fails \
-  601-method-access \
   602-deoptimizeable \
-  603-checker-instanceof \
-  605-new-string-from-bytes \
-  608-checker-unresolved-lse \
-  609-checker-inline-interface \
-  610-arraycopy \
-  612-jit-dex-cache \
   700-LoadArgRegs \
   701-easy-div-rem \
   702-LargeBranchOffset \
   800-smali \
-  802-deoptimization \
-  960-default-smali \
-  963-default-range-smali \
-  965-default-verify \
-  966-default-conflict \
-  967-default-ame \
-  969-iface-super \
-  971-iface-super \
-  972-default-imt-collision \
-  972-iface-super-multidex \
-  973-default-multidex \
-  974-verify-interface-super \
-  975-iface-private \
-  979-invoke-polymorphic-accessors