From 381e4ca3cc9fc6405ff20e1de873a56d78d51923 Mon Sep 17 00:00:00 2001
From: Fred Shih
Date: Mon, 25 Aug 2014 17:24:27 -0700
Subject: [PATCH] Filling hole between subclass and superclass.

Subclasses no longer need to be 4-byte aligned at the end. Any gaps
between a superclass and its subclasses will be filled in by halfword or
byte fields if possible. Refactored the alignment and shuffling methods
to use a priority queue in order to reduce the amount of logic when
laying out objects.

Change-Id: Ifed71af534e0c5e77bb14555c44b973fe66df6da
---
 runtime/class_linker.cc    | 196 +++++++++++++++++++++++----------------------
 runtime/class_linker.h     |  13 ---
 runtime/mirror/class-inl.h |   5 --
 3 files changed, 102 insertions(+), 112 deletions(-)
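Note for reviewers: the following is a minimal standalone sketch of the layout
scheme the commit message describes, not ART code. Gap, GapCompare, AddGap,
Field, and the example field sizes are invented for illustration; ART's
MemberOffset, ArtField, and ObjectArray machinery is replaced by plain
integers and std::vector. It shows the two moves the patch combines: padding
is remembered as aligned gap chunks in a priority queue ordered largest-first,
and later passes of smaller fields consume the largest usable gap before
growing the object.

#include <cstdint>
#include <iostream>
#include <queue>
#include <string>
#include <vector>

// A free chunk between already-placed fields.
struct Gap {
  uint32_t start;
  uint32_t size;  // 1, 2, or 4 bytes.
};

// std::priority_queue keeps the element that compares greatest on top, so
// ordering gaps by "less than by size" surfaces the largest gap first.
// Ties break toward the smallest start offset for deterministic output.
struct GapCompare {
  bool operator()(const Gap& lhs, const Gap& rhs) const {
    return lhs.size < rhs.size || (lhs.size == rhs.size && lhs.start > rhs.start);
  }
};
using Gaps = std::priority_queue<Gap, std::vector<Gap>, GapCompare>;

// Split [start, end) into the largest aligned chunks, like AddFieldGap below.
void AddGap(uint32_t start, uint32_t end, Gaps* gaps) {
  while (start != end) {
    uint32_t remaining = end - start;
    uint32_t chunk = (remaining >= 4 && start % 4 == 0) ? 4
                     : (remaining >= 2 && start % 2 == 0) ? 2 : 1;
    gaps->push(Gap{start, chunk});
    start += chunk;
  }
}

struct Field {
  std::string name;
  uint32_t size;  // Power of two; doubles as the alignment requirement.
};

int main() {
  // Superclass data ends at offset 9; subclass fields arrive sorted by
  // decreasing size, as LinkFields sorts them before shuffling.
  uint32_t offset = 9;
  Gaps gaps;
  std::vector<Field> fields = {{"j", 8}, {"s", 2}, {"b1", 1}, {"b2", 1}};
  for (const Field& f : fields) {
    if (!gaps.empty() && gaps.top().size >= f.size) {
      // Take the largest gap; requeue whatever tail is left over.
      Gap g = gaps.top();
      gaps.pop();
      std::cout << f.name << " -> offset " << g.start << "  (fills gap)\n";
      if (g.size > f.size) {
        AddGap(g.start + f.size, g.start + g.size, &gaps);
      }
    } else {
      // No usable gap: align up and record the padding as new gaps.
      uint32_t aligned = (offset + f.size - 1) & ~(f.size - 1);
      if (aligned != offset) {
        AddGap(offset, aligned, &gaps);
      }
      std::cout << f.name << " -> offset " << aligned << "\n";
      offset = aligned + f.size;
    }
  }
  return 0;
}

Compiled with any C++11 compiler, this places j at offset 16 (after padding
9..16), then packs s, b1, and b2 back into that hole at offsets 12, 10, and
14; the object ends at offset 24 with no trailing padding. Splitting every
hole into aligned power-of-two chunks is what lets gaps be handed out without
alignment re-checks: a 4-byte chunk is always 4-aligned and a 2-byte chunk
2-aligned, so any field no larger than the chunk fits.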
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4123fc639..fdf2958b1 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -18,6 +18,7 @@
 #include <deque>
 #include <iostream>
 #include <memory>
+#include <queue>
 #include <string>
 #include <utility>
 #include <vector>
@@ -133,6 +134,88 @@ static size_t Hash(const char* s) {
   return hash;
 }
 
+// Gap between two fields in object layout.
+struct FieldGap {
+  uint32_t start_offset;  // The offset from the start of the object.
+  uint32_t size;  // The gap size of 1, 2, or 4 bytes.
+};
+struct FieldGapsComparator {
+  explicit FieldGapsComparator() {
+  }
+  bool operator() (const FieldGap& lhs, const FieldGap& rhs)
+      NO_THREAD_SAFETY_ANALYSIS {
+    // Sort by gap size, largest first (std::priority_queue surfaces its greatest element).
+    return lhs.size < rhs.size;
+  }
+};
+typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps;
+
+// Adds largest aligned gaps to queue of gaps.
+void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) {
+  DCHECK(gaps != nullptr);
+
+  uint32_t current_offset = gap_start;
+  while (current_offset != gap_end) {
+    size_t remaining = gap_end - current_offset;
+    if (remaining >= sizeof(uint32_t) && IsAligned<4>(current_offset)) {
+      gaps->push(FieldGap {current_offset, sizeof(uint32_t)});
+      current_offset += sizeof(uint32_t);
+    } else if (remaining >= sizeof(uint16_t) && IsAligned<2>(current_offset)) {
+      gaps->push(FieldGap {current_offset, sizeof(uint16_t)});
+      current_offset += sizeof(uint16_t);
+    } else {
+      gaps->push(FieldGap {current_offset, sizeof(uint8_t)});
+      current_offset += sizeof(uint8_t);
+    }
+    DCHECK_LE(current_offset, gap_end) << "Overran gap";
+  }
+}
+// Shuffle fields forward, making use of gaps whenever possible.
+template<int n>
+static void ShuffleForward(const size_t num_fields, size_t* current_field_idx,
+                           MemberOffset* field_offset,
+                           mirror::ObjectArray<mirror::ArtField>* fields,
+                           std::deque<mirror::ArtField*>* grouped_and_sorted_fields,
+                           FieldGaps* gaps)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(current_field_idx != nullptr);
+  DCHECK(grouped_and_sorted_fields != nullptr);
+  DCHECK(fields != nullptr || (num_fields == 0 && grouped_and_sorted_fields->empty()));
+  DCHECK(gaps != nullptr);
+  DCHECK(field_offset != nullptr);
+
+  DCHECK(IsPowerOfTwo(n));
+  while (!grouped_and_sorted_fields->empty()) {
+    mirror::ArtField* field = grouped_and_sorted_fields->front();
+    Primitive::Type type = field->GetTypeAsPrimitiveType();
+    if (Primitive::ComponentSize(type) < n) {
+      break;  // All fields of size n or larger have been placed.
+    }
+    if (!IsAligned<n>(field_offset->Uint32Value())) {
+      MemberOffset old_offset = *field_offset;
+      *field_offset = MemberOffset(RoundUp(field_offset->Uint32Value(), n));
+      AddFieldGap(old_offset.Uint32Value(), field_offset->Uint32Value(), gaps);
+    }
+    CHECK(type != Primitive::kPrimNot) << PrettyField(field);  // should be primitive types
+    grouped_and_sorted_fields->pop_front();
+    fields->Set(*current_field_idx, field);
+    if (!gaps->empty() && gaps->top().size >= n) {
+      FieldGap gap = gaps->top();
+      gaps->pop();
+      DCHECK(IsAligned<n>(gap.start_offset));
+      field->SetOffset(MemberOffset(gap.start_offset));
+      if (gap.size > n) {  // Return the unused tail of the gap to the queue.
+        AddFieldGap(gap.start_offset + n, gap.start_offset + gap.size, gaps);
+      }
+    } else {
+      DCHECK(IsAligned<n>(field_offset->Uint32Value()));
+      field->SetOffset(*field_offset);
+      *field_offset = MemberOffset(field_offset->Uint32Value() + n);
+    }
+    ++(*current_field_idx);
+  }
+}
+
 const char* ClassLinker::class_roots_descriptors_[] = {
   "Ljava/lang/Class;",
   "Ljava/lang/Object;",
@@ -2483,88 +2566,7 @@ void ClassLinker::LinkCode(ConstHandle<mirror::ArtMethod> method,
                            have_portable_code);
 }
 
-template <int n>
-void ClassLinker::AlignFields(size_t& current_field, const size_t num_fields,
-                              MemberOffset& field_offset,
-                              mirror::ObjectArray<mirror::ArtField>* fields,
-                              std::deque<mirror::ArtField*>& grouped_and_sorted_fields) {
-  if (current_field != num_fields && !IsAligned<n>(field_offset.Uint32Value())) {
-    size_t gap = (n - (field_offset.Uint32Value() & (n - 1)));
-    // Avoid padding unless a field that requires alignment actually exists.
-    bool needs_padding = false;
-    for (size_t i = 0; i < grouped_and_sorted_fields.size(); ++i) {
-      mirror::ArtField* field = grouped_and_sorted_fields[i];
-      Primitive::Type type = field->GetTypeAsPrimitiveType();
-      CHECK(type != Primitive::kPrimNot) << PrettyField(field);  // should be primitive types
-      // Too big to fill the gap.
-      if (Primitive::ComponentSize(type) >= n) {
-        needs_padding = true;
-        continue;
-      }
-      if (needs_padding) {
-        // Shift as many fields as possible to fill the gaps.
-        size_t cursor = i;
-        mirror::ArtField* shift_field;
-        Primitive::Type shift_type;
-        while (cursor < grouped_and_sorted_fields.size() && gap > 0) {
-          // Find a field that can fit in the current gap.
-          do {
-            DCHECK_LT(cursor, grouped_and_sorted_fields.size()) << "Cursor overran fields.";
-            shift_field = grouped_and_sorted_fields[cursor];
-            shift_type = shift_field->GetTypeAsPrimitiveType();
-            CHECK(shift_type != Primitive::kPrimNot) << PrettyField(shift_field);
-            // Can fit.
-            if (Primitive::ComponentSize(shift_type) <= gap) {
-              break;
-            }
-            ++cursor;
-          } while (cursor < grouped_and_sorted_fields.size());
-
-          if (cursor < grouped_and_sorted_fields.size()) {
-            fields->Set(current_field++, shift_field);
-            shift_field->SetOffset(field_offset);
-            field_offset = MemberOffset(field_offset.Uint32Value() +
-                                        Primitive::ComponentSize(shift_type));
-            gap -= Primitive::ComponentSize(shift_type);
-            grouped_and_sorted_fields.erase(grouped_and_sorted_fields.begin() + cursor);
-          }
-        }
-      }
-      break;
-    }
-    // No further shuffling available, pad whatever space is left.
-    if (needs_padding) {
-      field_offset = MemberOffset(field_offset.Uint32Value() + gap);
-    }
-    DCHECK(!needs_padding || IsAligned<n>(field_offset.Uint32Value())) << "Needed " <<
-        n << " byte alignment, but not aligned after align with offset: " <<
-        field_offset.Uint32Value();
-  }
-}
-
-template <int n>
-void ClassLinker::ShuffleForward(size_t& current_field, const size_t num_fields,
-                                 MemberOffset& field_offset,
-                                 mirror::ObjectArray<mirror::ArtField>* fields,
-                                 std::deque<mirror::ArtField*>& grouped_and_sorted_fields) {
-  while (!grouped_and_sorted_fields.empty() && current_field != num_fields) {
-    mirror::ArtField* field = grouped_and_sorted_fields.front();
-    Primitive::Type type = field->GetTypeAsPrimitiveType();
-    CHECK(type != Primitive::kPrimNot) << PrettyField(field);  // should be primitive types
-    if (Primitive::ComponentSize(type) != n) {
-      DCHECK_LT(Primitive::ComponentSize(type), static_cast<size_t>(n)) <<
-          "Encountered a larger field (" << Primitive::ComponentSize(type) << ") " <<
-          "while shuffling fields of size: " << n;
-      // We should've shuffled all fields of size n forward by this point.
-      break;
-    }
-    DCHECK(IsAligned<n>(field_offset.Uint32Value()));
-    grouped_and_sorted_fields.pop_front();
-    fields->Set(current_field++, field);
-    field->SetOffset(field_offset);
-    field_offset = MemberOffset(field_offset.Uint32Value() + n);
-  }
-}
 
 void ClassLinker::LoadClass(const DexFile& dex_file,
                             const DexFile::ClassDef& dex_class_def,
@@ -4830,9 +4832,11 @@ bool ClassLinker::LinkFields(ConstHandle<mirror::Class> klass, bool is_static, s
   std::sort(grouped_and_sorted_fields.begin(), grouped_and_sorted_fields.end(),
             LinkFieldsComparator());
 
-  // References should be at the front, unless we need to pad.
+  // References should be at the front.
   size_t current_field = 0;
   size_t num_reference_fields = 0;
+  FieldGaps gaps;
+
   for (; current_field < num_fields; current_field++) {
     mirror::ArtField* field = grouped_and_sorted_fields.front();
     Primitive::Type type = field->GetTypeAsPrimitiveType();
@@ -4840,27 +4844,31 @@ bool ClassLinker::LinkFields(ConstHandle<mirror::Class> klass, bool is_static, s
     bool isPrimitive = type != Primitive::kPrimNot;
     if (isPrimitive) {
       break;  // past last reference, move on to the next phase
     }
+    if (UNLIKELY(!IsAligned<4>(field_offset.Uint32Value()))) {
+      MemberOffset old_offset = field_offset;
+      field_offset = MemberOffset(RoundUp(field_offset.Uint32Value(), 4));
+      AddFieldGap(old_offset.Uint32Value(), field_offset.Uint32Value(), &gaps);
+    }
+    DCHECK(IsAligned<4>(field_offset.Uint32Value()));
     grouped_and_sorted_fields.pop_front();
     num_reference_fields++;
     fields->Set(current_field, field);
     field->SetOffset(field_offset);
     field_offset = MemberOffset(field_offset.Uint32Value() + sizeof(uint32_t));
   }
-
-  AlignFields<8>(current_field, num_fields, field_offset, fields, grouped_and_sorted_fields);
-  ShuffleForward<8>(current_field, num_fields, field_offset, fields, grouped_and_sorted_fields);
-  // No need for further alignment, start of object is 4-byte aligned.
-  ShuffleForward<4>(current_field, num_fields, field_offset, fields, grouped_and_sorted_fields);
-  ShuffleForward<2>(current_field, num_fields, field_offset, fields, grouped_and_sorted_fields);
-  ShuffleForward<1>(current_field, num_fields, field_offset, fields, grouped_and_sorted_fields);
+  // Gaps are stored as a max heap, which means that we must shuffle from largest to smallest;
+  // otherwise we could end up with suboptimal gap fills.
+  ShuffleForward<8>(num_fields, &current_field, &field_offset,
+                    fields, &grouped_and_sorted_fields, &gaps);
+  ShuffleForward<4>(num_fields, &current_field, &field_offset,
+                    fields, &grouped_and_sorted_fields, &gaps);
+  ShuffleForward<2>(num_fields, &current_field, &field_offset,
+                    fields, &grouped_and_sorted_fields, &gaps);
+  ShuffleForward<1>(num_fields, &current_field, &field_offset,
+                    fields, &grouped_and_sorted_fields, &gaps);
   CHECK(grouped_and_sorted_fields.empty()) << "Missed " << grouped_and_sorted_fields.size()
                                            << " fields.";
-  // Subclass expects superclass to be 4 byte aligned at end.
-  if (!IsAligned<4>(field_offset.Uint32Value())) {
-    field_offset = MemberOffset(RoundUp(field_offset.Uint32Value(), 4));
-  }
-  CHECK(IsAligned<4>(field_offset.Uint32Value()));
   Thread::Current()->EndAssertNoThreadSuspension(old_no_suspend_cause);
 
   // We lie to the GC about the java.lang.ref.Reference.referent field, so it doesn't scan it.
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 52ecff617..c7d60608a 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -527,19 +527,6 @@ class ClassLinker {
   void LinkCode(ConstHandle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
                 const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  template <int n>
-  void AlignFields(size_t& current_field, const size_t num_fields,
-                   MemberOffset& field_offset,
-                   mirror::ObjectArray<mirror::ArtField>* fields,
-                   std::deque<mirror::ArtField*>& grouped_and_sorted_fields)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  template <int n>
-  void ShuffleForward(size_t& current_field, const size_t num_fields,
-                      MemberOffset& field_offset,
-                      mirror::ObjectArray<mirror::ArtField>* fields,
-                      std::deque<mirror::ArtField*>& grouped_and_sorted_fields)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   void CreateReferenceInstanceOffsets(ConstHandle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void CreateReferenceStaticOffsets(ConstHandle<mirror::Class> klass)
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 52dd0eed3..726e92859 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -599,11 +599,6 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
                 (num_16bit_static_fields * sizeof(uint16_t)) +
                 (num_32bit_static_fields * sizeof(uint32_t)) +
                 (num_64bit_static_fields * sizeof(uint64_t));
-  // For now, the start of a subclass expects to be 4-byte aligned; pad end of object to ensure
-  // alignment.
-  if (!IsAligned<4>(size)) {
-    size = RoundUp(size, 4);
-  }
   return size;
 }
 
-- 
2.11.0