Skip to content

Commit

Permalink
Revert "Allocate dex cache arrays at startup."
Browse files Browse the repository at this point in the history
This reverts commit a25aead.

Bug: 255465158

Reason for revert: b/255465158

Change-Id: Ic9c89532bfec75d2706b8dbcc599fad5c4c71735
  • Loading branch information
Nicolas Geoffray authored and Treehugger Robot committed Oct 26, 2022
1 parent 39083d5 commit cc97f11
Show file tree
Hide file tree
Showing 6 changed files with 12 additions and 97 deletions.
1 change: 1 addition & 0 deletions runtime/linear_alloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ class TrackingHeader final {

std::ostream& operator<<(std::ostream& os, LinearAllocKind value);

// TODO: Support freeing if we add class unloading.
class LinearAlloc {
public:
static constexpr size_t kAlignment = 8u;
Expand Down
12 changes: 3 additions & 9 deletions runtime/mirror/dex_cache-inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ static void InitializeArray(T*) {
}

template<typename T>
T* DexCache::AllocArray(MemberOffset obj_offset, size_t num, LinearAllocKind kind, bool startup) {
T* DexCache::AllocArray(MemberOffset obj_offset, size_t num, LinearAllocKind kind) {
Thread* self = Thread::Current();
mirror::DexCache* dex_cache = this;
if (gUseReadBarrier && self->GetIsGcMarking()) {
Expand All @@ -63,14 +63,8 @@ T* DexCache::AllocArray(MemberOffset obj_offset, size_t num, LinearAllocKind kin
dex_cache = reinterpret_cast<DexCache*>(ReadBarrier::Mark(this));
}
// DON'T USE 'this' from now on.
Runtime* runtime = Runtime::Current();
// Note: in the 1002-notify-startup test, the startup linear alloc can become null
// concurrently, even if the runtime is marked at startup. Therefore we should only
// fetch it once here.
LinearAlloc* startup_linear_alloc = runtime->GetStartupLinearAlloc();
LinearAlloc* alloc = (startup && startup_linear_alloc != nullptr)
? startup_linear_alloc
: runtime->GetClassLinker()->GetOrCreateAllocatorForClassLoader(GetClassLoader());
ClassLinker* linker = Runtime::Current()->GetClassLinker();
LinearAlloc* alloc = linker->GetOrCreateAllocatorForClassLoader(GetClassLoader());
MutexLock mu(self, *Locks::dex_cache_lock_); // Avoid allocation by multiple threads.
T* array = dex_cache->GetFieldPtr64<T*>(obj_offset);
if (array != nullptr) {
Expand Down
16 changes: 0 additions & 16 deletions runtime/mirror/dex_cache.cc
Original file line number Diff line number Diff line change
Expand Up @@ -165,21 +165,5 @@ ObjPtr<ClassLoader> DexCache::GetClassLoader() {
return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(DexCache, class_loader_));
}

bool DexCache::AtStartup() {
return !Runtime::Current()->GetStartupCompleted();
}

void DexCache::UnlinkStartupCaches() {
if (GetDexFile() == nullptr) {
// Unused dex cache.
return;
}
UnlinkStringsArrayIfStartup();
UnlinkResolvedFieldsArrayIfStartup();
UnlinkResolvedMethodsArrayIfStartup();
UnlinkResolvedTypesArrayIfStartup();
UnlinkResolvedMethodTypesArrayIfStartup();
}

} // namespace mirror
} // namespace art
30 changes: 5 additions & 25 deletions runtime/mirror/dex_cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -365,10 +365,6 @@ class MANAGED DexCache final : public Object {
void VisitNativeRoots(const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

// Sets null to dex cache array fields which were allocated with the startup
// allocator.
void UnlinkStartupCaches() REQUIRES_SHARED(Locks::mutator_lock_);

// NOLINTBEGIN(bugprone-macro-parentheses)
#define DEFINE_ARRAY(name, array_kind, getter_setter, type, ids, alloc_kind) \
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
Expand All @@ -384,10 +380,10 @@ class MANAGED DexCache final : public Object {
static constexpr MemberOffset getter_setter ##Offset() { \
return OFFSET_OF_OBJECT_MEMBER(DexCache, name); \
} \
array_kind* Allocate ##getter_setter(bool startup = false) \
array_kind* Allocate ##getter_setter() \
REQUIRES_SHARED(Locks::mutator_lock_) { \
return reinterpret_cast<array_kind*>(AllocArray<type>( \
getter_setter ##Offset(), GetDexFile()->ids(), alloc_kind, startup)); \
getter_setter ##Offset(), GetDexFile()->ids(), alloc_kind)); \
} \
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
size_t Num ##getter_setter() REQUIRES_SHARED(Locks::mutator_lock_) { \
Expand Down Expand Up @@ -447,9 +443,8 @@ class MANAGED DexCache final : public Object {
} else { \
auto* pairs = Get ##getter_setter(); \
if (pairs == nullptr) { \
bool should_allocate_full_array = ShouldAllocateFullArray(GetDexFile()->ids(), pair_size); \
if (AtStartup() || should_allocate_full_array) { \
array = Allocate ##getter_setter ##Array(!should_allocate_full_array); \
if (GetDexFile()->ids() <= pair_size) { \
array = Allocate ##getter_setter ##Array(); \
array->Set(index, resolved); \
} else { \
pairs = Allocate ##getter_setter(); \
Expand All @@ -459,12 +454,6 @@ class MANAGED DexCache final : public Object {
pairs->Set(index, resolved); \
} \
} \
} \
void Unlink ##getter_setter ##ArrayIfStartup() \
REQUIRES_SHARED(Locks::mutator_lock_) { \
if (!ShouldAllocateFullArray(GetDexFile()->ids(), pair_size)) { \
Set ##getter_setter ##Array(nullptr) ; \
} \
}

DEFINE_ARRAY(resolved_call_sites_,
Expand Down Expand Up @@ -534,7 +523,7 @@ class MANAGED DexCache final : public Object {
private:
// Allocate new array in linear alloc and save it in the given fields.
template<typename T>
T* AllocArray(MemberOffset obj_offset, size_t num, LinearAllocKind kind, bool startup = false)
T* AllocArray(MemberOffset obj_offset, size_t num, LinearAllocKind kind)
REQUIRES_SHARED(Locks::mutator_lock_);

// Visit instance fields of the dex cache as well as its associated arrays.
Expand All @@ -545,15 +534,6 @@ class MANAGED DexCache final : public Object {
void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

// Returns whether the runtime is currently in startup mode.
static bool AtStartup();

// Returns whether we should allocate a full array given the number of
// elements.
static bool ShouldAllocateFullArray(size_t number_of_elements, size_t dex_cache_size) {
return number_of_elements <= dex_cache_size;
}

HeapReference<ClassLoader> class_loader_;
HeapReference<String> location_;

Expand Down
38 changes: 3 additions & 35 deletions runtime/runtime.cc
Original file line number Diff line number Diff line change
Expand Up @@ -525,7 +525,6 @@ Runtime::~Runtime() {
// Destroy allocators before shutting down the MemMap because they may use it.
java_vm_.reset();
linear_alloc_.reset();
startup_linear_alloc_.reset();
linear_alloc_arena_pool_.reset();
arena_pool_.reset();
jit_arena_pool_.reset();
Expand Down Expand Up @@ -1749,7 +1748,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
linear_alloc_arena_pool_.reset(new MemMapArenaPool(low_4gb));
}
linear_alloc_.reset(CreateLinearAlloc());
startup_linear_alloc_.reset(CreateLinearAlloc());

small_irt_allocator_ = new SmallIrtAllocator();

Expand Down Expand Up @@ -3308,40 +3306,18 @@ void Runtime::ResetStartupCompleted() {
startup_completed_.store(false, std::memory_order_seq_cst);
}

class UnlinkStartupDexCacheVisitor : public DexCacheVisitor {
public:
void Visit(ObjPtr<mirror::DexCache> dex_cache)
REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_) override {
dex_cache->UnlinkStartupCaches();
}
};

class Runtime::NotifyStartupCompletedTask : public gc::HeapTask {
public:
NotifyStartupCompletedTask() : gc::HeapTask(/*target_run_time=*/ NanoTime()) {}

void Run(Thread* self) override {
VLOG(startup) << "NotifyStartupCompletedTask running";
Runtime* const runtime = Runtime::Current();
// Fetch the startup linear alloc before the checkpoint to play nice with
// 1002-notify-startup test which resets the startup state.
std::unique_ptr<LinearAlloc> startup_linear_alloc(runtime->ReleaseStartupLinearAlloc());
{
ScopedTrace trace("Releasing dex caches and app image spaces metadata");
ScopedTrace trace("Releasing app image spaces metadata");
ScopedObjectAccess soa(Thread::Current());

{
// Unlink dex caches that were allocated with the startup linear alloc.
UnlinkStartupDexCacheVisitor visitor;
ReaderMutexLock mu(self, *Locks::dex_lock_);
runtime->GetClassLinker()->VisitDexCaches(&visitor);
}

// Request empty checkpoints to make sure no threads are:
// - accessing the image space metadata section when we madvise it
// - accessing dex caches when we free them
//
// Use GC exclusion to prevent deadlocks that may happen if
// Request empty checkpoints to make sure no threads are accessing the image space metadata
// section when we madvise it. Use GC exclusion to prevent deadlocks that may happen if
// multiple threads are attempting to run empty checkpoints at the same time.
{
// Avoid using ScopedGCCriticalSection since that does not allow thread suspension. This is
Expand All @@ -3352,7 +3328,6 @@ class Runtime::NotifyStartupCompletedTask : public gc::HeapTask {
gc::kCollectorTypeCriticalSection);
runtime->GetThreadList()->RunEmptyCheckpoint();
}

for (gc::space::ContinuousSpace* space : runtime->GetHeap()->GetContinuousSpaces()) {
if (space->IsImageSpace()) {
gc::space::ImageSpace* image_space = space->AsImageSpace();
Expand All @@ -3368,13 +3343,6 @@ class Runtime::NotifyStartupCompletedTask : public gc::HeapTask {
ScopedTrace trace2("Delete thread pool");
runtime->DeleteThreadPool();
}

{
// We know that after the checkpoint, there is no thread that can hold
// the startup linear alloc, so it's safe to delete it now.
ScopedTrace trace2("Delete startup linear alloc");
startup_linear_alloc.reset();
}
}
};

Expand Down
12 changes: 0 additions & 12 deletions runtime/runtime.h
Original file line number Diff line number Diff line change
Expand Up @@ -820,10 +820,6 @@ class Runtime {
return linear_alloc_.get();
}

LinearAlloc* GetStartupLinearAlloc() {
return startup_linear_alloc_.get();
}

jit::JitOptions* GetJITOptions() {
return jit_options_.get();
}
Expand Down Expand Up @@ -1066,10 +1062,6 @@ class Runtime {
ThreadPool* const thread_pool_;
};

LinearAlloc* ReleaseStartupLinearAlloc() {
return startup_linear_alloc_.release();
}

bool LoadAppImageStartupCache() const {
return load_app_image_startup_cache_;
}
Expand Down Expand Up @@ -1286,10 +1278,6 @@ class Runtime {
// Shared linear alloc for now.
std::unique_ptr<LinearAlloc> linear_alloc_;

// Linear alloc used for allocations during startup. Will be deleted after
// startup.
std::unique_ptr<LinearAlloc> startup_linear_alloc_;

// The number of spins that are done before thread suspension is used to forcibly inflate.
size_t max_spins_before_thin_lock_inflation_;
MonitorList* monitor_list_;
Expand Down

0 comments on commit cc97f11

Please sign in to comment.