Mutex* Locks::jni_function_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
-Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
MutatorMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
DCHECK(unexpected_signal_lock_ == nullptr);
unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
- UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
- DCHECK(mem_maps_lock_ == nullptr);
- mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
DCHECK(logging_lock_ == nullptr);
logging_lock_ = new Mutex("logging lock", current_lock_level, true);
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
kLoggingLock = 0,
- kMemMapsLock,
kSwapMutexesLock,
kUnexpectedSignalLock,
kThreadSuspendCountLock,
// One unexpected signal at a time lock.
static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
- // Guards the maps in mem_map.
- static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
-
// Have an exclusive logging thread.
static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};
return os;
}
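// Illustrative sketch (not part of this change): the lock-hierarchy idea from
// [1] that LockLevel encodes. Every lock carries a level; a thread may only
// acquire a lock whose level is strictly below every level it already holds,
// so a lock-ordering cycle turns into an assertion failure instead of a
// potential deadlock. LeveledMutex is hypothetical and far simpler than ART's
// BaseMutex machinery; it assumes scoped (LIFO) unlock order.
#include <cassert>
#include <climits>
#include <mutex>

class LeveledMutex {
 public:
  explicit LeveledMutex(int level) : level_(level) {}

  void Lock() {
    // Refuse acquisitions that violate the hierarchy on this thread.
    assert(level_ < lowest_held_level_ && "lock acquired out of level order");
    mu_.lock();
    saved_level_ = lowest_held_level_;
    lowest_held_level_ = level_;
  }

  void Unlock() {
    lowest_held_level_ = saved_level_;
    mu_.unlock();
  }

 private:
  static thread_local int lowest_held_level_;  // Lowest level held by this thread.
  int saved_level_ = INT_MAX;                  // Only touched while mu_ is held.
  const int level_;
  std::mutex mu_;
};

thread_local int LeveledMutex::lowest_held_level_ = INT_MAX;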
+std::mutex* MemMap::mem_maps_lock_ = nullptr;
MemMap::Maps* MemMap::maps_ = nullptr;
#if USE_ART_LOW_4G_ALLOCATOR
// There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
// further.
{
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
for (auto& pair : *maps_) {
MemMap* const map = pair.second;
if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
}
// Remove it from maps_.
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
bool found = false;
DCHECK(maps_ != nullptr);
for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
CHECK_NE(base_size_, 0U);
// Add it to maps_.
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
DCHECK(maps_ != nullptr);
maps_->insert(std::make_pair(base_begin_, this));
}
}
bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
CHECK(begin_map != nullptr);
CHECK(end_map != nullptr);
CHECK(HasMemMap(begin_map));
}
void MemMap::DumpMaps(std::ostream& os, bool terse) {
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
DumpMapsLocked(os, terse);
}
}
void MemMap::Init() {
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
- if (maps_ == nullptr) {
+ if (mem_maps_lock_ != nullptr) {
// dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
- maps_ = new Maps;
+ return;
}
+ mem_maps_lock_ = new std::mutex();
+ // Not for thread safety, but for the annotation that maps_ is GUARDED_BY(mem_maps_lock_).
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ DCHECK(maps_ == nullptr);
+ maps_ = new Maps;
}
void MemMap::Shutdown() {
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
- delete maps_;
- maps_ = nullptr;
+ if (mem_maps_lock_ == nullptr) {
+ // If MemMap::Shutdown is called more than once, there is no effect.
+ return;
+ }
+ {
+ // Not for thread safety, but for the annotation that maps_ is GUARDED_BY(mem_maps_lock_).
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ DCHECK(maps_ != nullptr);
+ delete maps_;
+ maps_ = nullptr;
+ }
+ delete mem_maps_lock_;
+ mem_maps_lock_ = nullptr;
}
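// Illustrative sketch (not part of this change): the lifecycle the lazily
// created std::mutex supports. Init() is idempotent, which is what lets
// dex2oat call it again before the runtime exists, and Shutdown() tears the
// map table and the lock back down. HypotheticalEmbedderStartup is a made-up
// caller, shown only to spell out the intended call order.
void HypotheticalEmbedderStartup() {
  MemMap::Init();      // First call: allocates mem_maps_lock_ and maps_.
  MemMap::Init();      // Second call: mem_maps_lock_ != nullptr, returns early.
  // ... create and use MemMap objects here ...
  MemMap::Shutdown();  // Deletes maps_, then deletes and clears the lock.
  MemMap::Shutdown();  // No effect once mem_maps_lock_ is nullptr.
}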
void MemMap::SetSize(size_t new_size) {
if (low_4gb && addr == nullptr) {
bool first_run = true;
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
// Use maps_ as an optimization to skip over large maps.
// Find the first map which is address > ptr.
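// Illustrative sketch (not part of this change) of the skip described above:
// rather than probing every page, consult maps_ (with mem_maps_lock_ held) for
// the mapping just below a candidate address and jump straight past it when it
// still covers ptr. SkipPastCoveringMap is a hypothetical helper that would
// need friend access to MemMap::Maps; the real loop does this inline and also
// cross-checks the resulting gap against BacktraceMap. RoundUp and kPageSize
// come from ART's utility headers.
static uintptr_t SkipPastCoveringMap(const MemMap::Maps& maps, uintptr_t ptr) {
  auto it = maps.upper_bound(reinterpret_cast<void*>(ptr));  // First map with base > ptr.
  if (it == maps.begin()) {
    return ptr;  // Nothing mapped at or below ptr.
  }
  --it;  // The mapping whose base address is closest below (or equal to) ptr.
  MemMap* const below = it->second;
  const uintptr_t end =
      reinterpret_cast<uintptr_t>(below->BaseBegin()) + below->BaseSize();
  // If the preceding map still covers ptr, resume the search just past its end.
  return end > ptr ? RoundUp(end, kPageSize) : ptr;
}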
#include <string>
#include <map>
+#include <mutex>
#include <stddef.h>
#include <sys/mman.h> // For the PROT_* and MAP_* constants.
std::string* error_msg);
// Releases the memory mapping.
- ~MemMap() REQUIRES(!Locks::mem_maps_lock_);
+ ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
const std::string& GetName() const {
return name_;
bool use_ashmem = true);
static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
- REQUIRES(!Locks::mem_maps_lock_);
+ REQUIRES(!MemMap::mem_maps_lock_);
static void DumpMaps(std::ostream& os, bool terse = false)
- REQUIRES(!Locks::mem_maps_lock_);
+ REQUIRES(!MemMap::mem_maps_lock_);
typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
- static void Init() REQUIRES(!Locks::mem_maps_lock_);
- static void Shutdown() REQUIRES(!Locks::mem_maps_lock_);
+ // Init and Shutdown are NOT thread safe.
+ // Both may be called multiple times and MemMap objects may be created any
+ // time after the first call to Init and before the first call to Shutdown.
+ static void Init() REQUIRES(!MemMap::mem_maps_lock_);
+ static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
// If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
// faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
size_t base_size,
int prot,
bool reuse,
- size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_);
+ size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
static void DumpMapsLocked(std::ostream& os, bool terse)
- REQUIRES(Locks::mem_maps_lock_);
+ REQUIRES(MemMap::mem_maps_lock_);
static bool HasMemMap(MemMap* map)
- REQUIRES(Locks::mem_maps_lock_);
+ REQUIRES(MemMap::mem_maps_lock_);
static MemMap* GetLargestMemMapAt(void* address)
- REQUIRES(Locks::mem_maps_lock_);
+ REQUIRES(MemMap::mem_maps_lock_);
static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
- REQUIRES(!Locks::mem_maps_lock_);
+ REQUIRES(!MemMap::mem_maps_lock_);
// Internal version of mmap that supports low 4gb emulation.
static void* MapInternal(void* addr,
static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
#endif
+ static std::mutex* mem_maps_lock_;
+
// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
- static Maps* maps_ GUARDED_BY(Locks::mem_maps_lock_);
+ static Maps* maps_ GUARDED_BY(MemMap::mem_maps_lock_);
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
};
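// Illustrative sketch (not part of this change): the annotation pattern the
// header above relies on, reduced to a self-contained class. GUARDED_BY and
// REQUIRES are the usual clang -Wthread-safety macros (ART defines them in its
// own headers); the analysis understands std::mutex and std::lock_guard when
// libc++ is built with thread-safety annotations, as it is on Android. The
// Registry class below is hypothetical.
#include <map>
#include <mutex>
#include <ostream>
#include <string>

#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))

class Registry {
 public:
  // Public entry point: callers must not already hold lock_.
  void Dump(std::ostream& os) REQUIRES(!lock_) {
    std::lock_guard<std::mutex> mu(lock_);
    DumpLocked(os);
  }

 private:
  // Internal helper: only callable with lock_ held, mirroring DumpMapsLocked.
  void DumpLocked(std::ostream& os) REQUIRES(lock_) {
    for (const auto& entry : entries_) {
      os << entry.first << " -> " << entry.second << "\n";
    }
  }

  std::mutex lock_;
  std::map<std::string, int> entries_ GUARDED_BY(lock_);  // Flagged on unlocked access.
};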