1 //===--------- device.cpp - Target independent OpenMP target RTL ----------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // Functionality for managing devices that are handled by RTL plugins.
11 //===----------------------------------------------------------------------===//
14 #include "MemoryManager.h"
/// Copy constructor: copies the device identity (DeviceID, RTL, RTLDeviceID),
/// init state, and all host<->target bookkeeping tables from D.
/// The synchronization members (InitFlag and the three mutexes) are
/// default-constructed rather than copied, and the new object starts with no
/// MemoryManager (set to nullptr).
23 DeviceTy::DeviceTy(const DeviceTy &D)
24 : DeviceID(D.DeviceID), RTL(D.RTL), RTLDeviceID(D.RTLDeviceID),
25 IsInit(D.IsInit), InitFlag(), HasPendingGlobals(D.HasPendingGlobals),
26 HostDataToTargetMap(D.HostDataToTargetMap),
27 PendingCtorsDtors(D.PendingCtorsDtors), ShadowPtrMap(D.ShadowPtrMap),
28 DataMapMtx(), PendingGlobalsMtx(), ShadowMtx(),
29 LoopTripCnt(D.LoopTripCnt), MemoryManager(nullptr) {}
/// Copy assignment: mirrors the copy constructor, assigning identity and the
/// bookkeeping tables member-by-member (mutexes and InitFlag are not assigned).
31 DeviceTy &DeviceTy::operator=(const DeviceTy &D) {
32 DeviceID = D.DeviceID;
// NOTE(review): some assignments (original lines 33/35, presumably RTL and
// IsInit) plus the trailing `return *this;` are elided in this view — confirm
// against the full source.
34 RTLDeviceID = D.RTLDeviceID;
36 HasPendingGlobals = D.HasPendingGlobals;
37 HostDataToTargetMap = D.HostDataToTargetMap;
38 PendingCtorsDtors = D.PendingCtorsDtors;
39 ShadowPtrMap = D.ShadowPtrMap;
40 LoopTripCnt = D.LoopTripCnt;
/// Construct an uninitialized device bound to plugin \p RTL: IDs start at -1
/// (not yet assigned), IsInit is false, all mapping tables are empty, and no
/// MemoryManager is created until init().
45 DeviceTy::DeviceTy(RTLInfoTy *RTL)
46 : DeviceID(-1), RTL(RTL), RTLDeviceID(-1), IsInit(false), InitFlag(),
47 HasPendingGlobals(false), HostDataToTargetMap(), PendingCtorsDtors(),
48 ShadowPtrMap(), DataMapMtx(), PendingGlobalsMtx(), ShadowMtx(),
49 MemoryManager(nullptr) {}
/// Destructor: if the device was actually assigned (DeviceID != -1) and the
/// user requested OMP_INFOTYPE_DUMP_TABLE via the info level, dump the final
/// host->target pointer mapping table for debugging before teardown.
51 DeviceTy::~DeviceTy() {
// Early-out branch body (presumably `return;`) is elided in this view.
52 if (DeviceID == -1 || !(getInfoLevel() & OMP_INFOTYPE_DUMP_TABLE))
// Synthetic source location used for the dump output.
55 ident_t loc = {0, 0, 0, 0, ";libomptarget;libomptarget;0;0;;"};
56 dumpTargetPointerMappings(&loc, *this);
/// Implements omp_target_associate_ptr for this device: tie the host range
/// [HstPtrBegin, HstPtrBegin+Size) to the device pointer TgtPtrBegin.
/// Re-associating the exact same range/target is a no-op success; associating
/// a different target with an already-associated host pointer is an error.
/// New entries are created with an infinite reference count so regular
/// map/unmap reference counting never frees them.
59 int DeviceTy::associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size) {
62 // Check if entry exists
63 auto search = HostDataToTargetMap.find(HstPtrBeginTy{(uintptr_t)HstPtrBegin});
64 if (search != HostDataToTargetMap.end()) {
65 // Mapping already exists
// Valid only if both the extent and the target address match exactly.
66 bool isValid = search->HstPtrEnd == (uintptr_t)HstPtrBegin + Size &&
67 search->TgtPtrBegin == (uintptr_t)TgtPtrBegin;
70 DP("Attempt to re-associate the same device ptr+offset with the same "
71 "host ptr, nothing to do\n");
72 return OFFLOAD_SUCCESS;
74 REPORT("Not allowed to re-associate a different device ptr+offset with "
75 "the same host ptr\n");
80 // Mapping does not exist, allocate it with refCount=INF
// HstPtrBase == HstPtrBegin here: user associations have no separate base.
81 HostDataToTargetTy newEntry((uintptr_t)HstPtrBegin /*HstPtrBase*/,
82 (uintptr_t)HstPtrBegin /*HstPtrBegin*/,
83 (uintptr_t)HstPtrBegin + Size /*HstPtrEnd*/,
84 (uintptr_t)TgtPtrBegin /*TgtPtrBegin*/, nullptr,
85 true /*IsRefCountINF*/);
87 DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", HstEnd="
88 DPxMOD ", TgtBegin=" DPxMOD "\n", DPxPTR(newEntry.HstPtrBase),
89 DPxPTR(newEntry.HstPtrBegin), DPxPTR(newEntry.HstPtrEnd),
90 DPxPTR(newEntry.TgtPtrBegin));
91 HostDataToTargetMap.insert(newEntry);
95 return OFFLOAD_SUCCESS;
/// Implements omp_target_disassociate_ptr: remove the association created by
/// associatePtr for HstPtrBegin. Only entries with an infinite reference count
/// (i.e. created via omp_target_associate_ptr) may be removed here; anything
/// else is reported as an error.
98 int DeviceTy::disassociatePtr(void *HstPtrBegin) {
101 auto search = HostDataToTargetMap.find(HstPtrBeginTy{(uintptr_t)HstPtrBegin});
102 if (search != HostDataToTargetMap.end()) {
// Infinite refcount marks a user association (see associatePtr).
104 if (search->isRefCountInf()) {
105 DP("Association found, removing it\n");
106 HostDataToTargetMap.erase(search);
108 return OFFLOAD_SUCCESS;
110 REPORT("Trying to disassociate a pointer which was not mapped via "
111 "omp_target_associate_ptr\n");
117 REPORT("Association not found\n");
121 // Get ref count of map entry containing HstPtrBegin
// Looks up the map entry whose half-open range [HstPtrBegin, HstPtrEnd)
// contains the given host address and returns its reference count.
122 uint64_t DeviceTy::getMapEntryRefCnt(void *HstPtrBegin) {
123 uintptr_t hp = (uintptr_t)HstPtrBegin;
127 if (!HostDataToTargetMap.empty()) {
128 auto upper = HostDataToTargetMap.upper_bound(hp);
129 if (upper != HostDataToTargetMap.begin()) {
// NOTE(review): a line is elided here (original line 130) — presumably the
// iterator is stepped back (cf. std::prev(upper) in lookupMapping) before the
// containment check below; confirm against the full source.
131 if (hp >= upper->HstPtrBegin && hp < upper->HstPtrEnd) {
132 DP("DeviceTy::getMapEntry: requested entry found\n");
133 RefCnt = upper->getRefCount();
140 DP("DeviceTy::getMapEntry: requested entry not found\n");
/// Look up the host range [HstPtrBegin, HstPtrBegin+Size) in the
/// host->target map. Returns a LookupResult whose Entry points at the
/// candidate map entry and whose Flags classify the overlap:
///   IsContained   - the whole range lies inside an existing entry;
///   ExtendsBefore - the range starts before an entry but overlaps it;
///   ExtendsAfter  - the range starts inside/before an entry but runs past
///                   its end.
146 LookupResult DeviceTy::lookupMapping(void *HstPtrBegin, int64_t Size) {
147 uintptr_t hp = (uintptr_t)HstPtrBegin;
150 DP("Looking up mapping(HstPtrBegin=" DPxMOD ", Size=%" PRId64 ")...\n",
153 if (HostDataToTargetMap.empty())
// upper_bound gives the first entry starting strictly after hp; the entry
// that could contain hp is therefore its predecessor ("left bin").
156 auto upper = HostDataToTargetMap.upper_bound(hp);
157 // check the left bin
158 if (upper != HostDataToTargetMap.begin()) {
159 lr.Entry = std::prev(upper);
160 auto &HT = *lr.Entry;
162 lr.Flags.IsContained = hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd &&
163 (hp+Size) <= HT.HstPtrEnd;
164 // Does it extend beyond the mapped region?
165 lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
168 // check the right bin
// Only consulted when the left bin neither contains nor is extended past.
169 if (!(lr.Flags.IsContained || lr.Flags.ExtendsAfter) &&
170 upper != HostDataToTargetMap.end()) {
// NOTE(review): original line 171 is elided — presumably `lr.Entry = upper;`
// so the reference below aliases the right bin; confirm against full source.
172 auto &HT = *lr.Entry;
173 // Does it extend into an already mapped region?
174 lr.Flags.ExtendsBefore = hp < HT.HstPtrBegin && (hp+Size) > HT.HstPtrBegin;
175 // Does it extend beyond the mapped region?
176 lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp+Size) > HT.HstPtrEnd;
179 if (lr.Flags.ExtendsBefore) {
180 DP("WARNING: Pointer is not mapped but section extends into already "
183 if (lr.Flags.ExtendsAfter) {
184 DP("WARNING: Pointer is already mapped but section extends beyond mapped "
191 // Used by targetDataBegin
192 // Return the target pointer begin (where the data will be moved).
193 // Allocate memory if this is the first occurrence of this mapping.
194 // Increment the reference counter.
195 // If NULL is returned, then either data allocation failed or the user tried
196 // to do an illegal mapping.
//
// Out-parameters: IsNew is set when a fresh entry is created; IsHostPtr is set
// when the host address itself is returned (unified shared memory path).
// IsImplicit relaxes the containment rules for compiler-generated maps;
// HasPresentModifier turns a missing mapping into a hard error.
197 void *DeviceTy::getOrAllocTgtPtr(void *HstPtrBegin, void *HstPtrBase,
198 int64_t Size, map_var_info_t HstPtrName,
199 bool &IsNew, bool &IsHostPtr, bool IsImplicit,
200 bool UpdateRefCount, bool HasCloseModifier,
201 bool HasPresentModifier) {
// (Lock acquisition on the data map, original lines 202-205, is elided in
// this view — confirm against the full source.)
206 LookupResult lr = lookupMapping(HstPtrBegin, Size);
208 // Check if the pointer is contained.
209 // If a variable is mapped to the device manually by the user - which would
210 // lead to the IsContained flag to be true - then we must ensure that the
211 // device address is returned even under unified memory conditions.
212 if (lr.Flags.IsContained ||
213 ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && IsImplicit)) {
214 auto &HT = *lr.Entry;
// Translate the host address to its device counterpart by offsetting from
// the entry's base mapping.
220 uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
221 if (getDebugLevel() || getInfoLevel() & OMP_INFOTYPE_MAPPING_EXISTS)
223 "Mapping exists%s with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
225 "Size=%" PRId64 ",%s RefCount=%s, Name=%s\n",
226 (IsImplicit ? " (implicit)" : ""), DPxPTR(HstPtrBegin), DPxPTR(tp),
227 Size, (UpdateRefCount ? " updated" : ""),
228 HT.isRefCountInf() ? "INF"
229 : std::to_string(HT.getRefCount()).c_str(),
230 (HstPtrName) ? getNameFromMapping(HstPtrName).c_str() : "unknown");
232 } else if ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && !IsImplicit) {
233 // Explicit extension of mapped data - not allowed.
234 MESSAGE("explicit extension not allowed: host address specified is " DPxMOD
235 " (%" PRId64 " bytes), but device allocation maps to host at "
236 DPxMOD " (%" PRId64 " bytes)",
237 DPxPTR(HstPtrBegin), Size, DPxPTR(lr.Entry->HstPtrBegin),
238 lr.Entry->HstPtrEnd - lr.Entry->HstPtrBegin);
239 if (HasPresentModifier)
240 MESSAGE("device mapping required by 'present' map type modifier does not "
241 "exist for host address " DPxMOD " (%" PRId64 " bytes)",
242 DPxPTR(HstPtrBegin), Size);
243 } else if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
245 // If unified shared memory is active, implicitly mapped variables that are
246 // not privatized use host address. Any explicitly mapped variables also use
247 // host address where correctness is not impeded. In all other cases maps
249 // In addition to the mapping rules above, the close map modifier forces the
250 // mapping of the variable to the device.
252 DP("Return HstPtrBegin " DPxMOD " Size=%" PRId64 " RefCount=%s\n",
253 DPxPTR((uintptr_t)HstPtrBegin), Size,
254 (UpdateRefCount ? " updated" : ""));
258 } else if (HasPresentModifier) {
259 DP("Mapping required by 'present' map type modifier does not exist for "
260 "HstPtrBegin=" DPxMOD ", Size=%" PRId64 "\n",
261 DPxPTR(HstPtrBegin), Size);
262 MESSAGE("device mapping required by 'present' map type modifier does not "
263 "exist for host address " DPxMOD " (%" PRId64 " bytes)",
264 DPxPTR(HstPtrBegin), Size);
266 // If it is not contained and Size > 0, we should create a new entry for it.
// Fresh allocation path: allocate device memory and record the new mapping.
268 uintptr_t tp = (uintptr_t)allocData(Size, HstPtrBegin);
269 DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", "
270 "HstEnd=" DPxMOD ", TgtBegin=" DPxMOD "\n",
271 DPxPTR(HstPtrBase), DPxPTR(HstPtrBegin),
272 DPxPTR((uintptr_t)HstPtrBegin + Size), DPxPTR(tp));
273 HostDataToTargetMap.emplace(
274 HostDataToTargetTy((uintptr_t)HstPtrBase, (uintptr_t)HstPtrBegin,
275 (uintptr_t)HstPtrBegin + Size, tp, HstPtrName));
283 // Used by targetDataBegin, targetDataEnd, targetDataUpdate and target.
284 // Return the target pointer begin (where the data will be moved).
285 // Decrement the reference counter if called from targetDataEnd.
//
// IsLast is set when this lookup observes the final reference to the mapping.
// NOTE(review): the parameter list continuation (original line 288, which
// presumably declares `bool MustContain` used below) is elided in this view —
// confirm against the full source.
286 void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
287 bool UpdateRefCount, bool &IsHostPtr,
293 LookupResult lr = lookupMapping(HstPtrBegin, Size);
// Partial overlaps are accepted only when the caller did not insist on full
// containment (MustContain == false).
295 if (lr.Flags.IsContained ||
296 (!MustContain && (lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter))) {
297 auto &HT = *lr.Entry;
298 IsLast = HT.getRefCount() == 1;
// Never decrement past the last reference here; deallocTgtPtr handles that.
300 if (!IsLast && UpdateRefCount)
303 uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
304 DP("Mapping exists with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
305 "Size=%" PRId64 ",%s RefCount=%s\n", DPxPTR(HstPtrBegin), DPxPTR(tp),
306 Size, (UpdateRefCount ? " updated" : ""),
307 HT.isRefCountInf() ? "INF" : std::to_string(HT.getRefCount()).c_str());
309 } else if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) {
310 // If the value isn't found in the mapping and unified shared memory
311 // is on then it means we have stumbled upon a value which we need to
312 // use directly from the host.
313 DP("Get HstPtrBegin " DPxMOD " Size=%" PRId64 " RefCount=%s\n",
314 DPxPTR((uintptr_t)HstPtrBegin), Size, (UpdateRefCount ? " updated" : ""));
323 // Return the target pointer begin (where the data will be moved).
324 // Lock-free version called when loading global symbols from the fat binary.
// Unlike the reference-counting overload above, this one performs no
// refcount updates: it only translates a mapped host address to its device
// counterpart.
325 void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size) {
326 uintptr_t hp = (uintptr_t)HstPtrBegin;
327 LookupResult lr = lookupMapping(HstPtrBegin, Size);
// Any overlap with an existing entry is accepted for globals.
328 if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
329 auto &HT = *lr.Entry;
330 uintptr_t tp = HT.TgtPtrBegin + (hp - HT.HstPtrBegin);
/// Decrement the reference count of the mapping covering
/// [HstPtrBegin, HstPtrBegin+Size) and, when it drops to zero, free the
/// device allocation and erase the map entry. Under unified shared memory
/// (and without a forcing 'close' modifier) there is nothing to deallocate.
337 int DeviceTy::deallocTgtPtr(void *HstPtrBegin, int64_t Size, bool ForceDelete,
338 bool HasCloseModifier) {
// USM short-circuit: data lives in host memory, nothing was device-allocated.
339 if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
341 return OFFLOAD_SUCCESS;
342 // Check if the pointer is contained in any sub-nodes.
345 LookupResult lr = lookupMapping(HstPtrBegin, Size);
346 if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
347 auto &HT = *lr.Entry;
// Last reference gone: release device memory and drop the map entry.
350 if (HT.decRefCount() == 0) {
351 DP("Deleting tgt data " DPxMOD " of size %" PRId64 "\n",
352 DPxPTR(HT.TgtPtrBegin), Size);
353 deleteData((void *)HT.TgtPtrBegin);
354 DP("Removing%s mapping with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
355 ", Size=%" PRId64 "\n", (ForceDelete ? " (forced)" : ""),
356 DPxPTR(HT.HstPtrBegin), DPxPTR(HT.TgtPtrBegin), Size);
357 HostDataToTargetMap.erase(lr.Entry);
359 rc = OFFLOAD_SUCCESS;
361 REPORT("Section to delete (hst addr " DPxMOD ") does not exist in the"
362 " allocated memory\n",
363 DPxPTR(HstPtrBegin));
371 /// Init device, should not be called directly.
// Performs one-time plugin-side initialization for this device: forwards the
// OpenMP 'requires' flags to the plugin (if supported), initializes the
// device, and constructs the MemoryManager. Call via initOnce() only.
372 void DeviceTy::init() {
373 // Make call to init_requires if it exists for this plugin.
374 if (RTL->init_requires)
375 RTL->init_requires(PM->RTLs.RequiresFlags);
376 int32_t Ret = RTL->init_device(RTLDeviceID);
// On plugin failure the method returns early (elided here), leaving IsInit
// false so initOnce() reports OFFLOAD_FAIL.
377 if (Ret != OFFLOAD_SUCCESS)
380 // The memory manager will only be disabled when users provide a threshold via
381 // the environment variable \p LIBOMPTARGET_MEMORY_MANAGER_THRESHOLD and set
// User-provided threshold overrides the default memory-manager behavior.
383 if (const char *Env = std::getenv("LIBOMPTARGET_MEMORY_MANAGER_THRESHOLD")) {
384 size_t Threshold = std::stoul(Env);
386 MemoryManager = std::make_unique<MemoryManagerTy>(*this, Threshold);
388 MemoryManager = std::make_unique<MemoryManagerTy>(*this);
393 /// Thread-safe method to initialize the device only once.
/// Uses std::call_once with InitFlag so that init() runs exactly once across
/// all threads; afterwards IsInit reflects whether initialization succeeded.
394 int32_t DeviceTy::initOnce() {
395 std::call_once(InitFlag, &DeviceTy::init, this);
397 // At this point, if IsInit is true, then either this thread or some other
398 // thread in the past successfully initialized the device, so we can return
399 // OFFLOAD_SUCCESS. If this thread executed init() via call_once() and it
400 // failed, return OFFLOAD_FAIL. If call_once did not invoke init(), it means
401 // that some other thread already attempted to execute init() and if IsInit
402 // is still false, return OFFLOAD_FAIL.
404 return OFFLOAD_SUCCESS;
409 // Load binary to device.
// Thin forwarder to the plugin's load_binary entry point; returns the table
// of target entry points extracted from the device image \p Img.
410 __tgt_target_table *DeviceTy::load_binary(void *Img) {
412 __tgt_target_table *rc = RTL->load_binary(RTLDeviceID, Img);
// Allocate \p Size bytes of device memory (optionally associated with host
// pointer \p HstPtr). Goes through the MemoryManager when one was created in
// init(); otherwise calls the plugin allocator directly.
417 void *DeviceTy::allocData(int64_t Size, void *HstPtr) {
418 // If memory manager is enabled, we will allocate data via memory manager.
// (The `if (MemoryManager)` guard line is elided in this view.)
420 return MemoryManager->allocate(Size, HstPtr);
422 return RTL->data_alloc(RTLDeviceID, Size, HstPtr);
// Free device memory previously returned by allocData. Mirrors allocData:
// routed through the MemoryManager when enabled, otherwise straight to the
// plugin.
425 int32_t DeviceTy::deleteData(void *TgtPtrBegin) {
426 // If memory manager is enabled, we will deallocate data via memory manager.
// (The `if (MemoryManager)` guard line is elided in this view.)
428 return MemoryManager->free(TgtPtrBegin);
430 return RTL->data_delete(RTLDeviceID, TgtPtrBegin);
433 // Submit data to device
// Copy \p Size bytes from host (HstPtrBegin) to device (TgtPtrBegin). Uses
// the asynchronous plugin entry point only when the caller supplied an
// AsyncInfo AND the plugin implements both data_submit_async and synchronize;
// otherwise falls back to the blocking data_submit.
434 int32_t DeviceTy::submitData(void *TgtPtrBegin, void *HstPtrBegin, int64_t Size,
435 __tgt_async_info *AsyncInfoPtr) {
436 if (!AsyncInfoPtr || !RTL->data_submit_async || !RTL->synchronize)
437 return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size);
439 return RTL->data_submit_async(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size,
443 // Retrieve data from device
// Copy \p Size bytes from device (TgtPtrBegin) back to host (HstPtrBegin).
// Same async-vs-blocking dispatch policy as submitData.
444 int32_t DeviceTy::retrieveData(void *HstPtrBegin, void *TgtPtrBegin,
445 int64_t Size, __tgt_async_info *AsyncInfoPtr) {
446 if (!AsyncInfoPtr || !RTL->data_retrieve_async || !RTL->synchronize)
447 return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size);
449 return RTL->data_retrieve_async(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size,
453 // Copy data from current device to destination device directly
// Device-to-device copy of \p Size bytes from SrcPtr on this device to DstPtr
// on DstDev, without staging through the host. Callers are expected to have
// checked isDataExchangable() first (hence the assert on data_exchange).
454 int32_t DeviceTy::dataExchange(void *SrcPtr, DeviceTy &DstDev, void *DstPtr,
455 int64_t Size, __tgt_async_info *AsyncInfo) {
456 if (!AsyncInfo || !RTL->data_exchange_async || !RTL->synchronize) {
457 assert(RTL->data_exchange && "RTL->data_exchange is nullptr");
458 return RTL->data_exchange(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, DstPtr,
461 return RTL->data_exchange_async(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID,
462 DstPtr, Size, AsyncInfo);
465 // Run region on device
// Launch the target region at TgtEntryPtr with the given argument pointers
// and offsets. Dispatches to the async plugin entry point when available.
466 int32_t DeviceTy::runRegion(void *TgtEntryPtr, void **TgtVarsPtr,
467 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
468 __tgt_async_info *AsyncInfoPtr) {
// NOTE(review): this guard tests RTL->run_region, while the sibling methods
// (submitData/retrieveData/runTeamRegion) test the *_async entry point that
// is about to be called. If run_region_async can be null while run_region is
// not, the call below could dereference null — confirm against the full
// source and the plugin interface contract.
469 if (!AsyncInfoPtr || !RTL->run_region || !RTL->synchronize)
470 return RTL->run_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets,
473 return RTL->run_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
474 TgtOffsets, TgtVarsSize, AsyncInfoPtr);
477 // Run team region on device.
// Launch a teams construct: like runRegion but additionally forwards the
// requested number of teams, the thread limit, and the loop trip count
// (used by plugins to pick launch dimensions).
478 int32_t DeviceTy::runTeamRegion(void *TgtEntryPtr, void **TgtVarsPtr,
479 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
480 int32_t NumTeams, int32_t ThreadLimit,
481 uint64_t LoopTripCount,
482 __tgt_async_info *AsyncInfoPtr) {
483 if (!AsyncInfoPtr || !RTL->run_team_region_async || !RTL->synchronize)
484 return RTL->run_team_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
485 TgtOffsets, TgtVarsSize, NumTeams, ThreadLimit,
488 return RTL->run_team_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
489 TgtOffsets, TgtVarsSize, NumTeams,
490 ThreadLimit, LoopTripCount, AsyncInfoPtr);
493 // Whether data can be copied to DstDevice directly
// True only when both devices are driven by the same plugin, the plugin can
// answer the is_data_exchangable query affirmatively for this device pair,
// and at least one of the (sync or async) exchange entry points exists.
494 bool DeviceTy::isDataExchangable(const DeviceTy &DstDevice) {
495 if (RTL != DstDevice.RTL || !RTL->is_data_exchangable)
498 if (RTL->is_data_exchangable(RTLDeviceID, DstDevice.RTLDeviceID))
499 return (RTL->data_exchange != nullptr) ||
500 (RTL->data_exchange_async != nullptr);
// Wait for all pending asynchronous operations recorded in AsyncInfoPtr.
// A plugin with no synchronize entry point is treated as fully synchronous,
// so there is nothing to wait for and success is returned.
505 int32_t DeviceTy::synchronize(__tgt_async_info *AsyncInfoPtr) {
506 if (RTL->synchronize)
507 return RTL->synchronize(RTLDeviceID, AsyncInfoPtr);
508 return OFFLOAD_SUCCESS;
511 /// Check whether a device has an associated RTL and initialize it if it's not
512 /// already initialized.
/// Returns false when device_num is out of range or initialization fails.
513 bool device_is_ready(int device_num) {
514 DP("Checking whether device %d is ready.\n", device_num);
515 // Devices.size() can only change while registering a new
516 // library, so try to acquire the lock of RTLs' mutex.
// (The matching lock() call, original line 517, is elided in this view.)
518 size_t DevicesSize = PM->Devices.size();
519 PM->RTLsMtx.unlock();
// Out-of-range device number: no RTL was ever registered for it.
520 if (DevicesSize <= (size_t)device_num) {
521 DP("Device ID %d does not have a matching RTL\n", device_num);
526 DeviceTy &Device = PM->Devices[device_num];
528 DP("Is the device %d (local ID %d) initialized? %d\n", device_num,
529 Device.RTLDeviceID, Device.IsInit);
531 // Init the device if not done before
// initOnce() is thread-safe; IsInit short-circuits the common ready path.
532 if (!Device.IsInit && Device.initOnce() != OFFLOAD_SUCCESS) {
533 DP("Failed to init device %d\n", device_num);
537 DP("Device %d is ready to use.\n", device_num);