// This test checks whether memory accesses to contiguous virtual memory ranges
// are performed correctly.

// RUN: %{build} -o %t.out
// RUN: %{run} %t.out

#include <iostream>
#include <vector>

#include "helpers.hpp"

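// Bookkeeping for a reserved virtual address range: its start address and its
// size in bytes, so the range can be freed at the end of the test.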
struct VirtualAddressRange {
  VirtualAddressRange(uintptr_t Ptr, size_t Size) : MPtr{Ptr}, MSize{Size} {}

  uintptr_t MPtr;
  size_t MSize;
};

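// Bookkeeping for a physical memory allocation together with the virtual
// address it has been mapped to, so the mapping can be undone before the
// physical memory is released.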
struct PhysicalMemoryMapping {
  PhysicalMemoryMapping(syclext::physical_mem &&PhysicalMem, void *MappingPtr)
      : MPhysicalMem(std::move(PhysicalMem)), MMappingPtr(MappingPtr) {}
  syclext::physical_mem MPhysicalMem;
  void *MMappingPtr;
};

int main() {
  int Failed = 0;
  sycl::queue Q;
  sycl::context Context = Q.get_context();
  sycl::device Device = Q.get_device();

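  // Test parameters: how many independent virtual ranges to exercise, how many
  // int elements each range holds, and the marker value written by the kernel.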
  constexpr size_t NumberOfVirtualMemoryRanges = 5;
  constexpr size_t ElementsInRange = 100;
  constexpr int ValueSetInKernel = 999;

  size_t BytesRequiredPerRange = ElementsInRange * sizeof(int);

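  // Virtual memory reservations and mappings must be granularity-aligned.
  // GetLCMGranularity and GetAlignedByteSize come from helpers.hpp; they are
  // assumed to return, respectively, a granularity valid for both the
  // reservation and the physical allocation, and the required byte size
  // rounded up to a multiple of that granularity.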
  size_t UsedGranularity = GetLCMGranularity(Device, Context);

  size_t AlignedByteSizePerRange =
      GetAlignedByteSize(BytesRequiredPerRange, UsedGranularity);

  std::vector<VirtualAddressRange> VirtualMemoryRanges;
  std::vector<PhysicalMemoryMapping> PhysicalMemoryMappings;

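  // For each range: reserve a contiguous virtual address range, create a
  // physical memory allocation of the same aligned size, and map the
  // allocation into the range with read-write access.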
  for (size_t Index = 0; Index < NumberOfVirtualMemoryRanges; ++Index) {
    uintptr_t VirtualMemoryPtr =
        syclext::reserve_virtual_mem(AlignedByteSizePerRange, Context);
    syclext::physical_mem PhysicalMem{Device, Context, AlignedByteSizePerRange};
    void *MappedPtr = PhysicalMem.map(VirtualMemoryPtr, AlignedByteSizePerRange,
                                      syclext::address_access_mode::read_write);

    VirtualMemoryRanges.emplace_back(VirtualMemoryPtr, AlignedByteSizePerRange);
    PhysicalMemoryMappings.emplace_back(std::move(PhysicalMem), MappedPtr);
  }

  std::vector<int> ResultHostData(ElementsInRange);

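  // For each range, write the marker value through the mapped pointer in one
  // kernel, copy it back to the host through a buffer in a second kernel, and
  // verify the result on the host.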
  for (size_t Index = 0; Index < NumberOfVirtualMemoryRanges; ++Index) {
    int *DataRangePtr =
        static_cast<int *>(PhysicalMemoryMappings[Index].MMappingPtr);

    Q.parallel_for(ElementsInRange, [=](sycl::id<1> Idx) {
      DataRangePtr[Idx] = ValueSetInKernel;
    }).wait_and_throw();

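    // The buffer lives in its own scope so that its destruction writes the
    // results back to ResultHostData before the host-side check below.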
    {
      sycl::buffer<int> ResultBuffer(ResultHostData);

      Q.submit([&](sycl::handler &Handle) {
        sycl::accessor A(ResultBuffer, Handle, sycl::write_only);
        Handle.parallel_for(ElementsInRange, [=](sycl::id<1> Idx) {
          A[Idx] = DataRangePtr[Idx];
        });
      });
    }

    for (size_t i = 0; i < ElementsInRange; i++) {
      if (ResultHostData[i] != ValueSetInKernel) {
        std::cout << "Comparison failed with virtual range " << Index + 1
                  << " at index " << i << ": " << ResultHostData[i]
                  << " != " << ValueSetInKernel << std::endl;
        ++Failed;
      }
    }
  }

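  // Unmap all mappings before freeing the corresponding reserved virtual
  // address ranges.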
  for (const auto &PhysMemMap : PhysicalMemoryMappings) {
    syclext::unmap(PhysMemMap.MMappingPtr, PhysMemMap.MPhysicalMem.size(),
                   Context);
  }
  for (const auto &VirtualMemRange : VirtualMemoryRanges) {
    syclext::free_virtual_mem(VirtualMemRange.MPtr, VirtualMemRange.MSize,
                              Context);
  }

  return Failed;
}