repo_name
stringclasses 10
values | file_path
stringlengths 29
222
| content
stringlengths 24
926k
extension
stringclasses 5
values |
---|---|---|---|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/slm/histogram-slm-256.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <cstdint>
#include <iostream>
#include <random>
#include <vector>
int main() {
  // 16M 64-bit words; each of the 8 bytes in a word is one histogram sample.
  constexpr int N = 4096 * 4096;
  std::vector<unsigned long> input(N);
  srand(2009);
  // Pack eight pseudo-random bytes into each 64-bit input word.
  for (int i = 0; i < N; ++i) {
    input[i] = (long)rand() % 256;
    input[i] |= ((long)rand() % 256) << 8;
    input[i] |= ((long)rand() % 256) << 16;
    input[i] |= ((long)rand() % 256) << 24;
    input[i] |= ((long)rand() % 256) << 32;
    input[i] |= ((long)rand() % 256) << 40;
    input[i] |= ((long)rand() % 256) << 48;
    input[i] |= ((long)rand() % 256) << 56;
  }
  // Profiling-enabled queue so kernel time can be read from the event below.
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  // Snippet begin
  constexpr int NUM_BINS = 256;
  constexpr int BLOCK_SIZE = 256;  // 64-bit words processed per work-item
  std::vector<unsigned long> hist(NUM_BINS, 0);
  sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
  sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
  auto e = q.submit([&](auto &h) {
    sycl::accessor macc(mbuf, h, sycl::read_only);
    sycl::accessor hacc(hbuf, h, sycl::read_write);
    // One privatized histogram per work-group in shared local memory (SLM);
    // merged into the global histogram at the end of the kernel.
    sycl::local_accessor<unsigned int> local_histogram(sycl::range(NUM_BINS),
                                                       h);
    h.parallel_for(
        sycl::nd_range(sycl::range{N / BLOCK_SIZE}, sycl::range{64}),
        [=](sycl::nd_item<1> it) {
          int group = it.get_group()[0];
          int gSize = it.get_local_range()[0];
          auto sg = it.get_sub_group();
          int sgSize = sg.get_local_range()[0];
          int sgGroup = sg.get_group_id()[0];
          // factor = bins each work-item must zero/merge when the group is
          // smaller than the bin count (here 256/64 = 4).
          int factor = NUM_BINS / gSize;
          int local_id = it.get_local_id()[0];
          // Zero the SLM histogram cooperatively.
          // NOTE(review): memory_scope::device on a local_space atomic is
          // stronger than needed; memory_scope::work_group would suffice.
          if ((factor <= 1) && (local_id < NUM_BINS)) {
            sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
                             sycl::memory_scope::device,
                             sycl::access::address_space::local_space>
                local_bin(local_histogram[local_id]);
            local_bin.store(0);
          } else {
            for (int k = 0; k < factor; k++) {
              sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
                               sycl::memory_scope::device,
                               sycl::access::address_space::local_space>
                  local_bin(local_histogram[gSize * k + local_id]);
              local_bin.store(0);
            }
          }
          it.barrier(sycl::access::fence_space::local_space);
          // Each sub-group cooperatively loads contiguous words (sg.load),
          // then every byte of the word bumps one SLM bin atomically.
          for (int k = 0; k < BLOCK_SIZE; k++) {
            unsigned long x =
                sg.load(macc.get_pointer() + group * gSize * BLOCK_SIZE +
                        sgGroup * sgSize * BLOCK_SIZE + sgSize * k);
#pragma unroll
            for (std::uint8_t shift : {0, 8, 16, 24, 32, 40, 48, 56}) {
              constexpr unsigned long mask = 0xFFU;
              sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
                               sycl::memory_scope::device,
                               sycl::access::address_space::local_space>
                  local_bin(local_histogram[(x >> shift) & mask]);
              local_bin += 1;
            }
          }
          it.barrier(sycl::access::fence_space::local_space);
          // Merge the group's SLM histogram into the global one with
          // device-scope atomics on global memory.
          if ((factor <= 1) && (local_id < NUM_BINS)) {
            sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
                             sycl::memory_scope::device,
                             sycl::access::address_space::local_space>
                local_bin(local_histogram[local_id]);
            sycl::atomic_ref<unsigned long, sycl::memory_order::relaxed,
                             sycl::memory_scope::device,
                             sycl::access::address_space::global_space>
                global_bin(hacc[local_id]);
            global_bin += local_bin.load();
          } else {
            for (int k = 0; k < factor; k++) {
              sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
                               sycl::memory_scope::device,
                               sycl::access::address_space::local_space>
                  local_bin(local_histogram[gSize * k + local_id]);
              sycl::atomic_ref<unsigned long, sycl::memory_order::relaxed,
                               sycl::memory_scope::device,
                               sycl::access::address_space::global_space>
                  global_bin(hacc[gSize * k + local_id]);
              global_bin += local_bin.load();
            }
          }
        });
  });
  // Snippet end
  q.wait();
  // Kernel wall time from the profiling event, in nanoseconds.
  size_t kernel_ns = (e.template get_profiling_info<
                          sycl::info::event_profiling::command_end>() -
                      e.template get_profiling_info<
                          sycl::info::event_profiling::command_start>());
  std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
            << " msec" << std::endl;
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/slm/slm-bank-s1.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// SLM bank-conflict demo: every work-item hammers a distinct, consecutive
// SLM word (stride 1), so accesses land in different banks.
int main() {
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << std::endl;
  // Snippet begin
  constexpr int N = 32;
  // USM shared allocation receiving each work-item's result.
  int *data = sycl::malloc_shared<int>(N, q);
  auto e = q.submit([&](auto &h) {
    sycl::local_accessor<int, 1> slm(sycl::range(32 * 64), h);
    h.parallel_for(sycl::nd_range(sycl::range{N}, sycl::range{32}),
                   [=](sycl::nd_item<1> it) {
                     int i = it.get_global_linear_id();
                     int j = it.get_local_linear_id();
                     // Stride-1 SLM indexing: conflict-free banking.
                     slm[j] = 0;
                     it.barrier(sycl::access::fence_space::local_space);
                     for (int m = 0; m < 1024 * 1024; m++) {
                       slm[j] += i * m;
                       it.barrier(sycl::access::fence_space::local_space);
                     }
                     data[i] = slm[j];
                   });
  });
  // Snippet end
  q.wait();
  std::cout << "Kernel time = "
            << (e.template get_profiling_info<
                    sycl::info::event_profiling::command_end>() -
                e.template get_profiling_info<
                    sycl::info::event_profiling::command_start>())
            << " ns" << std::endl;
  // Fix: release the USM allocation (previously leaked).
  sycl::free(data, q);
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/slm/slm-size.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// Query and print the device's shared-local-memory capacity.
int main(void) {
  sycl::queue q{sycl::gpu_selector_v};
  const auto dev = q.get_device();
  std::cout << "Device: " << dev.get_info<sycl::info::device::name>()
            << std::endl;
  // Snippet begin
  const auto slm_bytes = dev.get_info<sycl::info::device::local_mem_size>();
  std::cout << "Local Memory Size: " << slm_bytes << std::endl;
  // Snippet end
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/slm/convolution-global.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
#include <vector>
int main() {
  // 1-D convolution: 64M samples against a 257-tap kernel (odd length, so
  // M/2 taps extend on each side of the center).
  constexpr size_t N = 8192 * 8192;
  constexpr size_t M = 257;
  std::vector<int> input(N);
  std::vector<int> output(N);
  std::vector<int> kernel(M);
  srand(2009);
  for (size_t i = 0; i < N; ++i) {
    input[i] = rand();
  }
  for (size_t i = 0; i < M; ++i) {
    kernel[i] = rand();
  }
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  {
    // Snippet begin
    sycl::buffer<int> ibuf(input.data(), N);
    sycl::buffer<int> obuf(output.data(), N);
    sycl::buffer<int> kbuf(kernel.data(), M);
    auto e = q.submit([&](auto &h) {
      sycl::accessor iacc(ibuf, h, sycl::read_only);
      sycl::accessor oacc(obuf, h);
      sycl::accessor kacc(kbuf, h, sycl::read_only);
      h.parallel_for(sycl::nd_range<1>(sycl::range{N}, sycl::range{256}),
                     [=](sycl::nd_item<1> it) {
                       int i = it.get_global_linear_id();
                       int group = it.get_group()[0];
                       int gSize = it.get_local_range()[0];
                       int t = 0;
                       int _M = static_cast<int>(M);
                       int _N = static_cast<int>(N);
                       // Only the first and last work-groups can touch the
                       // array edges, so boundary clamping is confined there;
                       // all interior groups run the unguarded fast loop.
                       if ((group == 0) || (group == _N / gSize - 1)) {
                         if (i < _M / 2) {
                           // Left edge: clip taps that fall before input[0].
                           for (int j = _M / 2 - i, k = 0; j < _M; ++j, ++k) {
                             t += iacc[k] * kacc[j];
                           }
                         } else {
                           if (i + _M / 2 >= _N) {
                             // Right edge: clip taps past input[N-1].
                             for (int j = 0, k = i - _M / 2;
                                  j < _M / 2 + _N - i; ++j, ++k) {
                               t += iacc[k] * kacc[j];
                             }
                           } else {
                             // Interior element of a boundary group.
                             for (int j = 0, k = i - _M / 2; j < _M; ++j, ++k) {
                               t += iacc[k] * kacc[j];
                             }
                           }
                         }
                       } else {
                         // Fully interior: all M taps are in range.
                         for (int j = 0, k = i - _M / 2; j < _M; ++j, ++k) {
                           t += iacc[k] * kacc[j];
                         }
                       }
                       oacc[i] = t;
                     });
    });
    // Snippet end
    q.wait();
    size_t kernel_ns = (e.template get_profiling_info<
                            sycl::info::event_profiling::command_end>() -
                        e.template get_profiling_info<
                            sycl::info::event_profiling::command_start>());
    std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
              << " msec" << std::endl;
  }
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/slm/slm-bank-s16.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
// SLM bank-conflict demo: each work-item accesses SLM with a stride of 16
// ints, so multiple work-items collide on the same banks (compare with the
// stride-1 variant of this sample).
int main() {
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << std::endl;
  // Snippet begin
  constexpr int N = 32;
  // USM shared allocation receiving each work-item's result.
  int *data = sycl::malloc_shared<int>(N, q);
  auto e = q.submit([&](auto &h) {
    sycl::local_accessor<int, 1> slm(sycl::range(32 * 64), h);
    h.parallel_for(sycl::nd_range(sycl::range{N}, sycl::range{32}),
                   [=](sycl::nd_item<1> it) {
                     int i = it.get_global_linear_id();
                     int j = it.get_local_linear_id();
                     // Stride-16 SLM indexing: provokes bank conflicts.
                     slm[j * 16] = 0;
                     it.barrier(sycl::access::fence_space::local_space);
                     for (int m = 0; m < 1024 * 1024; m++) {
                       slm[j * 16] += i * m;
                       it.barrier(sycl::access::fence_space::local_space);
                     }
                     data[i] = slm[j * 16];
                   });
  });
  // Snippet end
  q.wait();
  std::cout << "Kernel time = "
            << (e.template get_profiling_info<
                    sycl::info::event_profiling::command_end>() -
                e.template get_profiling_info<
                    sycl::info::event_profiling::command_start>())
            << " ns" << std::endl;
  // Fix: release the USM allocation (previously leaked).
  sycl::free(data, q);
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/slm/histogram-slm-1024.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
#include <vector>
int main() {
  // 16M 64-bit words; each word carries four 10-bit samples (at bit offsets
  // 0, 16, 32, 48), giving a 1024-bin histogram.
  constexpr int N = 4096 * 4096;
  std::vector<unsigned long> input(N);
  srand(2009);
  // Pack four pseudo-random 10-bit values into each 64-bit input word.
  for (int i = 0; i < N; ++i) {
    input[i] = (long)rand() % 1024;
    input[i] |= ((long)rand() % 1024) << 16;
    input[i] |= ((long)rand() % 1024) << 32;
    input[i] |= ((long)rand() % 1024) << 48;
  }
  sycl::queue q{sycl::gpu_selector_v,
                sycl::property::queue::enable_profiling{}};
  std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
            << "\n";
  // Snippet begin
  constexpr int NUM_BINS = 1024;
  constexpr int BLOCK_SIZE = 256;  // 64-bit words processed per work-item
  std::vector<unsigned long> hist(NUM_BINS, 0);
  sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
  sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
  auto e = q.submit([&](auto &h) {
    sycl::accessor macc(mbuf, h, sycl::read_only);
    sycl::accessor hacc(hbuf, h, sycl::read_write);
    // Per-work-group privatized histogram in shared local memory (SLM).
    sycl::local_accessor<unsigned int, 1> local_histogram(sycl::range(NUM_BINS),
                                                          h);
    h.parallel_for(
        sycl::nd_range(sycl::range{N / BLOCK_SIZE}, sycl::range{64}),
        [=](sycl::nd_item<1> it) {
          int group = it.get_group()[0];
          int gSize = it.get_local_range()[0];
          auto sg = it.get_sub_group();
          int sgSize = sg.get_local_range()[0];
          int sgGroup = sg.get_group_id()[0];
          // factor = bins each work-item zeroes/merges (1024/64 = 16 here).
          int factor = NUM_BINS / gSize;
          int local_id = it.get_local_id()[0];
          // Zero the SLM histogram cooperatively.
          // NOTE(review): memory_scope::device on a local_space atomic is
          // stronger than needed; memory_scope::work_group would suffice.
          if ((factor <= 1) && (local_id < NUM_BINS)) {
            sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
                             sycl::memory_scope::device,
                             sycl::access::address_space::local_space>
                local_bin(local_histogram[local_id]);
            local_bin.store(0);
          } else {
            for (int k = 0; k < factor; k++) {
              sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
                               sycl::memory_scope::device,
                               sycl::access::address_space::local_space>
                  local_bin(local_histogram[gSize * k + local_id]);
              local_bin.store(0);
            }
          }
          it.barrier(sycl::access::fence_space::local_space);
          // Sub-group cooperative loads; each 10-bit field (mask 0x3FF)
          // increments one SLM bin atomically.
          for (int k = 0; k < BLOCK_SIZE; k++) {
            unsigned long x =
                sg.load(macc.get_pointer() + group * gSize * BLOCK_SIZE +
                        sgGroup * sgSize * BLOCK_SIZE + sgSize * k);
#pragma unroll
            for (std::uint8_t shift : {0, 16, 32, 48}) {
              constexpr unsigned long mask = 0x3FFU;
              sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
                               sycl::memory_scope::device,
                               sycl::access::address_space::local_space>
                  local_bin(local_histogram[(x >> shift) & mask]);
              local_bin += 1;
            }
          }
          it.barrier(sycl::access::fence_space::local_space);
          // Merge the group's SLM histogram into the global histogram.
          if ((factor <= 1) && (local_id < NUM_BINS)) {
            sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
                             sycl::memory_scope::device,
                             sycl::access::address_space::local_space>
                local_bin(local_histogram[local_id]);
            sycl::atomic_ref<unsigned long, sycl::memory_order::relaxed,
                             sycl::memory_scope::device,
                             sycl::access::address_space::global_space>
                global_bin(hacc[local_id]);
            global_bin += local_bin.load();
          } else {
            for (int k = 0; k < factor; k++) {
              sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
                               sycl::memory_scope::device,
                               sycl::access::address_space::local_space>
                  local_bin(local_histogram[gSize * k + local_id]);
              sycl::atomic_ref<unsigned long, sycl::memory_order::relaxed,
                               sycl::memory_scope::device,
                               sycl::access::address_space::global_space>
                  global_bin(hacc[gSize * k + local_id]);
              global_bin += local_bin.load();
            }
          }
        });
  });
  // Snippet end
  q.wait();
  // Kernel wall time from the profiling event, in nanoseconds.
  size_t kernel_ns = (e.template get_profiling_info<
                          sycl::info::event_profiling::command_end>() -
                      e.template get_profiling_info<
                          sycl::info::event_profiling::command_start>());
  std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
            << " msec" << std::endl;
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/memory-sharing-with-media/memory-sharing-vaapi.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// SYCL
#include <CL/sycl.hpp>
// SYCL oneAPI extension
#include <sycl/ext/oneapi/backend/level_zero.hpp>
// Level-zero
#include <level_zero/ze_api.h>
// VA-API
#include <va/va_drm.h>
#include <va/va_drmcommon.h>
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>
#include <vector>
#define OUTPUT_FILE "output.bgra"
#define VAAPI_DEVICE "/dev/dri/renderD128"
#define FRAME_WIDTH 320
#define FRAME_HEIGHT 240
#define RECT_WIDTH 160
#define RECT_HEIGHT 160
#define RECT_Y (FRAME_HEIGHT - RECT_HEIGHT) / 2
#define NUM_FRAMES (FRAME_WIDTH - RECT_WIDTH)
#define VA_FORMAT VA_FOURCC_BGRA
#define RED 0xffff0000
#define GREEN 0xff00ff00
#define BLUE 0xff0000ff
#define CHECK_STS(_FUNC) \
{ \
auto _sts = _FUNC; \
if (_sts != 0) { \
printf("Error %d calling " #_FUNC, (int)_sts); \
return -1; \
} \
}
// Allocates one RGB32 VA-API surface of the given size with the pixel format
// fixed to VA_FORMAT (BGRA).
// NOTE(review): the VAStatus returned by vaCreateSurfaces is ignored here, so
// on failure the returned VASurfaceID is unspecified — consider CHECK_STS.
VASurfaceID alloc_va_surface(VADisplay va_display, int width, int height) {
  VASurfaceID va_surface;
  VASurfaceAttrib surface_attrib{};
  surface_attrib.type = VASurfaceAttribPixelFormat;
  surface_attrib.flags = VA_SURFACE_ATTRIB_SETTABLE;
  surface_attrib.value.type = VAGenericValueTypeInteger;
  surface_attrib.value.value.i = VA_FORMAT;
  vaCreateSurfaces(va_display, VA_RT_FORMAT_RGB32, width, height, &va_surface,
                   1, &surface_attrib, 1);
  return va_surface;
}
// Demonstrates zero-copy sharing of VA-API video surfaces with SYCL/Level
// Zero: VA-API renders frames, DMA-BUF FDs are imported as USM device
// pointers, SYCL kernels draw into them, and frames are dumped to a file.
int main() {
  // Create SYCL queue on GPU device and Level-zero backend, and query
  // Level-zero context and device
  sycl::queue sycl_queue{sycl::ext::oneapi::filter_selector(
      "level_zero")}; // { sycl::gpu_selector() }
  auto ze_context = sycl::get_native<sycl::backend::ext_oneapi_level_zero>(
      sycl_queue.get_context());
  auto ze_device = sycl::get_native<sycl::backend::ext_oneapi_level_zero>(
      sycl_queue.get_device());
  // Create VA-API context (VADisplay)
  VADisplay va_display = vaGetDisplayDRM(open(VAAPI_DEVICE, O_RDWR));
  if (!va_display) {
    printf("Error creating VADisplay on device %s\n", VAAPI_DEVICE);
    return -1;
  }
  int major = 0, minor = 0;
  CHECK_STS(vaInitialize(va_display, &major, &minor));
  // Create VA-API surfaces, one per output frame.
  VASurfaceID surfaces[NUM_FRAMES];
  for (int i = 0; i < NUM_FRAMES; i++) {
    surfaces[i] = alloc_va_surface(va_display, FRAME_WIDTH, FRAME_HEIGHT);
  }
  // Convert each VA-API surface into USM device pointer (zero-copy buffer
  // sharing between VA-API and Level-zero)
  void *device_ptr[NUM_FRAMES];
  size_t stride;
  for (int i = 0; i < NUM_FRAMES; i++) {
    // Export DMA-FD from VASurface
    VADRMPRIMESurfaceDescriptor prime_desc{};
    CHECK_STS(vaExportSurfaceHandle(va_display, surfaces[i],
                                    VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
                                    VA_EXPORT_SURFACE_READ_WRITE, &prime_desc));
    auto dma_fd = prime_desc.objects->fd;
    auto dma_size = prime_desc.objects->size;
    // Pitch is in bytes; convert to a stride in 32-bit pixels.
    stride = prime_desc.layers[0].pitch[0] / sizeof(uint32_t);
    // Import DMA-FD into Level-zero device pointer
    ze_external_memory_import_fd_t import_fd = {
        ZE_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMPORT_FD,
        nullptr, // pNext
        ZE_EXTERNAL_MEMORY_TYPE_FLAG_DMA_BUF, dma_fd};
    ze_device_mem_alloc_desc_t alloc_desc = {};
    alloc_desc.stype = ZE_STRUCTURE_TYPE_DEVICE_MEM_ALLOC_DESC;
    alloc_desc.pNext = &import_fd;
    CHECK_STS(zeMemAllocDevice(ze_context, &alloc_desc, dma_size, 1, ze_device,
                               &device_ptr[i]));
    // Close DMA-FD: the Level-zero allocation keeps its own reference.
    close(dma_fd);
  }
  // Create VA-API surface with size 1x1 and write GREEN pixel
  VASurfaceID surface1x1 = alloc_va_surface(va_display, 1, 1);
  VAImage va_image;
  void *data = nullptr;
  CHECK_STS(vaDeriveImage(va_display, surface1x1, &va_image));
  CHECK_STS(vaMapBuffer(va_display, va_image.buf, &data));
  *(uint32_t *)data = GREEN;
  CHECK_STS(vaUnmapBuffer(va_display, va_image.buf));
  CHECK_STS(vaDestroyImage(va_display, va_image.image_id));
  // VA-API call to fill background with BLUE color and upscale 1x1 surface into
  // moving GREEN rectangle
  VAConfigID va_config_id;
  VAContextID va_context_id;
  CHECK_STS(vaCreateConfig(va_display, VAProfileNone, VAEntrypointVideoProc,
                           nullptr, 0, &va_config_id));
  CHECK_STS(vaCreateContext(va_display, va_config_id, 0, 0, VA_PROGRESSIVE,
                            nullptr, 0, &va_context_id));
  for (int i = 0; i < NUM_FRAMES; i++) {
    VAProcPipelineParameterBuffer param{};
    param.output_background_color = BLUE;
    param.surface = surface1x1;
    // The green rectangle shifts one pixel right per frame.
    VARectangle output_region = {int16_t(i), RECT_Y, RECT_WIDTH, RECT_HEIGHT};
    param.output_region = &output_region;
    VABufferID param_buf;
    // Fix: the address-of expressions below had been corrupted into the
    // HTML entity '&para;' ("¶m" / "¶m_buf") by text mangling.
    CHECK_STS(vaCreateBuffer(va_display, va_context_id,
                             VAProcPipelineParameterBufferType, sizeof(param),
                             1, &param, &param_buf));
    CHECK_STS(vaBeginPicture(va_display, va_context_id, surfaces[i]));
    CHECK_STS(vaRenderPicture(va_display, va_context_id, &param_buf, 1));
    CHECK_STS(vaEndPicture(va_display, va_context_id));
    CHECK_STS(vaDestroyBuffer(va_display, param_buf));
  }
#if 0
  // Synchronization is optional on Linux OS as i915 KMD driver synchronizes
  // write/read commands submitted from Intel media and compute drivers
  for (int i = 0; i < NUM_FRAMES; i++) {
    CHECK_STS(vaSyncSurface(va_display, surfaces[i]));
  }
#endif
  // Submit SYCL kernels to write RED sub-rectangle inside GREEN rectangle
  std::vector<sycl::event> sycl_events(NUM_FRAMES);
  for (int i = 0; i < NUM_FRAMES; i++) {
    uint32_t *ptr = (uint32_t *)device_ptr[i] +
                    (RECT_Y + RECT_HEIGHT / 4) * stride + (i + RECT_WIDTH / 4);
    sycl_events[i] = sycl_queue.parallel_for(
        sycl::range<2>(RECT_HEIGHT / 2, RECT_WIDTH / 2), [=](sycl::id<2> idx) {
          auto y = idx.get(0);
          auto x = idx.get(1);
          ptr[y * stride + x] = RED;
        });
  }
  // Synchronize all SYCL kernels
  sycl::event::wait(sycl_events);
  // Map VA-API surface to system memory and write to file
  FILE *file = fopen(OUTPUT_FILE, "wb");
  if (!file) {
    printf("Error creating file %s\n", OUTPUT_FILE);
    return -1;
  }
  for (int i = 0; i < NUM_FRAMES; i++) {
    CHECK_STS(vaDeriveImage(va_display, surfaces[i], &va_image));
    CHECK_STS(vaMapBuffer(va_display, va_image.buf, &data));
    fwrite(data, 1, FRAME_HEIGHT * FRAME_WIDTH * 4, file);
    CHECK_STS(vaUnmapBuffer(va_display, va_image.buf));
    CHECK_STS(vaDestroyImage(va_display, va_image.image_id));
  }
  fclose(file);
  printf("Created file %s\n", OUTPUT_FILE);
  // Free device pointers and VA-API surfaces
  for (int i = 0; i < NUM_FRAMES; i++)
    zeMemFree(ze_context, device_ptr[i]);
  vaDestroySurfaces(va_display, surfaces, NUM_FRAMES);
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/fp-computations/test_log_omp.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <iostream>
#include <assert.h>
#include <chrono>
#include <cmath>
#if FP_SIZE == 32
typedef float FP_TYPE;
static constexpr FP_TYPE VALIDATION_THRESHOLD = 1e-3;
#elif FP_SIZE == 64
typedef double FP_TYPE;
static constexpr FP_TYPE VALIDATION_THRESHOLD = 1e-6;
#endif
// Offloaded workload: for each of NELEMENTS slots, iterate
// tmp += log(tmp) NREPETITIONS times starting from initial_value and store
// the result. `res` must already be mapped on the device
// (map(present,alloc:...) asserts that; the host does not transfer it here).
template<typename T>
void do_work (unsigned NELEMENTS, unsigned NREPETITIONS, T initial_value, T *res)
{
  #pragma omp target teams distribute parallel for map(present,alloc:res[0:NELEMENTS])
  for (unsigned j = 0; j < NELEMENTS; j++)
  {
    T tmp = initial_value;
    for (unsigned i = 0; i < NREPETITIONS; ++i)
      tmp += std::log(tmp);
    res[j] = tmp;
  }
}
// Times the std::log OpenMP-offload workload and validates it against a
// host-computed reference within VALIDATION_THRESHOLD (relative error).
int main (int argc, char *argv[])
{
  static constexpr unsigned NELEMENTS = 64*1024*1024;
  static constexpr unsigned NREPETITIONS = 1024;
  // Empty target region: warms up the device/runtime outside the timed part.
  #pragma omp target
  { }
  FP_TYPE initial_value = 2;
  // Host reference: same recurrence as the device kernel.
  FP_TYPE ref_res = initial_value;
  for (unsigned i = 0; i < NREPETITIONS; ++i)
    ref_res += std::log(ref_res);
  std::cout << "reference result = " << ref_res << std::endl;
  {
    FP_TYPE * std_res = new FP_TYPE[NELEMENTS];
    assert (std_res != nullptr);
    std::chrono::duration<float, std::micro> elapsed;
    // Map once so the timed region measures compute, not data transfer.
    #pragma omp target data map(std_res[0:NELEMENTS])
    {
      auto tbegin = std::chrono::system_clock::now();
      do_work<FP_TYPE> (NELEMENTS, NREPETITIONS, initial_value, std_res);
      auto tend = std::chrono::system_clock::now();
      elapsed = tend - tbegin;
    }
    std::cout << "std::log result[0] = " << std_res[0] << std::endl;
    // All elements start from the same value, so they must all agree.
    // NOTE(review): `auto i` deduces int while NELEMENTS is unsigned
    // (64M fits in int, so this is safe here).
    bool allequal = true;
    for (auto i = 1; i < NELEMENTS; ++i)
      allequal = allequal and std_res[0] == std_res[i];
    if (allequal)
    {
      // Relative-error check against the host reference.
      if (std::abs(ref_res - std_res[0])/std::abs(ref_res) < std::abs(VALIDATION_THRESHOLD))
        std::cout << "std::log validates. Total execution time is " << elapsed.count() << " us." << std::endl;
      else
        std::cout << "std::log does not validate (ref=" << ref_res << " std_res=" << std_res[0] << " mix=" << std::abs(ref_res - std_res[0])/std::abs(ref_res) << ")" << std::endl;
    }
    else
      std::cout << "std::log does not validate, results are not equal." << std::endl;
    delete [] std_res;
  }
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/fp-computations/test_log_omp_f.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <iostream>
#include <assert.h>
#include <chrono>
#include <cmath>
#if FP_SIZE == 32
typedef float FP_TYPE;
static constexpr FP_TYPE VALIDATION_THRESHOLD = 1e-3;
extern "C" void log_real_sp (int, int, FP_TYPE, FP_TYPE *);
#elif FP_SIZE == 64
typedef double FP_TYPE;
static constexpr FP_TYPE VALIDATION_THRESHOLD = 1e-6;
extern "C" void log_real_dp (int, int, FP_TYPE, FP_TYPE *);
#endif
// Times the Fortran-implemented log workload (log_real_sp/log_real_dp,
// linked in via extern "C") and validates it against a host-computed
// reference within VALIDATION_THRESHOLD (relative error).
int main (int argc, char *argv[])
{
  static constexpr unsigned NELEMENTS = 64*1024*1024;
  static constexpr int NREPETITIONS = 1024;
  // Empty target region: warms up the device/runtime outside the timed part.
  #pragma omp target
  { }
  FP_TYPE initial_value = 2;
  // Host reference: same recurrence the external routine computes.
  FP_TYPE ref_res = initial_value;
  for (unsigned i = 0; i < NREPETITIONS; ++i)
    ref_res += std::log(ref_res);
  std::cout << "reference result = " << ref_res << std::endl;
  {
    FP_TYPE * std_res = new FP_TYPE[NELEMENTS];
    assert (std_res != nullptr);
    std::chrono::duration<float, std::micro> elapsed;
    // Map once so the timed region measures compute, not data transfer.
    #pragma omp target data map(std_res[0:NELEMENTS])
    {
      auto tbegin = std::chrono::system_clock::now();
      {
        // Dispatch to the precision-matching external routine.
# if FP_SIZE == 32
        log_real_sp (NELEMENTS, NREPETITIONS, initial_value, std_res);
# elif FP_SIZE == 64
        log_real_dp (NELEMENTS, NREPETITIONS, initial_value, std_res);
# endif
      }
      auto tend = std::chrono::system_clock::now();
      elapsed = tend - tbegin;
    }
    std::cout << "std::log result[0] = " << std_res[0] << std::endl;
    // All elements start from the same value, so they must all agree.
    bool allequal = true;
    for (auto i = 1; i < NELEMENTS; ++i)
      allequal = allequal and std_res[0] == std_res[i];
    if (allequal)
    {
      // Relative-error check against the host reference.
      if (std::abs(ref_res - std_res[0])/std::abs(ref_res) < std::abs(VALIDATION_THRESHOLD))
        std::cout << "std::log validates. Total execution time is " << elapsed.count() << " us." << std::endl;
      else
        std::cout << "std::log does not validate (ref=" << ref_res << " std_res=" << std_res[0] << " mix=" << std::abs(ref_res - std_res[0])/std::abs(ref_res) << ")" << std::endl;
    }
    else
      std::cout << "std::log does not validate, results are not equal." << std::endl;
    delete [] std_res;
  }
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/fp-computations/test_log_sycl.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
#include <CL/sycl.hpp>
#include <iostream>
#include <assert.h>
#include <chrono>
#include <cmath>
#if FP_SIZE == 32
typedef float FP_TYPE;
static constexpr FP_TYPE VALIDATION_THRESHOLD = 1e-3;
#elif FP_SIZE == 64
typedef double FP_TYPE;
static constexpr FP_TYPE VALIDATION_THRESHOLD = 1e-6;
#endif
// SYCL workload using std::log: for each of NELEMENTS slots, iterate
// tmp += log(tmp) NREPETITIONS times from initial_value and store the result
// into device memory `res`. Blocks until the kernel completes.
// Fix: the accumulator is now the template parameter T (it was hard-coded to
// the FP_SIZE-selected FP_TYPE, silently ignoring T — the OpenMP variant of
// this sample correctly uses T).
template<typename T>
void do_work_std (sycl::queue &q, unsigned NELEMENTS, unsigned NREPETITIONS, T initial_value, T *res)
{
  q.submit([&](sycl::handler& h) {
    h.parallel_for(NELEMENTS, [=] (auto j)
    {
      T tmp = initial_value;
      for (unsigned i = 0; i < NREPETITIONS; ++i)
        tmp += std::log(tmp);
      res[j] = tmp;
    });
  }).wait();
}
// SYCL workload using sycl::log: same recurrence as do_work_std but with the
// SYCL built-in math function. Blocks until the kernel completes.
// Fix: the accumulator is now the template parameter T (it was hard-coded to
// the FP_SIZE-selected FP_TYPE, silently ignoring T).
template<typename T>
void do_work_sycl (sycl::queue &q, unsigned NELEMENTS, unsigned NREPETITIONS, T initial_value, T *res)
{
  q.submit([&](sycl::handler& h) {
    h.parallel_for(NELEMENTS, [=] (auto j)
    {
      T tmp = initial_value;
      for (unsigned i = 0; i < NREPETITIONS; ++i)
        tmp += sycl::log(tmp);
      res[j] = tmp;
    });
  }).wait();
}
# if FP_SIZE == 32
// SYCL workload using sycl::native::log (single-precision-only native
// approximation; this definition is compiled only when FP_SIZE == 32).
// Blocks until the kernel completes.
// Fix: the accumulator is now the template parameter T (it was hard-coded to
// the FP_SIZE-selected FP_TYPE, silently ignoring T).
template<typename T>
void do_work_sycl_native (sycl::queue &q, unsigned NELEMENTS, unsigned NREPETITIONS, T initial_value, T *res)
{
  q.submit([&](sycl::handler& h) {
    h.parallel_for(NELEMENTS, [=] (auto j)
    {
      T tmp = initial_value;
      for (unsigned i = 0; i < NREPETITIONS; ++i)
        tmp += sycl::native::log(tmp);
      res[j] = tmp;
    });
  }).wait();
}
# endif
// Benchmarks three log implementations (std::log, sycl::log and — in
// single precision — sycl::native::log) on the GPU, validating each against
// a host-computed reference within VALIDATION_THRESHOLD (relative error).
int main (int argc, char *argv[])
{
  static constexpr unsigned NELEMENTS = 64*1024*1024;
  static constexpr unsigned NREPETITIONS = 1024;
  sycl::device d (sycl::gpu_selector_v);
  sycl::queue q (d);
  // Empty kernel: warms up the device/JIT outside the timed sections.
  q.submit([&](sycl::handler& h) {
    h.single_task ([=]() { });
  }).wait();
  FP_TYPE initial_value = 2;
  // Host reference: same recurrence as the device kernels.
  FP_TYPE ref_res = initial_value;
  for (unsigned i = 0; i < NREPETITIONS; ++i)
    ref_res += std::log(ref_res);
  std::cout << "reference result = " << ref_res << std::endl;
  // --- std::log variant ---
  {
    FP_TYPE * std_res = new FP_TYPE[NELEMENTS];
    assert (std_res != nullptr);
    std::chrono::duration<float, std::micro> elapsed;
    {
      // Device buffer; only the kernel is timed, not the copy-back.
      auto * res = sycl::malloc_device<FP_TYPE>(NELEMENTS, q);
      auto tbegin = std::chrono::system_clock::now();
      do_work_std<FP_TYPE>(q, NELEMENTS, NREPETITIONS, initial_value, res);
      auto tend = std::chrono::system_clock::now();
      elapsed = tend - tbegin;
      q.memcpy (std_res, res, NELEMENTS*sizeof(FP_TYPE)).wait();
      sycl::free (res, q);
    }
    std::cout << "std::log result[0] = " << std_res[0] << std::endl;
    // All elements start from the same value, so they must all agree.
    bool allequal = true;
    for (auto i = 1; i < NELEMENTS; ++i)
      allequal = allequal and std_res[0] == std_res[i];
    if (allequal)
    {
      if (std::abs(ref_res - std_res[0])/std::abs(ref_res) < std::abs(VALIDATION_THRESHOLD))
        std::cout << "std::log validates. Total execution time is " << elapsed.count() << " us." << std::endl;
      else
        std::cout << "std::log does not validate (ref=" << ref_res << " std_res=" << std_res[0] << " mix=" << std::abs(ref_res - std_res[0])/std::abs(ref_res) << ")" << std::endl;
    }
    else
      std::cout << "std::log does not validate, results are not equal." << std::endl;
    delete [] std_res;
  }
  // --- sycl::log variant ---
  {
    FP_TYPE * sycl_res = new FP_TYPE[NELEMENTS];
    assert (sycl_res != nullptr);
    std::chrono::duration<float, std::micro> elapsed;
    {
      auto * res = sycl::malloc_device<FP_TYPE>(NELEMENTS, q);
      auto tbegin = std::chrono::system_clock::now();
      do_work_sycl<FP_TYPE>(q, NELEMENTS, NREPETITIONS, initial_value, res);
      auto tend = std::chrono::system_clock::now();
      elapsed = tend - tbegin;
      q.memcpy (sycl_res, res, NELEMENTS*sizeof(FP_TYPE)).wait();
      sycl::free (res, q);
    }
    std::cout << "sycl::log result[0] = " << sycl_res[0] << std::endl;
    bool allequal = true;
    for (auto i = 1; i < NELEMENTS; ++i)
      allequal = allequal and sycl_res[0] == sycl_res[i];
    if (allequal)
    {
      if (std::abs(ref_res - sycl_res[0])/std::abs(ref_res) < std::abs(VALIDATION_THRESHOLD))
        std::cout << "sycl::log validates. Total execution time is " << elapsed.count() << " us." << std::endl;
      else
        std::cout << "sycl::log does not validate (ref=" << ref_res << " sycl_res=" << sycl_res[0] << " mix=" << std::abs(ref_res - sycl_res[0])/std::abs(ref_res) << ")" << std::endl;
    }
    else
      std::cout << "sycl::log does not validate, results are not equal." << std::endl;
    delete [] sycl_res;
  }
  // --- sycl::native::log variant (single precision only) ---
# if FP_SIZE == 32
  {
    FP_TYPE * sycl_native_res = new FP_TYPE[NELEMENTS];
    assert (sycl_native_res != nullptr);
    std::chrono::duration<float, std::micro> elapsed;
    {
      auto * res = sycl::malloc_device<FP_TYPE>(NELEMENTS, q);
      auto tbegin = std::chrono::system_clock::now();
      do_work_sycl_native<FP_TYPE>(q, NELEMENTS, NREPETITIONS, initial_value, res);
      auto tend = std::chrono::system_clock::now();
      elapsed = tend - tbegin;
      q.memcpy (sycl_native_res, res, NELEMENTS*sizeof(FP_TYPE)).wait();
      sycl::free (res, q);
    }
    std::cout << "sycl::native::log result[0] = " << sycl_native_res[0] << std::endl;
    bool allequal = true;
    for (auto i = 1; i < NELEMENTS; ++i)
      allequal = allequal and sycl_native_res[0] == sycl_native_res[i];
    if (allequal)
    {
      if (std::abs(ref_res - sycl_native_res[0])/std::abs(ref_res) < std::abs(VALIDATION_THRESHOLD))
        std::cout << "sycl::native::log validates. Total execution time is " << elapsed.count() << " us." << std::endl;
      else
        std::cout << "sycl::native::log does not validate (ref=" << ref_res << " sycl_native_res=" << sycl_native_res[0] << " mix=" << std::abs(ref_res - sycl_native_res[0])/std::abs(ref_res) << ")" << std::endl;
    }
    else
      std::cout << "sycl::native::log does not validate, results are not equal." << std::endl;
    delete [] sycl_native_res;
  }
# endif // FP_SIZE == 32
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/redundant-queues/queues.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <chrono>
#include <iostream>
#include <string>
#include <unistd.h>
#include <vector>
// Number of 'one' values to sum (1M elements; the old comment said 256k,
// which contradicted N = 1024 * 1024)
constexpr size_t N = 1024 * 1024;
// Number of repetitions
constexpr int repetitions = 10000;
// Expected value of the sum: N ones
int sum_expected = N;
// Asynchronous-exception handler for SYCL queues: rethrows each captured
// exception to inspect it, optionally logs (DEBUG builds), and terminates
// the process — async errors are considered fatal in these samples.
static auto exception_handler = [](sycl::exception_list eList) {
  for (std::exception_ptr const &e : eList) {
    try {
      std::rethrow_exception(e);
    } catch (std::exception const &e) {
#if DEBUG
      std::cout << "Failure" << std::endl;
#endif
      std::terminate();
    }
  }
};
// Simple wall-clock stopwatch: starts on construction, Elapsed() reports
// seconds since then as a double.
class Timer {
public:
  Timer() : t0_{Clock::now()} {}
  // Seconds elapsed since construction.
  double Elapsed() { return Seconds(Clock::now() - t0_).count(); }

private:
  using Clock = std::chrono::steady_clock;
  using Seconds = std::chrono::duration<double>;
  Clock::time_point t0_;
};
// Sums `data` on the device using a single queue created once, repeated
// `iter` times; a one-work-group tree reduction in shared local memory.
// Returns the computed sum (and prints timing / pass-fail to stdout).
int reductionSingleQ(std::vector<int> &data, int iter) {
  const size_t data_size = data.size();
  int sum = 0;
  int work_group_size = 512;
  // NOTE(review): num_work_groups is unused in this variant (one group).
  int num_work_groups = 1;
  int num_work_items = work_group_size;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  sycl::queue q{sycl::default_selector_v, exception_handler};
  std::cout << q.get_device().get_info<sycl::info::device::name>() << "\n";
  // initialize data on the device
  q.submit([&](auto &h) {
    sycl::accessor buf_acc(buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(data_size, [=](auto index) { buf_acc[index] = 1; });
  });
  double elapsed = 0;
  for (int i = 0; i < iter; i++) {
    // reductionSingleQ main begin
    Timer timer;
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      // Per-work-item partial sums staged in SLM for the tree reduction.
      sycl::local_accessor<int, 1> scratch(work_group_size, h);
      h.parallel_for(sycl::nd_range<1>{num_work_items, work_group_size},
                     [=](sycl::nd_item<1> item) {
                       size_t loc_id = item.get_local_id(0);
                       // Grid-stride accumulation (shadows the outer `sum`).
                       int sum = 0;
                       for (int i = loc_id; i < data_size; i += num_work_items)
                         sum += buf_acc[i];
                       scratch[loc_id] = sum;
                       // Tree reduction: halve active lanes each step.
                       for (int i = work_group_size / 2; i > 0; i >>= 1) {
                         item.barrier(sycl::access::fence_space::local_space);
                         if (loc_id < i)
                           scratch[loc_id] += scratch[loc_id + i];
                       }
                       if (loc_id == 0)
                         sum_acc[0] = scratch[0];
                     });
    });
    // reductionSingleQ main end
    q.wait();
    // Host accessor blocks until the result is available on the host.
    sycl::host_accessor h_acc(sum_buf);
    sum = h_acc[0];
    elapsed += timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time reductionSingleQ = " << elapsed << "s"
              << " sum = " << sum << "\n";
  else
    std::cout << "ERROR: reductionSingleQ Expected " << sum_expected
              << " but got " << sum << "\n";
  return sum;
} // end reductionSingleQ
// Same reduction as reductionSingleQ, but deliberately constructs a NEW
// queue (and therefore, implicitly, a new context) inside every iteration
// to demonstrate the cost of redundant queue/context creation.
int reductionMultipleQMultipleC(std::vector<int> &data, int iter) {
  const size_t data_size = data.size();
  int sum = 0;
  int work_group_size = 512;
  // NOTE(review): num_work_groups is unused (kept to mirror the other
  // variants).
  int num_work_groups = 1;
  int num_work_items = work_group_size;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  sycl::queue q1{sycl::default_selector_v, exception_handler};
  // initialize data on the device
  q1.submit([&](auto &h) {
    sycl::accessor buf_acc(buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(data_size, [=](auto index) { buf_acc[index] = 1; });
  });
  double elapsed = 0;
  for (int i = 0; i < iter; i++) {
    // Fresh queue per iteration: this is the redundancy being measured.
    sycl::queue q2{sycl::default_selector_v, exception_handler};
    if (i == 0)
      std::cout << q2.get_device().get_info<sycl::info::device::name>() << "\n";
    // reductionMultipleQMultipleC main begin
    Timer timer;
    q2.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      sycl::local_accessor<int, 1> scratch(work_group_size, h);
      h.parallel_for(sycl::nd_range<1>{num_work_items, work_group_size},
                     [=](sycl::nd_item<1> item) {
                       size_t loc_id = item.get_local_id(0);
                       // Private partial sum over a strided slice.
                       int sum = 0;
                       for (int i = loc_id; i < data_size; i += num_work_items)
                         sum += buf_acc[i];
                       scratch[loc_id] = sum;
                       // Tree reduction in local memory.
                       for (int i = work_group_size / 2; i > 0; i >>= 1) {
                         item.barrier(sycl::access::fence_space::local_space);
                         if (loc_id < i)
                           scratch[loc_id] += scratch[loc_id + i];
                       }
                       if (loc_id == 0)
                         sum_acc[0] = scratch[0];
                     });
    });
    // reductionMultipleQMultipleC main end
    q2.wait();
    sycl::host_accessor h_acc(sum_buf);
    sum = h_acc[0];
    elapsed += timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time reductionMultipleQMultipleC = " << elapsed
              << "s"
              << " sum = " << sum << "\n";
  else
    std::cout << "ERROR: reductionMultipleQMultipleC Expected " << sum_expected
              << " but got " << sum << "\n";
  return sum;
} // end reductionMultipleQMultipleC
// Same reduction, but the per-iteration queue is created on q1's EXISTING
// context, demonstrating that sharing the context avoids most of the
// redundant-queue overhead measured in reductionMultipleQMultipleC.
int reductionMultipleQSingleC(std::vector<int> &data, int iter) {
  const size_t data_size = data.size();
  int sum = 0;
  int work_group_size = 512;
  // NOTE(review): num_work_groups is unused (kept to mirror the other
  // variants).
  int num_work_groups = 1;
  int num_work_items = work_group_size;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  sycl::queue q1{sycl::default_selector_v, exception_handler};
  // initialize data on the device
  q1.submit([&](auto &h) {
    sycl::accessor buf_acc(buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(data_size, [=](auto index) { buf_acc[index] = 1; });
  });
  double elapsed = 0;
  for (int i = 0; i < iter; i++) {
    // New queue, but on the SAME context as q1.
    sycl::queue q2{q1.get_context(), sycl::default_selector_v,
                   exception_handler};
    if (i == 0)
      std::cout << q2.get_device().get_info<sycl::info::device::name>() << "\n";
    // reductionMultipleQSingleC main begin
    Timer timer;
    q2.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      sycl::local_accessor<int, 1> scratch(work_group_size, h);
      h.parallel_for(sycl::nd_range<1>{num_work_items, work_group_size},
                     [=](sycl::nd_item<1> item) {
                       size_t loc_id = item.get_local_id(0);
                       // Private partial sum over a strided slice.
                       int sum = 0;
                       for (int i = loc_id; i < data_size; i += num_work_items)
                         sum += buf_acc[i];
                       scratch[loc_id] = sum;
                       // Tree reduction in local memory.
                       for (int i = work_group_size / 2; i > 0; i >>= 1) {
                         item.barrier(sycl::access::fence_space::local_space);
                         if (loc_id < i)
                           scratch[loc_id] += scratch[loc_id + i];
                       }
                       if (loc_id == 0)
                         sum_acc[0] = scratch[0];
                     });
    });
    // reductionMultipleQSingleC main end
    q2.wait();
    sycl::host_accessor h_acc(sum_buf);
    sum = h_acc[0];
    elapsed += timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time reductionMultipleQSingleContext = " << elapsed
              << "s"
              << " sum = " << sum << "\n";
  else
    std::cout << "ERROR: reductionMultipleQSingleContext Expected "
              << sum_expected << " but got " << sum << "\n";
  return sum;
} // end reductionMultipleQSingleC
int main(int argc, char *argv[]) {
  // Run each reduction variant for 100 iterations over N ones; each variant
  // prints its own timing and correctness report.
  constexpr int kIterations = 100;
  std::vector<int> data(N, 1);
  reductionSingleQ(data, kIterations);
  reductionMultipleQMultipleC(data, kIterations);
  reductionMultipleQSingleC(data, kIterations);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/joint-matrix/joint-matrix.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <cstdint>
#include <cstring>
#include <iostream>
#include <sycl/sycl.hpp>
using use = sycl::ext::oneapi::experimental::matrix::use;
using layout = sycl::ext::oneapi::experimental::matrix::layout;
using bfloat16 = sycl::ext::oneapi::bfloat16;
constexpr size_t SG_SZ = 16;
constexpr size_t TM = 8;
constexpr size_t TN = SG_SZ;
constexpr size_t TK = 16;
constexpr float ALPHA = 2.0;
constexpr float BF16_EPSILON = 0.00781250;
// Thin, non-owning wrapper around a raw pointer to a NUM_ROWS x NUM_COLS
// matrix. The dimensions live only in the type; no bounds checking is done.
template <typename T, size_t NUM_ROWS, size_t NUM_COLS> struct big_matrix {
private:
  T *mat; // borrowed storage, caller retains ownership

public:
  big_matrix(T *data) : mat(data) {}
  // Returns the wrapped raw pointer.
  T *get_data() { return mat; }
  // Re-points the wrapper at a different buffer (no ownership transfer).
  void set_data(T *data) { mat = data; }
};
// Computes C = ALPHA * (A * B + 1) on the device with the joint-matrix
// (tensor-hardware) extension. A is MxK row-major bfloat16; B must already
// be VNNI-packed as (K/2) x (2N); C is MxN float. Each sub-group produces
// one TM x TN tile of C.
template <typename T1, typename T2, size_t M, size_t N, size_t K>
void matrix_multiply(big_matrix<T1, M, N> &C, big_matrix<T2, M, K> &A,
                     big_matrix<T2, K / 2, N * 2> &B) {
  // kernel begin
  size_t NDRangeM = M / TM;
  size_t NDRangeN = N / TN;
  sycl::buffer<bfloat16, 2> bufA(A.get_data(), sycl::range<2>(M, K));
  sycl::buffer<bfloat16, 2> bufB(B.get_data(), sycl::range<2>(K, N));
  sycl::buffer<float, 2> bufC((float *)C.get_data(), sycl::range<2>(M, N));
  sycl::queue q;
  q.submit([&](sycl::handler &cgh) {
     sycl::accessor accC(bufC, cgh, sycl::read_write);
     sycl::accessor accA(bufA, cgh, sycl::read_only);
     sycl::accessor accB(bufB, cgh, sycl::read_only);
     cgh.parallel_for(
         sycl::nd_range<2>({NDRangeM, NDRangeN * SG_SZ}, {1, 1 * SG_SZ}),
         [=](sycl::nd_item<2> spmd_item) [[intel::reqd_sub_group_size(SG_SZ)]]
         {
           // The joint matrix API has to be accessed by all the workitems in a
           // subgroup these functions will be called once by the subgroup no
           // code divergence between the workitems
           const auto global_idx = spmd_item.get_global_id(0);
           const auto global_idy = spmd_item.get_global_id(1);
           // Top-left corner of this sub-group's output tile.
           const auto sg_startx = global_idx - spmd_item.get_local_id(0);
           const auto sg_starty = global_idy - spmd_item.get_local_id(1);
           sycl::sub_group sg = spmd_item.get_sub_group();
           sycl::ext::oneapi::experimental::matrix::joint_matrix<
               sycl::sub_group, bfloat16, use::a, TM, TK, layout::row_major>
               sub_a;
           // For B, we assume B has been already VNNIed.
           sycl::ext::oneapi::experimental::matrix::joint_matrix<
               sycl::sub_group, bfloat16, use::b, TK, TN,
               layout::ext_intel_packed>
               sub_b;
           sycl::ext::oneapi::experimental::matrix::joint_matrix<
               sycl::sub_group, float, use::accumulator, TM, TN>
               sub_c;
           // Accumulator starts at 1.0 (the reference computation also
           // starts C at 1.0).
           joint_matrix_fill(sg, sub_c, 1.0);
           // Accumulate over the K dimension, one TK-wide slab at a time.
           for (int k = 0; k < K / TK; k += 1) {
             joint_matrix_load(
                 sg, sub_a,
                 accA.template get_multi_ptr<sycl::access::decorated::no>() +
                     (sg_startx * TM) * K + k * TK,
                 K);
             joint_matrix_load(
                 sg, sub_b,
                 accB.template get_multi_ptr<sycl::access::decorated::no>() +
                     (k * TK / 2) * (N * 2) + sg_starty / SG_SZ * TN * 2,
                 N * 2);
             joint_matrix_mad(sg, sub_c, sub_a, sub_b, sub_c);
           }
           // Scale the tile by ALPHA before writing it back.
           joint_matrix_apply(sg, sub_c, [=](float &x) { x *= ALPHA; });
           joint_matrix_store(
               sg, sub_c,
               accC.template get_multi_ptr<sycl::access::decorated::no>() +
                   (sg_startx * TM) * N + sg_starty / SG_SZ * TN,
               N, layout::row_major);
         }); // parallel for
   }).wait();
  // kernel end
}
static constexpr size_t MATRIX_M = TM * 2;
static constexpr size_t MATRIX_N = TN * 2;
static constexpr size_t MATRIX_K = TK * 2;
// Host-side matrices. B is stored pre-VNNIed: consecutive pairs of K-rows
// are interleaved, so it has K/2 rows of 2*N bfloat16 values.
bfloat16 A[MATRIX_M][MATRIX_K];
bfloat16 B[MATRIX_K / 2][MATRIX_N * 2];
float C[MATRIX_M][MATRIX_N]; // device (joint-matrix) result
float D[MATRIX_M][MATRIX_N]; // host reference result
// Widens a bfloat16 to float. A bfloat16 is the upper 16 bits of an IEEE
// binary32, so placing its bits in the high half of a 32-bit word and
// reinterpreting that word as float reproduces the value exactly.
// Fix: the previous version dereferenced (int *)&x, which read 4 bytes from
// a 2-byte object (out-of-bounds) and violated strict aliasing; memcpy of
// the exact object sizes is the well-defined equivalent.
float make_fp32(bfloat16 x) {
  static_assert(sizeof(bfloat16) == sizeof(uint16_t),
                "bfloat16 expected to be 16 bits wide");
  uint16_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  uint32_t word = static_cast<uint32_t>(bits) << 16;
  float res;
  std::memcpy(&res, &word, sizeof(res));
  return res;
}
// Host reference for the joint-matrix kernel: C += A * B with B in VNNI
// layout, then C *= ALPHA. The int* parameters view the bfloat16 arrays as
// 32-bit words, so each "k" step covers a PAIR of bfloat16 values; the inner
// i-loop multiplies the two packed elements.
// NOTE(review): callers must pass K as the packed depth (original K / 2).
void matrix_multiply_ref(int *A_mem, int *B_mem, int *C_mem, int M, int N,
                         int K) {
  for (int m = 0; m < M; m++)
    for (int n = 0; n < N; n++) {
      for (int k = 0; k < K; k++) {
        // Because B was assumed VNNIed
        bfloat16 *va = (bfloat16 *)(A_mem + m * K + k);
        bfloat16 *vb = (bfloat16 *)(B_mem + k * N + n);
        float acc = *((float *)(C_mem + m * N + n));
        for (int i = 0; i < 2; i++) {
          acc += (make_fp32(va[i]) * make_fp32(vb[i]));
        }
        *((float *)(C_mem + m * N + n)) = acc;
      }
      // Scale after the full depth accumulation, matching the device kernel.
      *((float *)(C_mem + m * N + n)) *= ALPHA;
    }
}
// Fills A, B (VNNI layout), C and D, runs the device joint-matrix multiply
// into C and the host reference into D, and compares them element-wise
// within BF16_EPSILON. Returns 0 on success (process exit convention).
int main() {
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_K; j++) {
      A[i][j] = bfloat16(1.0f * (i + j));
    }
  }
  for (int i = 0; i < MATRIX_K / 2; i++) {
    for (int j = 0; j < MATRIX_N * 2; j++) {
      B[i][j] = bfloat16(2.0f * i + 3.0f * j);
    }
  }
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      C[i][j] = 1.0;
      D[i][j] = 1.0;
    }
  }
  big_matrix<float, MATRIX_M, MATRIX_N> MC((float *)&C);
  big_matrix<float, MATRIX_M, MATRIX_N> MD((float *)&D);
  big_matrix<bfloat16, MATRIX_M, MATRIX_K> MA((bfloat16 *)&A);
  big_matrix<bfloat16, MATRIX_K / 2, MATRIX_N * 2> MB((bfloat16 *)&B);
  matrix_multiply(MC, MA, MB);
  // Reference sees the arrays as int32 words, hence the halved K.
  matrix_multiply_ref((int32_t *)A, (int32_t *)B, (int32_t *)D, MATRIX_M,
                      MATRIX_N, MATRIX_K / 2);
  bool res = true;
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      // NOTE(review): unqualified fabs relies on a transitive <cmath>.
      if ((fabs(C[i][j] - D[i][j])) > BF16_EPSILON)
        res = false;
    }
  }
  std::cout << (res ? "passed" : "failed") << std::endl;
  return !res;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/libraries-stdlib/rng_test.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <iostream>
#include <oneapi/dpl/random>
#include <oneapi/mkl/rng.hpp>
// Generates N uniform floats in [0, 1) twice — once with oneDPL's
// device-side engine and once with oneMKL's host-driven generator — timing
// each, then prints the first and last ten values of both sequences.
// Usage: ./prog [N]  (N is clamped to at least 20 so the printouts work).
int main(int argc, char **argv) {
  unsigned int N = (argc == 1) ? 20 : std::stoi(argv[1]);
  if (N < 20)
    N = 20;
  // Generate sequences of random numbers between [0.0, 1.0] using oneDPL and
  // oneMKL
  sycl::queue Q(sycl::gpu_selector_v);
  std::cout << "Running on: "
            << Q.get_device().get_info<sycl::info::device::name>() << std::endl;
  auto test1 = sycl::malloc_shared<float>(N, Q.get_device(), Q.get_context());
  auto test2 = sycl::malloc_shared<float>(N, Q.get_device(), Q.get_context());
  std::uint32_t seed = (unsigned)time(NULL); // Get RNG seed value
  // oneDPL random number generator on GPU device: each work-item owns an
  // engine offset by its index so the streams do not overlap.
  clock_t start_time = clock(); // Start timer
  Q.parallel_for(N, [=](auto idx) {
     oneapi::dpl::minstd_rand rng_engine(seed, idx); // Initialize RNG engine
     oneapi::dpl::uniform_real_distribution<float>
         rng_distribution; // Set RNG distribution
     test1[idx] = rng_distribution(rng_engine); // Generate RNG sequence
   }).wait();
  clock_t end_time = clock(); // Stop timer
  std::cout << "oneDPL took " << float(end_time - start_time) / CLOCKS_PER_SEC
            << " seconds to generate " << N
            << " uniformly distributed random numbers." << std::endl;
  // oneMKL random number generator on GPU device
  start_time = clock(); // Start timer
  oneapi::mkl::rng::mcg31m1 engine(
      Q, seed); // Initialize RNG engine, set RNG distribution
  oneapi::mkl::rng::uniform<float, oneapi::mkl::rng::uniform_method::standard>
      rng_distribution(0.0, 1.0);
  oneapi::mkl::rng::generate(rng_distribution, engine, N, test2)
      .wait(); // Generate RNG sequence
  end_time = clock(); // Stop timer
  std::cout << "oneMKL took " << float(end_time - start_time) / CLOCKS_PER_SEC
            << " seconds to generate " << N
            << " uniformly distributed random numbers." << std::endl;
  // Show first ten random numbers from each method
  std::cout << std::endl
            << "oneDPL"
            << "\t"
            << "oneMKL" << std::endl;
  for (int i = 0; i < 10; i++)
    std::cout << test1[i] << " " << test2[i] << std::endl;
  // Show last ten random numbers from each method
  std::cout << "..." << std::endl;
  for (size_t i = N - 10; i < N; i++)
    std::cout << test1[i] << " " << test2[i] << std::endl;
  // Cleanup
  sycl::free(test1, Q.get_context());
  sycl::free(test2, Q.get_context());
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/libraries-stdlib/external_rand.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Compile:
// dpcpp -D{HOST|CPU|GPU} -std=c++17 -fsycl external_rand.cpp -o external_rand
// Snippet begin
#include <CL/sycl.hpp>
#include <iostream>
#include <random>
constexpr int N = 5;
extern SYCL_EXTERNAL int rand(void);
// Demonstrates calling the C library rand() (declared SYCL_EXTERNAL above)
// from inside a device kernel. Device selection is fixed at compile time by
// defining CPU or GPU; otherwise the default selector is used.
int main(void) {
#if defined CPU
  sycl::queue Q(sycl::cpu_selector_v);
#elif defined GPU
  sycl::queue Q(sycl::gpu_selector_v);
#else
  sycl::queue Q(sycl::default_selector_v);
#endif
  std::cout << "Running on: "
            << Q.get_device().get_info<sycl::info::device::name>() << std::endl;
  // Attempt to use rand() inside a DPC++ kernel
  auto test1 = sycl::malloc_shared<float>(N, Q.get_device(), Q.get_context());
  // Seed the host-side generator; whether the device honors it depends on
  // the SYCL_EXTERNAL rand implementation linked in.
  srand((unsigned)time(NULL));
  Q.parallel_for(N, [=](auto idx) {
     test1[idx] = (float)rand() / (float)RAND_MAX;
   }).wait();
  // Show the random number sequence
  for (int i = 0; i < N; i++)
    std::cout << test1[i] << std::endl;
  // Cleanup
  sycl::free(test1, Q.get_context());
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/jitting/stream-triad-modified-spec-const.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <algorithm>
#include <cassert>
#include <cfloat>
#include <iostream>
#include <string>
#ifndef NTIMES
#define NTIMES 10
#endif
class specialized_kernel;
constexpr int num_runs = NTIMES;
constexpr size_t scalar = 3;
constexpr sycl::specialization_id<size_t> trip_sc;
// Runs the triad kernel num_runs times, passing the inner-loop trip count to
// the device through a SYCL specialization constant so the JIT compiler can
// treat it as a literal. Verifies the results against a host replay and
// returns the fastest kernel time in nanoseconds, or (cl_ulong)-1 on error.
cl_ulong triad(size_t array_size, size_t inner_loop_size) {
  // Fix: the "no time yet" sentinel was initialized from DBL_MAX; converting
  // a double larger than the destination's maximum to an unsigned integer
  // type is undefined behavior. Use the type's own maximum, which also
  // matches the (size_t)-1 sentinel the caller compares against.
  cl_ulong min_time_ns0 = (cl_ulong)-1;
  sycl::queue q = sycl::queue(sycl::property::queue::enable_profiling{});
  std::cout << "Running on device: "
            << q.get_device().get_info<sycl::info::device::name>() << "\n";
  // Setup arrays (only array_size / 2 elements are used)
  double *A0 = sycl::malloc_shared<double>(array_size / 2, q);
  double *B0 = sycl::malloc_shared<double>(array_size / 2, q);
  double *C0 = sycl::malloc_shared<double>(array_size / 2, q);
  for (size_t i = 0; i < array_size / 2; i++) {
    A0[i] = 1.0;
    B0[i] = 2.0;
    C0[i] = 0.0;
  }
  // Run main computation <num_runs> times & record best time
  for (int i = 0; i < num_runs; i++) {
    auto q0_event = q.submit([&](sycl::handler &h) {
      // set specialization constant using runtime variable
      h.set_specialization_constant<trip_sc>(inner_loop_size);
      h.parallel_for<specialized_kernel>(
          array_size / 2, [=](auto idx, sycl::kernel_handler kh) {
            // set trip count to the now known specialization constant
            auto runtime_trip_count_const =
                kh.get_specialization_constant<trip_sc>();
            // NOTE(review): accum is an int, so partial double results are
            // truncated each iteration; the host verification below
            // truncates identically, so the check still passes.
            auto accum = 0;
            for (size_t j = 0; j < runtime_trip_count_const; j++) {
              auto multiplier = scalar * j;
              accum = accum + A0[idx] + B0[idx] * multiplier;
            }
            C0[idx] = accum;
          });
    });
    q.wait();
    cl_ulong exec_time_ns0 =
        q0_event
            .get_profiling_info<sycl::info::event_profiling::command_end>() -
        q0_event
            .get_profiling_info<sycl::info::event_profiling::command_start>();
    std::cout << "Execution time (iteration " << i
              << ") [sec]: " << (double)exec_time_ns0 * 1.0E-9 << "\n";
    min_time_ns0 = std::min(min_time_ns0, exec_time_ns0);
  }
  // Check correctness against a host-side replay of the same computation
  bool error = false;
  for (size_t vi = 0; vi < array_size / 2; vi++) {
    auto vaccum = 0;
    auto vruntime_trip_count_const = inner_loop_size;
    for (size_t vj = 0; vj < vruntime_trip_count_const; vj++) {
      auto vmultiplier = scalar * vj;
      vaccum = vaccum + A0[vi] + B0[vi] * vmultiplier;
    }
    // Verify correctness of C0 for current index
    if (C0[vi] != vaccum) {
      std::cout << "\nResult incorrect (element " << vi << " is " << C0[vi]
                << ")!\n";
      error = true;
    }
  }
  // Release resources
  sycl::free(A0, q);
  sycl::free(B0, q);
  sycl::free(C0, q);
  if (error)
    return -1; // wraps to the (cl_ulong)-1 sentinel
  std::cout << "Results are correct!\n\n";
  return min_time_ns0;
}
int main(int argc, char *argv[]) {
// Input & program info display
size_t array_size;
size_t inner_loop_size;
if (argc > 2) {
array_size = std::stoi(argv[1]);
inner_loop_size = std::stoi(argv[2]);
} else {
std::cout
<< "Run as ./<progname> <arraysize in elements> <inner loop size>\n";
return 1;
}
std::cout << "Running with stream size of " << array_size << " elements ("
<< (array_size * sizeof(double)) / (double)1024 / 1024 << "MB)\n";
std::cout << "Running with inner trip count of " << inner_loop_size << "\n";
// Compute triad
cl_ulong min_time = triad(array_size, inner_loop_size);
size_t min_cmp = -1;
if (min_time == min_cmp)
return 1;
size_t triad_bytes = 3 * sizeof(double) * array_size;
std::cout << "Triad Bytes: " << triad_bytes << "\n";
std::cout << "Time in sec (fastest run): " << min_time * 1.0E-9 << "\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/jitting/spec-const2.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <chrono>
#include <vector>
class specialized_kernel;
class literal_kernel;
// const static identifier of specialization constant
const static sycl::specialization_id<float> value_id;
// Fetch a value at runtime.
float get_value() { return 10; };
// Demonstrates ahead-of-time specialization: builds a kernel bundle with a
// specialization constant pre-set, measures the build cost, then runs the
// pre-built kernel and a plain literal kernel, timing each submission.
int main() {
  sycl::queue queue;
  // Get kernel ID from kernel class qualifier
  sycl::kernel_id specialized_kernel_id =
      sycl::get_kernel_id<specialized_kernel>();
  // Construct kernel bundle with only specialized_kernel in the input state
  sycl::kernel_bundle kb_src =
      sycl::get_kernel_bundle<sycl::bundle_state::input>(
          queue.get_context(), {specialized_kernel_id});
  // set specialization constant value
  kb_src.set_specialization_constant<value_id>(get_value());
  auto start = std::chrono::steady_clock::now();
  // build the kernel bundle for the set value
  sycl::kernel_bundle kb_exe = sycl::build(kb_src);
  auto end = std::chrono::steady_clock::now();
  std::cout << "specialization took - " << (end - start).count()
            << " nano-secs\n";
  // NOTE(review): buffer1 and buffer2 alias the same host vector; each is
  // only read back inside its own scope, so the results do not interleave.
  std::vector<float> vec{0, 0, 0, 0, 0};
  sycl::buffer<float> buffer1(vec.data(), vec.size());
  sycl::buffer<float> buffer2(vec.data(), vec.size());
  start = std::chrono::steady_clock::now();
  {
    queue.submit([&](auto &cgh) {
      sycl::accessor acc(buffer1, cgh, sycl::write_only, sycl::no_init);
      // use the precompiled kernel bundle in the executable state
      cgh.use_kernel_bundle(kb_exe);
      cgh.template single_task<specialized_kernel>(
          [=](sycl::kernel_handler kh) {
            float v = kh.get_specialization_constant<value_id>();
            acc[0] = v;
          });
    });
    queue.wait_and_throw();
  }
  end = std::chrono::steady_clock::now();
  {
    sycl::host_accessor host_acc(buffer1, sycl::read_only);
    std::cout << "result1 (c): " << host_acc[0] << " " << host_acc[1] << " "
              << host_acc[2] << " " << host_acc[3] << " " << host_acc[4]
              << std::endl;
  }
  std::cout << "execution took : " << (end - start).count() << " nano-secs\n";
  start = std::chrono::steady_clock::now();
  {
    queue.submit([&](auto &cgh) {
      sycl::accessor acc(buffer2, cgh, sycl::write_only, sycl::no_init);
      // Baseline: same store, but with an ordinary literal (JIT at first
      // submission).
      cgh.template single_task<literal_kernel>([=]() { acc[0] = 20; });
    });
    queue.wait_and_throw();
  }
  end = std::chrono::steady_clock::now();
  {
    sycl::host_accessor host_acc(buffer2, sycl::read_only);
    std::cout << "result2 (c): " << host_acc[0] << " " << host_acc[1] << " "
              << host_acc[2] << " " << host_acc[3] << " " << host_acc[4]
              << std::endl;
  }
  std::cout << "execution took - " << (end - start).count() << " nano-secs\n";
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/jitting/spec-const3.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
class SpecializedKernel;
// Identify the specialization constant.
constexpr sycl::specialization_id<int> nx_sc;
// Reads an integer from the user, passes it to the kernel as a
// specialization constant, and computes sum(0..Nx-1) on the device. The JIT
// compiler sees the user's value as a compile-time constant.
int main() {
  sycl::queue queue;
  std::cout << "Running on "
            << queue.get_device().get_info<sycl::info::device::name>() << "\n";
  std::vector<float> vec(1);
  {
    sycl::buffer<float> buf(vec.data(), vec.size());
    // Application execution stops here asking for input from user
    int Nx;
    std::cout << "Enter input number ..." << std::endl;
    std::cin >> Nx;
    queue.submit([&](sycl::handler &h) {
      sycl::accessor acc(buf, h, sycl::write_only, sycl::no_init);
      // set specialization constant with runtime variable
      h.set_specialization_constant<nx_sc>(Nx);
      h.single_task<SpecializedKernel>([=](sycl::kernel_handler kh) {
        // nx_sc value here will be input value provided at runtime and
        // can be optimized because JIT compiler now treats it as a constant.
        int runtime_const_trip_count = kh.get_specialization_constant<nx_sc>();
        int accum = 0;
        for (int i = 0; i < runtime_const_trip_count; i++) {
          accum = accum + i;
        }
        acc[0] = accum;
      });
    });
    // Buffer destruction at end of scope writes the result back to vec.
  }
  std::cout << vec[0] << std::endl;
  return 0;
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/jitting/stream-triad-modified-constant.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <algorithm>
#include <cassert>
#include <cfloat>
#include <iostream>
#include <string>
#ifndef NTIMES
#define NTIMES 10
#endif
class regular_constant_kernel;
constexpr int num_runs = NTIMES;
constexpr size_t scalar = 3;
// Runs the triad kernel num_runs times with the inner-loop trip count
// HARD-CODED to 10 in the kernel (the baseline this sample compares
// specialization constants against). Returns the fastest kernel time in
// nanoseconds, or (cl_ulong)-1 on verification failure.
// NOTE(review): the host verification replays inner_loop_size iterations,
// so the check only passes when the program is invoked with an inner loop
// size of 10 — intentional for this demo, but worth knowing.
cl_ulong triad(size_t array_size, size_t inner_loop_size) {
  // Fix: the "no time yet" sentinel was initialized from DBL_MAX; converting
  // a double larger than the destination's maximum to an unsigned integer
  // type is undefined behavior. Use the type's own maximum instead.
  cl_ulong min_time_ns0 = (cl_ulong)-1;
  sycl::queue q = sycl::queue(sycl::property::queue::enable_profiling{});
  std::cout << "Running on device: "
            << q.get_device().get_info<sycl::info::device::name>() << "\n";
  // Setup arrays (only array_size / 2 elements are used)
  double *A0 = sycl::malloc_shared<double>(array_size / 2, q);
  double *B0 = sycl::malloc_shared<double>(array_size / 2, q);
  double *C0 = sycl::malloc_shared<double>(array_size / 2, q);
  for (size_t i = 0; i < array_size / 2; i++) {
    A0[i] = 1.0;
    B0[i] = 2.0;
    C0[i] = 0.0;
  }
  // Run main computation <num_runs> times & record best time
  for (size_t i = 0; i < num_runs; i++) {
    auto q0_event = q.submit([&](sycl::handler &h) {
      h.parallel_for<regular_constant_kernel>(array_size / 2, [=](auto idx) {
        // set trip count to known regular constant
        size_t runtime_trip_count_const = 10;
        auto accum = 0;
        for (size_t j = 0; j < runtime_trip_count_const; j++) {
          auto multiplier = scalar * j;
          accum = accum + A0[idx] + B0[idx] * multiplier;
        }
        C0[idx] = accum;
      });
    });
    q.wait();
    cl_ulong exec_time_ns0 =
        q0_event
            .get_profiling_info<sycl::info::event_profiling::command_end>() -
        q0_event
            .get_profiling_info<sycl::info::event_profiling::command_start>();
    std::cout << "Execution time (iteration " << i
              << ") [sec]: " << (double)exec_time_ns0 * 1.0E-9 << "\n";
    min_time_ns0 = std::min(min_time_ns0, exec_time_ns0);
  }
  // Check correctness against a host-side replay of the same computation
  bool error = false;
  for (size_t vi = 0; vi < array_size / 2; vi++) {
    auto vaccum = 0;
    auto vruntime_trip_count_const = inner_loop_size;
    for (size_t vj = 0; vj < vruntime_trip_count_const; vj++) {
      auto vmultiplier = scalar * vj;
      vaccum = vaccum + A0[vi] + B0[vi] * vmultiplier;
    }
    // Verify correctness of C0 for current index
    if (C0[vi] != vaccum) {
      std::cout << "\nResult incorrect (element " << vi << " is " << C0[vi]
                << ")!\n";
      error = true;
    }
  }
  // Release resources
  sycl::free(A0, q);
  sycl::free(B0, q);
  sycl::free(C0, q);
  if (error)
    return -1; // wraps to the (cl_ulong)-1 sentinel
  std::cout << "Results are correct!\n\n";
  return min_time_ns0;
}
int main(int argc, char *argv[]) {
// Input & program info display
size_t array_size;
size_t inner_loop_size;
if (argc > 2) {
array_size = std::stoi(argv[1]);
inner_loop_size = std::stoi(argv[2]);
} else {
std::cout
<< "Run as ./<progname> <arraysize in elements> <inner loop size>\n";
return 1;
}
std::cout << "Running with stream size of " << array_size << " elements ("
<< (array_size * sizeof(double)) / (double)1024 / 1024 << "MB)\n";
std::cout << "Running with inner trip count of " << inner_loop_size << "\n";
// Compute triad
cl_ulong min_time = triad(array_size, inner_loop_size);
size_t min_cmp = -1;
if (min_time == min_cmp)
return 1;
size_t triad_bytes = 3 * sizeof(double) * array_size;
std::cout << "Triad Bytes: " << triad_bytes << "\n";
std::cout << "Time in sec (fastest run): " << min_time * 1.0E-9 << "\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/jitting/spec-const1.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <vector>
class specialized_kernel;
// const static identifier of specialization constant
const static sycl::specialization_id<float> value_id;
// Fetch a value at runtime.
float get_value() { return 10; };
// Minimal specialization-constant example: sets value_id from a runtime
// call, runs a single task that reads it back, and prints the result (10).
int main() {
  sycl::queue queue;
  std::vector<float> vec(1);
  {
    sycl::buffer<float> buffer(vec.data(), vec.size());
    queue.submit([&](auto &cgh) {
      sycl::accessor acc(buffer, cgh, sycl::write_only, sycl::no_init);
      // Set value of specialization constant.
      cgh.template set_specialization_constant<value_id>(get_value());
      // Runtime builds the kernel with specialization constant
      // replaced by the literal value provided in the preceding
      // call of `set_specialization_constant<value_id>`
      cgh.template single_task<specialized_kernel>(
          [=](sycl::kernel_handler kh) {
            const float val = kh.get_specialization_constant<value_id>();
            acc[0] = val;
          });
    });
    // Buffer destruction at end of scope writes the result back to vec.
  }
  queue.wait_and_throw();
  std::cout << vec[0] << std::endl;
  return 0;
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/jitting/stream-triad-modified-runtime-var.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <algorithm>
#include <cassert>
#include <cfloat>
#include <iostream>
#include <string>
#ifndef NTIMES
#define NTIMES 10
#endif
class non_specialized_kernel;
constexpr int num_runs = NTIMES;
constexpr size_t scalar = 3;
// Runs the triad kernel num_runs times with the inner-loop trip count taken
// from an ordinary runtime variable (no specialization), verifies against a
// host replay, and returns the fastest kernel time in nanoseconds, or
// (cl_ulong)-1 on verification failure.
cl_ulong triad(size_t array_size, size_t inner_loop_size) {
  // Fix: the "no time yet" sentinel was initialized from DBL_MAX; converting
  // a double larger than the destination's maximum to an unsigned integer
  // type is undefined behavior. Use the type's own maximum instead.
  cl_ulong min_time_ns0 = (cl_ulong)-1;
  sycl::queue q = sycl::queue(sycl::property::queue::enable_profiling{});
  std::cout << "Running on device: "
            << q.get_device().get_info<sycl::info::device::name>() << "\n";
  // Setup arrays (only array_size / 2 elements are used)
  double *A0 = sycl::malloc_shared<double>(array_size / 2, q);
  double *B0 = sycl::malloc_shared<double>(array_size / 2, q);
  double *C0 = sycl::malloc_shared<double>(array_size / 2, q);
  for (size_t i = 0; i < array_size / 2; i++) {
    A0[i] = 1.0;
    B0[i] = 2.0;
    C0[i] = 0.0;
  }
  // Run main computation <num_runs> times & record best time
  for (size_t i = 0; i < num_runs; i++) {
    auto q0_event = q.submit([&](sycl::handler &h) {
      h.parallel_for<non_specialized_kernel>(array_size / 2, [=](auto idx) {
        // set trip count to runtime variable
        auto runtime_trip_count_const = inner_loop_size;
        auto accum = 0;
        for (size_t j = 0; j < runtime_trip_count_const; j++) {
          auto multiplier = scalar * j;
          accum = accum + A0[idx] + B0[idx] * multiplier;
        }
        C0[idx] = accum;
      });
    });
    q.wait();
    cl_ulong exec_time_ns0 =
        q0_event
            .get_profiling_info<sycl::info::event_profiling::command_end>() -
        q0_event
            .get_profiling_info<sycl::info::event_profiling::command_start>();
    std::cout << "Execution time (iteration " << i
              << ") [sec]: " << (double)exec_time_ns0 * 1.0E-9 << "\n";
    min_time_ns0 = std::min(min_time_ns0, exec_time_ns0);
  }
  // Check correctness against a host-side replay of the same computation
  bool error = false;
  for (size_t vi = 0; vi < array_size / 2; vi++) {
    auto vaccum = 0;
    auto vruntime_trip_count_const = inner_loop_size;
    for (size_t vj = 0; vj < vruntime_trip_count_const; vj++) {
      auto vmultiplier = scalar * vj;
      vaccum = vaccum + A0[vi] + B0[vi] * vmultiplier;
    }
    // Verify correctness of C0 for current index
    if (C0[vi] != vaccum) {
      std::cout << "\nResult incorrect (element " << vi << " is " << C0[vi]
                << ")!\n";
      error = true;
    }
  }
  // Release resources
  sycl::free(A0, q);
  sycl::free(B0, q);
  sycl::free(C0, q);
  if (error)
    return -1; // wraps to the (cl_ulong)-1 sentinel
  std::cout << "Results are correct!\n\n";
  return min_time_ns0;
}
int main(int argc, char *argv[]) {
// Input & program info display
size_t array_size;
size_t inner_loop_size;
if (argc > 2) {
array_size = std::stoi(argv[1]);
inner_loop_size = std::stoi(argv[2]);
} else {
std::cout
<< "Run as ./<progname> <arraysize in elements> <inner loop size>\n";
return 1;
}
std::cout << "Running with stream size of " << array_size << " elements ("
<< (array_size * sizeof(double)) / (double)1024 / 1024 << "MB)\n";
std::cout << "Running with inner trip count of " << inner_loop_size << "\n";
// Compute triad
cl_ulong min_time = triad(array_size, inner_loop_size);
size_t min_cmp = -1;
if (min_time == min_cmp)
return 1;
size_t triad_bytes = 3 * sizeof(double) * array_size;
std::cout << "Triad Bytes: " << triad_bytes << "\n";
std::cout << "Time in sec (fastest run): " << min_time * 1.0E-9 << "\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/jitting/jit.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <array>
#include <chrono>
#include <iostream>
// Array type and data size for this example.
// 65536 ints per array; passed by reference to the vector-add helpers.
constexpr size_t array_size = (1 << 16);
typedef std::array<int, array_size> IntArray;
// Computes sum[i] = a[i] + b[i] on the device and waits for completion.
// An identical twin (VectorAdd2) exists so the sample can show that each
// distinct kernel pays its own JIT cost on first submission.
// Fix: removed the unused event local `e`.
void VectorAdd1(sycl::queue &q, const IntArray &a, const IntArray &b,
                IntArray &sum) {
  sycl::range num_items{a.size()};
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  sycl::buffer sum_buf(sum.data(), num_items);
  q.submit([&](auto &h) {
    // Input accessors
    sycl::accessor a_acc(a_buf, h, sycl::read_only);
    sycl::accessor b_acc(b_buf, h, sycl::read_only);
    // Output accessor
    sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(num_items,
                   [=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; });
  });
  q.wait();
}
// Computes sum[i] = a[i] + b[i] on the device and waits for completion.
// Deliberately identical to VectorAdd1: being a distinct kernel, it is
// JIT-compiled separately, which is what the timing in main() demonstrates.
// Fix: removed the unused event local `e`.
void VectorAdd2(sycl::queue &q, const IntArray &a, const IntArray &b,
                IntArray &sum) {
  sycl::range num_items{a.size()};
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  sycl::buffer sum_buf(sum.data(), num_items);
  q.submit([&](auto &h) {
    // Input accessors
    sycl::accessor a_acc(a_buf, h, sycl::read_only);
    sycl::accessor b_acc(b_buf, h, sycl::read_only);
    // Output accessor
    sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(num_items,
                   [=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; });
  });
  q.wait();
}
// Fill the array with its own indices: a[i] = i.
void InitializeArray(IntArray &a) {
  size_t idx = 0;
  for (auto &elem : a)
    elem = idx++;
}
// Times the same two kernels twice each. The first run of each kernel pays
// the JIT-compilation cost; the second run of the same kernel reuses the
// compiled binary, so the printed timings illustrate JIT overhead.
int main() {
  IntArray a, b, sum;
  InitializeArray(a);
  InitializeArray(b);
  // Profiling-enabled queue on the default device.
  sycl::queue q(sycl::default_selector_v,
                sycl::property::queue::enable_profiling{});
  std::cout << "Running on device: "
            << q.get_device().get_info<sycl::info::device::name>() << "\n";
  std::cout << "Vector size: " << a.size() << "\n";
  // First VectorAdd1 call: includes JIT compilation of its kernel.
  auto start = std::chrono::steady_clock::now();
  VectorAdd1(q, a, b, sum);
  auto end = std::chrono::steady_clock::now();
  std::cout << "Initial Vector add1 successfully completed on device - took "
            << (end - start).count() << " nano-secs\n";
  // Second VectorAdd1 call: kernel already compiled.
  start = std::chrono::steady_clock::now();
  VectorAdd1(q, a, b, sum);
  end = std::chrono::steady_clock::now();
  std::cout << "Second Vector add1 successfully completed on device - took "
            << (end - start).count() << " nano-secs\n";
  // VectorAdd2 is a distinct kernel, so its first call JITs again.
  start = std::chrono::steady_clock::now();
  VectorAdd2(q, a, b, sum);
  end = std::chrono::steady_clock::now();
  std::cout << "Initial Vector add2 successfully completed on device - took "
            << (end - start).count() << " nano-secs\n";
  start = std::chrono::steady_clock::now();
  VectorAdd2(q, a, b, sum);
  end = std::chrono::steady_clock::now();
  std::cout << "Second Vector add2 successfully completed on device - took "
            << (end - start).count() << " nano-secs\n";
  return 0;
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/grf-mode-selection/perf/large/ax-static-large.cpp
|
#include <omp.h>
#include <stdio.h>
#define ELEM 32000
#define NX1 12
#define ZDIM 6
#define I2(i, j) (j * NX1 + i)
#define I3(i, j, k) (k * NX1 * NX1 + j * NX1 + i)
#define I4(i, j, k, e) (e * NX1 * NX1 * NX1 + k * NX1 * NX1 + j * NX1 + i)
// Applies a 3D tensor-product operator per element: differentiate u along the
// three local directions with the 1D matrix D (NX1 x NX1), scale by the six
// symmetric geometric factors stored per point in g, then apply the transposed
// derivatives to produce w. (Nek-style "ax" kernel — presumed from the math;
// confirm against the originating application.)
// One OpenMP team per element; the s_* arrays are team-local scratch.
int c_ax_v80_static(int nelt, int nx, double *w, double *u, double *g,
                    double *D) {
  // ZDIM*nx*nx logical work items per team; each strides k by ZDIM below.
  int innerub = ZDIM * nx * nx;
  // start OpenMP offload region
#pragma omp target teams distribute thread_limit(ZDIM *NX1 *NX1)
  for (int e = 0; e < nelt; e++) {
    double s_u[NX1 * NX1 * NX1];
    double s_D[NX1 * NX1];
    // SLM used for the three arrays here
    double s_ur[NX1 * NX1 * NX1];
    double s_us[NX1 * NX1 * NX1];
    double s_ut[NX1 * NX1 * NX1];
    // Phase 1: stage D and this element's u into team-local storage.
    // The implicit barrier at the end of the parallel region makes the
    // staged data visible to phase 2.
#pragma omp parallel for
    for (int inner = 0; inner < innerub; inner++) {
      // Decompose the flat index into (i, j, k) with k in [0, ZDIM).
      int k = inner / (NX1 * NX1);
      int j = (inner - k * NX1 * NX1) / NX1;
      int i = inner - k * NX1 * NX1 - j * NX1;
      if (k == 0)
        s_D[I2(i, j)] = D[I2(i, j)];
      for (; k < NX1; k += ZDIM) {
        s_u[I3(i, j, k)] = u[I4(i, j, k, e)];
      }
    }
    // Phase 2: compute the r/s/t derivatives of u and combine them with the
    // six geometric factors (symmetric 3x3 per point) into s_ur/s_us/s_ut.
#pragma omp parallel for
    for (int inner = 0; inner < innerub; inner++) {
      int k = inner / (NX1 * NX1);
      int j = (inner - k * NX1 * NX1) / NX1;
      int i = inner - k * NX1 * NX1 - j * NX1;
      double r_G00, r_G01, r_G02, r_G11, r_G12, r_G22;
      for (; k < NX1; k += ZDIM) {
        double r_ur, r_us, r_ut;
        r_ur = r_us = r_ut = 0;
#ifdef FORCE_UNROLL
#pragma unroll NX1
#endif
        for (int m = 0; m < NX1; m++) {
          r_ur += s_D[I2(i, m)] * s_u[I3(m, j, k)];
          r_us += s_D[I2(j, m)] * s_u[I3(i, m, k)];
          r_ut += s_D[I2(k, m)] * s_u[I3(i, j, m)];
        }
        // Six factors per grid point, stored contiguously in g.
        const unsigned gbase = 6 * I4(i, j, k, e);
        r_G00 = g[gbase + 0];
        r_G01 = g[gbase + 1];
        r_G02 = g[gbase + 2];
        s_ur[I3(i, j, k)] = r_G00 * r_ur + r_G01 * r_us + r_G02 * r_ut;
        r_G11 = g[gbase + 3];
        r_G12 = g[gbase + 4];
        s_us[I3(i, j, k)] = r_G01 * r_ur + r_G11 * r_us + r_G12 * r_ut;
        r_G22 = g[gbase + 5];
        s_ut[I3(i, j, k)] = r_G02 * r_ur + r_G12 * r_us + r_G22 * r_ut;
      }
    }
    // Phase 3: apply the transposed derivative matrices and accumulate into w.
#pragma omp parallel for
    for (int inner = 0; inner < innerub; inner++) {
      int k = inner / (NX1 * NX1);
      int j = (inner - k * NX1 * NX1) / NX1;
      int i = inner - k * NX1 * NX1 - j * NX1;
      for (; k < NX1; k += ZDIM) {
        double wr = 0.0;
        for (int m = 0; m < NX1; m++) {
          double s_D_i = s_D[I2(m, i)];
          double s_D_j = s_D[I2(m, j)];
          double s_D_k = s_D[I2(m, k)];
          wr += s_D_i * s_ur[I3(m, j, k)] + s_D_j * s_us[I3(i, m, k)] +
                s_D_k * s_ut[I3(i, j, m)];
        }
        w[I4(i, j, k, e)] = wr;
      }
    }
  }
  // end OpenMP offload region
  return 0;
}
// Driver: initializes D/u/g to 1.0 (w to 0.0) on the host, maps the arrays
// to device 0, and runs the operator 100 times for timing. Note the results
// are never copied back or validated (exit data uses map(delete) with no
// prior update from) — this sample only measures kernel performance.
int main(void) {
#define ASIZE (ELEM * NX1 * NX1 * NX1)
  // static: too large for the stack.
  static double w[ASIZE], u[ASIZE], g[6 * ASIZE], D[NX1 * NX1];
  int nelt = ELEM;
  int nx = NX1;
  omp_set_default_device(0);
  for (int i = 0; i < nx * nx; i++)
    D[i] = 1.0;
  for (size_t i = 0; i < ASIZE; i++) {
    w[i] = 0.0;
    u[i] = 1.0;
    for (int j = 0; j < 6; j++)
      g[i * 6 + j] = 1.0;
  }
  // Allocate device storage, then copy the host data over.
#pragma omp target enter data map(alloc \
                                  : w [0:ASIZE], u [0:ASIZE], g [0:6 * ASIZE], \
                                    D [0:NX1 * NX1])
#pragma omp target update to(w [0:ASIZE], u [0:ASIZE], g [0:6 * ASIZE], \
                             D [0:NX1 * NX1])
  for (int i = 0; i < 100; i++)
    c_ax_v80_static(nelt, nx, w, u, g, D);
#pragma omp target exit data map(delete \
                                 : w [0:ASIZE], u [0:ASIZE], g [0:6 * ASIZE], \
                                 D [0:NX1 * NX1])
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/grf-mode-selection/perf/small/ax-static-small.cpp
|
#include <omp.h>
#include <stdio.h>
#define ELEM 32000
#define NX1 12
#define ZDIM 6
#define I2(i, j) (j * NX1 + i)
#define I3(i, j, k) (k * NX1 * NX1 + j * NX1 + i)
#define I4(i, j, k, e) (e * NX1 * NX1 * NX1 + k * NX1 * NX1 + j * NX1 + i)
// Tensor-product operator per element (identical code ships in the "large"
// GRF-mode variant of this sample): differentiate u along r/s/t with the 1D
// matrix D, scale with the six symmetric geometric factors from g, then apply
// the transposed derivatives to produce w. Domain semantics presumed from the
// math — verify against the originating kernel.
// One OpenMP team per element; s_* arrays are team-local scratch.
int c_ax_v80_static(int nelt, int nx, double *w, double *u, double *g,
                    double *D) {
  // Work items per team; each strides k by ZDIM in the loops below.
  int innerub = ZDIM * nx * nx;
  // start OpenMP offload region
#pragma omp target teams distribute thread_limit(ZDIM *NX1 *NX1)
  for (int e = 0; e < nelt; e++) {
    double s_u[NX1 * NX1 * NX1];
    double s_D[NX1 * NX1];
    // SLM used for the three arrays here
    double s_ur[NX1 * NX1 * NX1];
    double s_us[NX1 * NX1 * NX1];
    double s_ut[NX1 * NX1 * NX1];
    // Stage D and this element's u; the parallel region's implicit barrier
    // publishes the data to the next phase.
#pragma omp parallel for
    for (int inner = 0; inner < innerub; inner++) {
      int k = inner / (NX1 * NX1);
      int j = (inner - k * NX1 * NX1) / NX1;
      int i = inner - k * NX1 * NX1 - j * NX1;
      if (k == 0)
        s_D[I2(i, j)] = D[I2(i, j)];
      for (; k < NX1; k += ZDIM) {
        s_u[I3(i, j, k)] = u[I4(i, j, k, e)];
      }
    }
    // Derivatives of u combined with geometric factors into s_ur/s_us/s_ut.
#pragma omp parallel for
    for (int inner = 0; inner < innerub; inner++) {
      int k = inner / (NX1 * NX1);
      int j = (inner - k * NX1 * NX1) / NX1;
      int i = inner - k * NX1 * NX1 - j * NX1;
      double r_G00, r_G01, r_G02, r_G11, r_G12, r_G22;
      for (; k < NX1; k += ZDIM) {
        double r_ur, r_us, r_ut;
        r_ur = r_us = r_ut = 0;
#ifdef FORCE_UNROLL
#pragma unroll NX1
#endif
        for (int m = 0; m < NX1; m++) {
          r_ur += s_D[I2(i, m)] * s_u[I3(m, j, k)];
          r_us += s_D[I2(j, m)] * s_u[I3(i, m, k)];
          r_ut += s_D[I2(k, m)] * s_u[I3(i, j, m)];
        }
        // Six factors per grid point, stored contiguously in g.
        const unsigned gbase = 6 * I4(i, j, k, e);
        r_G00 = g[gbase + 0];
        r_G01 = g[gbase + 1];
        r_G02 = g[gbase + 2];
        s_ur[I3(i, j, k)] = r_G00 * r_ur + r_G01 * r_us + r_G02 * r_ut;
        r_G11 = g[gbase + 3];
        r_G12 = g[gbase + 4];
        s_us[I3(i, j, k)] = r_G01 * r_ur + r_G11 * r_us + r_G12 * r_ut;
        r_G22 = g[gbase + 5];
        s_ut[I3(i, j, k)] = r_G02 * r_ur + r_G12 * r_us + r_G22 * r_ut;
      }
    }
    // Transposed derivative application, accumulated into w.
#pragma omp parallel for
    for (int inner = 0; inner < innerub; inner++) {
      int k = inner / (NX1 * NX1);
      int j = (inner - k * NX1 * NX1) / NX1;
      int i = inner - k * NX1 * NX1 - j * NX1;
      for (; k < NX1; k += ZDIM) {
        double wr = 0.0;
        for (int m = 0; m < NX1; m++) {
          double s_D_i = s_D[I2(m, i)];
          double s_D_j = s_D[I2(m, j)];
          double s_D_k = s_D[I2(m, k)];
          wr += s_D_i * s_ur[I3(m, j, k)] + s_D_j * s_us[I3(i, m, k)] +
                s_D_k * s_ut[I3(i, j, m)];
        }
        w[I4(i, j, k, e)] = wr;
      }
    }
  }
  // end OpenMP offload region
  return 0;
}
// Driver: initializes host data (u, g, D to 1.0; w to 0.0), maps arrays to
// device 0, runs the operator 100 times. No results are copied back or
// validated (map(delete) on exit, no update-from) — timing-only sample.
int main(void) {
#define ASIZE (ELEM * NX1 * NX1 * NX1)
  // static: arrays are far too large for the stack.
  static double w[ASIZE], u[ASIZE], g[6 * ASIZE], D[NX1 * NX1];
  int nelt = ELEM;
  int nx = NX1;
  omp_set_default_device(0);
  for (int i = 0; i < nx * nx; i++)
    D[i] = 1.0;
  for (size_t i = 0; i < ASIZE; i++) {
    w[i] = 0.0;
    u[i] = 1.0;
    for (int j = 0; j < 6; j++)
      g[i * 6 + j] = 1.0;
  }
#pragma omp target enter data map(alloc \
                                  : w [0:ASIZE], u [0:ASIZE], g [0:6 * ASIZE], \
                                    D [0:NX1 * NX1])
#pragma omp target update to(w [0:ASIZE], u [0:ASIZE], g [0:6 * ASIZE], \
                             D [0:NX1 * NX1])
  for (int i = 0; i < 100; i++)
    c_ax_v80_static(nelt, nx, w, u, g, D);
#pragma omp target exit data map(delete \
                                 : w [0:ASIZE], u [0:ASIZE], g [0:6 * ASIZE], \
                                 D [0:NX1 * NX1])
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/grf-mode-selection/openmp/grf-mode-selection-omp.cpp
|
// Offloads an empty region to the default device — the minimal program for
// exercising device offload (e.g. to inspect GRF-mode compiler behavior).
int main(void) {
  // Empty target region: no work, just an offload round-trip.
#pragma omp target
  {
  }
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/grf-mode-selection/sycl/grf-mode-selection-sycl.cpp
|
#include <CL/sycl.hpp>
#include <sycl/ext/intel/experimental/kernel_properties.hpp>
// Demonstrates requesting the large-GRF mode for a SYCL kernel via the Intel
// experimental set_kernel_properties API.
// NOTE(review): sycl::ext::intel::experimental::kernel_properties is an
// experimental interface that has since been superseded by compile-time
// kernel properties in newer oneAPI releases — confirm against the installed
// compiler version before reuse.
int main() {
  // Creating buffer of 4 ints to be used inside the kernel code
  std::vector<int> input(4);
  sycl::buffer<int> buf(input.data(), 4);
  // Creating SYCL queue
  sycl::queue Queue;
  sycl::range num_items{input.size()};
  // Submitting command group(work) to queue
  Queue.submit([&](sycl::handler &cgh) {
    // Getting write only access to the buffer on a device
    sycl::accessor buf_acc(buf, cgh, sycl::write_only, sycl::no_init);
    cgh.parallel_for(num_items, [=](auto i) {
      // Request the large register file for this kernel.
      sycl::ext::intel::experimental::set_kernel_properties(
          sycl::ext::intel::experimental::kernel_properties::use_large_grf);
      // Fill buffer with indexes
      buf_acc[i] = i;
    });
  });
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/02_mem_partitioning/simple_example.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
// Snippet (not standalone): N and device_id come from the surrounding text.
// Allocate N ints directly in device memory, then initialize on the device.
int *a = (int *)omp_target_alloc(sizeof(int) * N, device_id);
#pragma omp target teams distribute parallel for simd
for (int i = 0; i < N; ++i)
{
  a[i] = i;
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/02_mem_partitioning/simple_example_sycl.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
// Snippet (not standalone): N and q come from the surrounding text.
// Allocate N ints of device USM and initialize them on the device.
int *a = sycl::malloc_device<int>(N, q);
q.parallel_for(N, [=](auto i) {
  a[i] = i;
});
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/06_cross_stack/cross_stack_01.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
// Snippet (not standalone): N, a, b, c come from the surrounding text.
// Reverse-order traversal; with static work partitioning this makes work
// items touch memory resident on the other stack.
// FIX: the condition was `i <= 0`, which never executes the body for N > 1;
// a descending loop over [0, N) must use `i >= 0`.
#pragma omp target teams distribute parallel for simd
for (int i = N - 1; i >= 0; --i)
{
  c[i] = a[i] + b[i];
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/01_wg_partitioning/wg_partitioning_3D.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
// Snippet (not standalone): nz/ny/nx come from the surrounding text.
// collapse(3) flattens the 3D nest into one iteration space for partitioning.
#pragma omp target teams distribute parallel for simd collapse(3)
for (int z = 0; z < nz; ++z)
{
  for (int y = 0; y < ny; ++y)
  {
    for (int x = 0; x < nx; ++x)
    {
      //
    }
  }
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/01_wg_partitioning/wg_partitioning_3D_sycl.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
// Snippet (not standalone): nz/ny/nx/cgh come from the surrounding text.
// 3D nd_range with 1x1x16 work-groups along the fastest dimension.
range<3> global{nz, ny, nx};
range<3> local{1, 1, 16};
cgh.parallel_for(nd_range<3>(global, local), [=](nd_item<3> item) {
  //
});
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/01_wg_partitioning/wg_partitioning_1D_sycl.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
// Snippet (not standalone): N and q come from the surrounding text.
// Simple 1D range kernel; the runtime chooses the work-group partitioning.
q.parallel_for(N, [=](auto i) {
  //
});
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/01_wg_partitioning/wg_partitioning_1D.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
// Snippet (not standalone): N comes from the surrounding text.
// 1D offloaded loop; the runtime partitions iterations across teams.
#pragma omp target teams distribute parallel for simd
for (int i = 0; i < N; ++i)
{
  //
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/05_stream_cross_tile/stream_cross_tile.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Code for cross tile stream
#include <iostream>
#include <omp.h>
// compile via:
// icpx -O2 -fiopenmp -fopenmp-targets=spir64 ./stream_cross_tile.cpp
// run via:
// EnableWalkerPartition=1 ZE_AFFINITY_MASK=0 ./a.out
// STREAM-style add benchmark where 1/n_th of the cache lines are remapped to
// the opposite half of the arrays, generating controllable cross-tile traffic.
int main()
{
  constexpr int64_t N = 256 * 1e6;
  constexpr int64_t bytes = N * sizeof(int);
  // vary n_th from 1 to 8 to change cross-tile traffice
  constexpr int n_th = 4;
  std::cout << "array size = " << bytes * 1e-9 << " GB" << std::endl;
  int *a = static_cast<int *>(malloc(bytes));
  int *b = static_cast<int *>(malloc(bytes));
  int *c = static_cast<int *>(malloc(bytes));
#pragma omp target enter data map(alloc:a[0:N])
#pragma omp target enter data map(alloc:b[0:N])
#pragma omp target enter data map(alloc:c[0:N])
  // Host init chosen so a[i] + b[i] == 2*i for validation below.
  for (int i = 0; i < N; ++i)
  {
    a[i] = i + 1;
    b[i] = i - 1;
  }
#pragma omp target update to(a[0:N])
#pragma omp target update to(b[0:N])
  const int no_max_rep = 100;
  double time;
  // 10 warm-up iterations; the clock starts at irep == 10.
  for (int irep = 0; irep < no_max_rep + 10; ++irep)
  {
    if (irep == 10) time = omp_get_wtime();
#pragma omp target teams distribute parallel for \
    simd simdlen(32) thread_limit(256)
    for (int j = 0; j < N; ++j)
    {
      // 16 ints per 64-byte cache line; every n_th-th line is remapped to
      // the far half of the arrays to create cross-tile accesses.
      const int cache_line_id = j / 16;
      int i;
      if ((cache_line_id % n_th) == 0)
      {
        i = (j + N / 2) % N;
      }
      else
      {
        i = j;
      }
      c[i] = a[i] + b[i];
    }
  }
  time = omp_get_wtime() - time;
  time = time / no_max_rep;
#pragma omp target update from(c[0:N])
  // Validate: every index is written exactly once (the remap is a bijection).
  for (int i = 0; i < N; ++i)
  {
    if (c[i] != 2 * i)
    {
      std::cout << "wrong results at i " << i << std::endl;
      exit(1);
    }
  }
  const int64_t streamed_bytes = 3 * N * sizeof(int);
  std::cout << "bandwidth = " << (streamed_bytes / time) * 1E-9 << " GB/s"
            << std::endl;
  std::cout << "cross-tile traffic = " << (1 / (double)n_th) * 100 << "%"
            << std::endl;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/04_stream_3D/stream_3D.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Code for 3D STREAM
#include <iostream>
#include <omp.h>
#include <cassert>
// compile via:
// icpx -O2 -fiopenmp -fopenmp-targets=spir64 ./stream_3D.cpp
// Sweeps all (lx, ly, lz) factorizations of a fixed total size and measures
// STREAM-add bandwidth for each 3D loop shape, to show how the collapse(3)
// iteration shape affects implicit-scaling performance.
// FIX: total_size was computed as `int lx * ly * lz`, which overflows int
// (undefined behavior) for large dimension products (up to 16384^3); the
// product is now formed in 64-bit before comparison.
int main(int argc, char **argv)
{
  const int device_id = omp_get_default_device();
  const int desired_total_size = 32 * 512 * 16384;
  const std::size_t bytes = desired_total_size * sizeof(int64_t);
  std::cout << "memory footprint = " << 3 * bytes * 1E-9 << " GB"
            << std::endl;
  int64_t *a = static_cast<int64_t*>(omp_target_alloc_device(bytes, device_id));
  int64_t *b = static_cast<int64_t*>(omp_target_alloc_device(bytes, device_id));
  int64_t *c = static_cast<int64_t*>(omp_target_alloc_device(bytes, device_id));
  const int min = 64;
  const int max = 32768;
  for (int lx = min; lx < max; lx *= 2)
  {
    for (int ly = min; ly < max; ly *= 2)
    {
      for (int lz = min; lz < max; lz *= 2)
      {
        // 64-bit product: lx*ly*lz can exceed INT_MAX for shapes that are
        // then filtered out below; the old int product was UB.
        const int64_t total_size = static_cast<int64_t>(lx) * ly * lz;
        if (total_size != desired_total_size)
          continue;
        std::cout << "lx=" << lx << " ly=" << ly << " lz="
                  << lz << ", ";
        // Device-side init: a[i] + b[i] == 2*i, checked after the runs.
#pragma omp target teams distribute parallel for simd
        for (int i = 0; i < total_size; ++i)
        {
          a[i] = i + 1;
          b[i] = i - 1;
          c[i] = 0;
        }
        const int no_max_rep = 40;
        const int warmup = 10;
        double time;
        // Clock starts after `warmup` untimed iterations.
        for (int irep = 0; irep < no_max_rep + warmup; ++irep)
        {
          if (irep == warmup) time = omp_get_wtime();
#pragma omp target teams distribute parallel for simd collapse(3)
          for (int iz = 0; iz < lz; ++iz)
          {
            for (int iy = 0; iy < ly; ++iy)
            {
              for (int ix = 0; ix < lx; ++ix)
              {
                const int index = ix + iy * lx + iz * lx * ly;
                c[index] = a[index] + b[index];
              }
            }
          }
        }
        time = omp_get_wtime() - time;
        time = time / no_max_rep;
        const int64_t streamed_bytes = 3 * total_size * sizeof(int64_t);
        std::cout << "bandwidth = " << (streamed_bytes / time) * 1E-9
                  << " GB/s" << std::endl;
        // Device-side validation of the last run.
#pragma omp target teams distribute parallel for simd
        for (int i = 0; i < total_size; ++i)
        {
          assert(c[i] == 2 * i);
        }
      }
    }
  }
  omp_target_free(a, device_id);
  omp_target_free(b, device_id);
  omp_target_free(c, device_id);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/06_cross_tile/cross_tile_01.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Snippet begin
// Snippet (not standalone): N, a, b, c come from the surrounding text.
// Reverse-order traversal; with static partitioning this makes work items
// touch memory resident on the other tile.
// FIX: the condition was `i <= 0`, which never executes the body for N > 1;
// a descending loop over [0, N) must use `i >= 0`.
#pragma omp target teams distribute parallel for simd
for (int i = N - 1; i >= 0; --i)
{
  c[i] = a[i] + b[i];
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/05_stream_cross_stack/stream_cross_stack.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Code for cross stack stream
#include <iostream>
#include <omp.h>
// compile via:
// icpx -O2 -fiopenmp -fopenmp-targets=spir64 ./stream_cross_stack.cpp
// run via:
// EnableWalkerPartition=1 ZE_AFFINITY_MASK=0 ./a.out
// STREAM-add where every cross_stack_fraction-th cache line is remapped to
// the opposite half of the arrays, generating a controllable percentage of
// cross-stack traffic; prints the achieved bandwidth.
template <int cross_stack_fraction>
void cross_stack_stream() {
  constexpr int64_t size = 256*1e6;
  constexpr int64_t bytes = size * sizeof(int64_t);
  int64_t *a = static_cast<int64_t*>(malloc( bytes ));
  int64_t *b = static_cast<int64_t*>(malloc( bytes ));
  int64_t *c = static_cast<int64_t*>(malloc( bytes ));
#pragma omp target enter data map( alloc:a[0:size] )
#pragma omp target enter data map( alloc:b[0:size] )
#pragma omp target enter data map( alloc:c[0:size] )
  // Host init chosen so a[i] + b[i] == 2*i for the validation below.
  for ( int i = 0; i < size; ++i ) {
    a[i] = i + 1;
    b[i] = i - 1;
    c[i] = 0;
  }
#pragma omp target update to( a[0:size] )
#pragma omp target update to( b[0:size] )
#pragma omp target update to( c[0:size] )
  const int num_max_rep = 100;
  double time;
  // 10 warm-up iterations; the clock starts at irep == 10.
  for ( int irep = 0; irep < num_max_rep+10; ++irep ) {
    if ( irep == 10 ) time = omp_get_wtime();
#pragma omp target teams distribute parallel for simd
    for ( int j = 0; j < size; ++j ) {
      // 16 int64_t per 128 bytes; every cross_stack_fraction-th line is
      // remapped to the far half of the arrays.
      const int cache_line_id = j / 16;
      int i;
      if ( (cache_line_id%cross_stack_fraction) == 0 ) {
        i = (j+size/2)%size;
      }
      else {
        i = j;
      }
      c[i] = a[i] + b[i];
    }
  }
  time = omp_get_wtime() - time;
  time = time/num_max_rep;
#pragma omp target update from( c[0:size] )
  // The remap is a bijection, so every index is written exactly once.
  for ( int i = 0; i < size; ++i ) {
    if ( c[i] != 2*i ) {
      std::cout << "wrong results!" << std::endl;
      exit(1);
    }
  }
  const int64_t streamed_bytes = 3 * size * sizeof(int64_t);
  std::cout << "cross_stack_percent = " << (1/(double)cross_stack_fraction)*100
            << "%, bandwidth = " << (streamed_bytes/time) * 1E-9 << " GB/s" << std::endl;
}
// Sweep cross-stack fractions 100% down to ~3% (1/1 .. 1/32) and report
// bandwidth for each; template argument selects the fraction at compile time.
int main() {
  cross_stack_stream< 1>();
  cross_stack_stream< 2>();
  cross_stack_stream< 4>();
  cross_stack_stream< 8>();
  cross_stack_stream<16>();
  cross_stack_stream<32>();
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/implicit-scaling/03_stream/stream.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// clang-format off
// Code for STREAM:
#include <iostream>
#include <omp.h>
#include <cstdint>
// compile via:
// icpx -O2 -fiopenmp -fopenmp-targets=spir64 ./stream.cpp
// Baseline STREAM-add benchmark (c = a + b) on the offload device; prints
// the achieved memory bandwidth over 100 timed iterations.
int main()
{
  constexpr int64_t N = 256 * 1e6;
  constexpr int64_t bytes = N * sizeof(int64_t);
  int64_t *a = static_cast<int64_t *>(malloc(bytes));
  int64_t *b = static_cast<int64_t *>(malloc(bytes));
  int64_t *c = static_cast<int64_t *>(malloc(bytes));
#pragma omp target enter data map(alloc:a[0:N])
#pragma omp target enter data map(alloc:b[0:N])
#pragma omp target enter data map(alloc:c[0:N])
  // Host init chosen so a[i] + b[i] == 2*i for the validation below.
  for (int i = 0; i < N; ++i)
  {
    a[i] = i + 1;
    b[i] = i - 1;
  }
#pragma omp target update to(a[0:N])
#pragma omp target update to(b[0:N])
  const int no_max_rep = 100;
  double time;
  // 10 warm-up iterations; the clock starts at irep == 10.
  for (int irep = 0; irep < no_max_rep + 10; ++irep)
  {
    if (irep == 10)
      time = omp_get_wtime();
#pragma omp target teams distribute parallel for simd
    for (int i = 0; i < N; ++i)
    {
      c[i] = a[i] + b[i];
    }
  }
  time = omp_get_wtime() - time;
  time = time / no_max_rep;
#pragma omp target update from(c[0:N])
  for (int i = 0; i < N; ++i)
  {
    if (c[i] != 2 * i)
    {
      std::cout << "wrong results!" << std::endl;
      exit(1);
    }
  }
  // 2 loads + 1 store per element.
  const int64_t streamed_bytes = 3 * N * sizeof(int64_t);
  std::cout << "bandwidth = " << (streamed_bytes / time) * 1E-9
            << " GB/s" << std::endl;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/buffer-accessors/kern1.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <stdio.h>
constexpr int N = 100;
// Buffer-lifetime variant 1: each kernel creates (and destroys) its own
// buffers, so data is copied back to the host arrays between kernels and
// re-uploaded for Kernel2 — the costliest pattern of the kern* samples.
int main() {
  int AData[N];
  int BData[N];
  int CData[N];
  sycl::queue Q;
  // Kernel1
  {
    // Create 3 buffers, each holding N integers
    sycl::buffer<int> ABuf(&AData[0], N);
    sycl::buffer<int> BBuf(&BData[0], N);
    sycl::buffer<int> CBuf(&CData[0], N);
    Q.submit([&](auto &h) {
      // Create device accessors.
      // The property no_init lets the runtime know that the
      // previous contents of the buffer can be discarded.
      sycl::accessor aA(ABuf, h, sycl::write_only, sycl::no_init);
      sycl::accessor aB(BBuf, h, sycl::write_only, sycl::no_init);
      sycl::accessor aC(CBuf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(N, [=](auto i) {
        aA[i] = 11;
        aB[i] = 22;
        aC[i] = 0;
      });
    });
    // Buffers go out of scope here: kernel completion is awaited and the
    // results are written back to AData/BData/CData.
  } // end Kernel1
  // Kernel2
  {
    // Create 3 buffers, each holding N integers
    sycl::buffer<int> ABuf(&AData[0], N);
    sycl::buffer<int> BBuf(&BData[0], N);
    sycl::buffer<int> CBuf(&CData[0], N);
    Q.submit([&](auto &h) {
      // Create device accessors
      sycl::accessor aA(ABuf, h, sycl::read_only);
      sycl::accessor aB(BBuf, h, sycl::read_only);
      sycl::accessor aC(CBuf, h);
      h.parallel_for(N, [=](auto i) { aC[i] += aA[i] + aB[i]; });
    });
  } // end Kernel2
  // Buffers are destroyed and so CData is updated and can be accessed
  for (int i = 0; i < N; i++) {
    printf("%d\n", CData[i]);
  }
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/buffer-accessors/kern3.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <stdio.h>
constexpr int N = 100;
// Buffer-lifetime variant 3: one shared buffer scope spans both kernels, so
// data stays on the device between Kernel1 and Kernel2 and is copied back
// only once, when the scope ends.
int main() {
  int AData[N];
  int BData[N];
  int CData[N];
  sycl::queue Q;
  {
    // Create 3 buffers, each holding N integers
    sycl::buffer<int> ABuf(&AData[0], N);
    sycl::buffer<int> BBuf(&BData[0], N);
    sycl::buffer<int> CBuf(&CData[0], N);
    // Kernel1
    Q.submit([&](auto &h) {
      // Create device accessors.
      // The property no_init lets the runtime know that the
      // previous contents of the buffer can be discarded.
      sycl::accessor aA(ABuf, h, sycl::write_only, sycl::no_init);
      sycl::accessor aB(BBuf, h, sycl::write_only, sycl::no_init);
      sycl::accessor aC(CBuf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(N, [=](auto i) {
        aA[i] = 11;
        aB[i] = 22;
        aC[i] = 0;
      });
    });
    // Kernel2: the accessor dependencies order it after Kernel1 without any
    // host synchronization in between.
    Q.submit([&](auto &h) {
      // Create device accessors
      sycl::accessor aA(ABuf, h, sycl::read_only);
      sycl::accessor aB(BBuf, h, sycl::read_only);
      sycl::accessor aC(CBuf, h);
      h.parallel_for(N, [=](auto i) { aC[i] += aA[i] + aB[i]; });
    });
  }
  // Since the buffers are going out of scope, they will have to be
  // copied back from device to host and this will require a wait for
  // all the kernels to finish and so no explicit wait is needed
  for (int i = 0; i < N; i++) {
    printf("%d\n", CData[i]);
  }
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/buffer-accessors/kern2.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <stdio.h>
constexpr int N = 100;
// Buffer-lifetime variant 2: buffers live for the whole of main; a
// host_accessor (rather than buffer destruction) is used to synchronize and
// read the result on the host.
int main() {
  int AData[N];
  int BData[N];
  int CData[N];
  sycl::queue Q;
  // Create 3 buffers, each holding N integers
  sycl::buffer<int> ABuf(&AData[0], N);
  sycl::buffer<int> BBuf(&BData[0], N);
  sycl::buffer<int> CBuf(&CData[0], N);
  // Kernel1
  Q.submit([&](auto &h) {
    // Create device accessors.
    // The property no_init lets the runtime know that the
    // previous contents of the buffer can be discarded.
    sycl::accessor aA(ABuf, h, sycl::write_only, sycl::no_init);
    sycl::accessor aB(BBuf, h, sycl::write_only, sycl::no_init);
    sycl::accessor aC(CBuf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(N, [=](auto i) {
      aA[i] = 11;
      aB[i] = 22;
      aC[i] = 0;
    });
  });
  // Kernel2: ordered after Kernel1 by its accessor dependencies.
  Q.submit([&](auto &h) {
    // Create device sycl::accessors
    sycl::accessor aA(ABuf, h, sycl::read_only);
    sycl::accessor aB(BBuf, h, sycl::read_only);
    sycl::accessor aC(CBuf, h);
    h.parallel_for(N, [=](auto i) { aC[i] += aA[i] + aB[i]; });
  });
  // The host accessor creation will ensure that a wait for kernel to finish
  // is triggered and data from device to host is copied
  sycl::host_accessor h_acc(CBuf);
  for (int i = 0; i < N; i++) {
    printf("%d\n", h_acc[i]);
  }
  return 0;
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/buffer-accessors/kern5.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <stdio.h>
constexpr int N = 100;
constexpr int iters = 100;
// Buffer-lifetime variant 5: the A/B buffers from Kernel1's scope are
// destroyed (forcing a copy-back to AData/BData), then NEW buffers over the
// same host arrays are created once and reused across all `iters` Kernel2
// submissions — so A/B are uploaded only once for the loop.
int main() {
  int AData[N];
  int BData[N];
  int CData[N];
  sycl::queue Q;
  sycl::buffer<int> CBuf(&CData[0], N);
  {
    // Create 2 buffers, each holding N integers
    sycl::buffer<int> ABuf(&AData[0], N);
    sycl::buffer<int> BBuf(&BData[0], N);
    // Kernel1
    Q.submit([&](auto &h) {
      // Create device accessors.
      // The property no_init lets the runtime know that the
      // previous contents of the buffer can be discarded.
      sycl::accessor aA(ABuf, h, sycl::write_only, sycl::no_init);
      sycl::accessor aB(BBuf, h, sycl::write_only, sycl::no_init);
      sycl::accessor aC(CBuf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(N, [=](auto i) {
        aA[i] = 11;
        aB[i] = 22;
        aC[i] = 0;
      });
    });
    // ABuf/BBuf destroyed here: results copied back into AData/BData.
  }
  // Fresh buffers over the now-initialized host arrays, shared by the loop.
  sycl::buffer<int> ABuf(&AData[0], N);
  sycl::buffer<int> BBuf(&BData[0], N);
  for (int it = 0; it < iters; it++) {
    // Kernel2
    Q.submit([&](auto &h) {
      // Create device accessors
      sycl::accessor aA(ABuf, h, sycl::read_only);
      sycl::accessor aB(BBuf, h, sycl::read_only);
      sycl::accessor aC(CBuf, h);
      h.parallel_for(N, [=](auto i) { aC[i] += aA[i] + aB[i]; });
    });
  }
  // host_accessor waits for all kernels and copies C back to the host.
  sycl::host_accessor h_acc(CBuf);
  for (int i = 0; i < N; i++) {
    printf("%d\n", h_acc[i]);
  }
  return 0;
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/buffer-accessors/kern4.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <stdio.h>
constexpr int N = 100;
constexpr int iters = 100;
// Buffer-lifetime variant 4: after Kernel1's buffers are destroyed (copying
// results back to AData/BData), Kernel2 captures the host arrays AData/BData
// BY VALUE into the kernel lambda — each submission copies both whole arrays
// into the kernel's argument data instead of using device buffers.
int main() {
  int AData[N];
  int BData[N];
  int CData[N];
  sycl::queue Q;
  sycl::buffer<int> CBuf(&CData[0], N);
  {
    // Create 2 buffers, each holding N integers
    sycl::buffer<int> ABuf(&AData[0], N);
    sycl::buffer<int> BBuf(&BData[0], N);
    // Kernel1
    Q.submit([&](auto &h) {
      // Create device accessors.
      // The property no_init lets the runtime know that the
      // previous contents of the buffer can be discarded.
      sycl::accessor aA(ABuf, h, sycl::write_only, sycl::no_init);
      sycl::accessor aB(BBuf, h, sycl::write_only, sycl::no_init);
      sycl::accessor aC(CBuf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(N, [=](auto i) {
        aA[i] = 11;
        aB[i] = 22;
        aC[i] = 0;
      });
    });
    // ABuf/BBuf destroyed here: results copied back into AData/BData.
  }
  for (int it = 0; it < iters; it++) {
    // Kernel2
    Q.submit([&](auto &h) {
      // Create device accessors
      sycl::accessor aC(CBuf, h);
      // AData/BData are captured by copy into the lambda on every iteration.
      h.parallel_for(N, [=](auto i) { aC[i] += AData[i] + BData[i]; });
    });
  }
  sycl::host_accessor h_acc(CBuf);
  for (int i = 0; i < N; i++) {
    printf("%d\n", h_acc[i]);
  }
  return 0;
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/io-kernel/out.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
// Print each work item's id via sycl::stream (8192-byte buffer, 1024-byte
// per-statement limit — ample for these messages).
void out1() {
  constexpr int N = 16;
  sycl::queue q;
  q.submit([&](auto &cgh) {
    sycl::stream str(8192, 1024, cgh);
    cgh.parallel_for(N, [=](sycl::item<1> it) {
      int id = it[0];
      /* Send the identifier to a stream to be printed on the console */
      str << "ID=" << id << sycl::endl;
    });
  }).wait();
} // end out1
// Stream with a per-statement limit of only 4 bytes: statement 2 ("ABCDEFG"
// plus endl) exceeds it, so per SYCL stream semantics its output is expected
// to be cut — contrast with out3 below, which uses a limit of 10.
void out2() {
  sycl::queue q;
  q.submit([&](auto &cgh) {
    sycl::stream str(8192, 4, cgh);
    cgh.parallel_for(1, [=](sycl::item<1>) {
      str << "ABC" << sycl::endl; // Print statement 1
      str << "ABCDEFG" << sycl::endl; // Print statement 2
    });
  }).wait();
} // end out2
// Same two statements as out2, but with a 10-byte per-statement limit so
// both messages fit and print in full.
void out3() {
  sycl::queue q;
  q.submit([&](auto &cgh) {
    sycl::stream str(8192, 10, cgh);
    cgh.parallel_for(1, [=](sycl::item<1>) {
      str << "ABC" << sycl::endl; // Print statement 1
      str << "ABCDEFG" << sycl::endl; // Print statement 2
    });
  }).wait();
} // end out3
// Print global ids from an nd_range launch (32 items in groups of 4).
void out4() {
  sycl::queue q;
  q.submit([&](auto &cgh) {
    sycl::stream str(8192, 1024, cgh);
    cgh.parallel_for(sycl::nd_range<1>(32, 4), [=](sycl::nd_item<1> it) {
      int id = it.get_global_id();
      str << "ID=" << id << sycl::endl;
    });
  }).wait();
} // end out4
// Like out4, but work item 31 stores through the NULL pointer m. The fault
// is INTENTIONAL — this sample demonstrates using sycl::stream output to
// locate a crashing work item. Do not "fix" the dereference.
void out5() {
  int *m = NULL;
  sycl::queue q;
  q.submit([&](auto &cgh) {
    sycl::stream str(8192, 1024, cgh);
    cgh.parallel_for(sycl::nd_range<1>(32, 4), [=](sycl::nd_item<1> it) {
      int id = it.get_global_id();
      str << "ID=" << id << sycl::endl;
      if (id == 31)
        *m = id; // deliberate null-pointer store (see comment above)
    });
  }).wait();
} // end out5
// Run all five stream demos in order; out5 is expected to fault (see out5).
int main() {
  out1();
  out2();
  out3();
  out4();
  out5();
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/multiple-kernel-execution/kernels.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <array>
#include <chrono>
#include <iostream>
#include <unistd.h>
// Array type and data size for this example.
constexpr size_t array_size = (1 << 15);
typedef std::array<int, array_size> IntArray;
#define iter 10
// Submits three independent reduction-style kernels to the same queue and
// times them together; each writes its own sum buffer, so the runtime is
// free to overlap them. Returns the elapsed tick count.
// NOTE(review): the name says "u-secs" but steady_clock's (end-start).count()
// is in the clock's native ticks (typically nanoseconds) — confirm.
int multi_queue(sycl::queue &q, const IntArray &a, const IntArray &b) {
  IntArray s1, s2, s3;
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  sycl::buffer sum_buf1(s1);
  sycl::buffer sum_buf2(s2);
  sycl::buffer sum_buf3(s3);
  // One work-group of 256 items; each item strides over the whole array.
  size_t num_groups = 1;
  size_t wg_size = 256;
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iter; i++) {
    // Kernel 1 of 3 (identical except for its output buffer).
    q.submit([&](sycl::handler &h) {
      sycl::accessor a_acc(a_buf, h, sycl::read_only);
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf1, h, sycl::write_only, sycl::no_init);
      h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
                     [=](sycl::nd_item<1> index) {
                       size_t loc_id = index.get_local_id();
                       sum_acc[loc_id] = 0;
                       // Repeated 1000x to give the kernel measurable work.
                       for (int j = 0; j < 1000; j++)
                         for (size_t i = loc_id; i < array_size; i += wg_size) {
                           sum_acc[loc_id] += a_acc[i] + b_acc[i];
                         }
                     });
    });
    // Kernel 2 of 3.
    q.submit([&](sycl::handler &h) {
      sycl::accessor a_acc(a_buf, h, sycl::read_only);
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf2, h, sycl::write_only, sycl::no_init);
      h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
                     [=](sycl::nd_item<1> index) {
                       size_t loc_id = index.get_local_id();
                       sum_acc[loc_id] = 0;
                       for (int j = 0; j < 1000; j++)
                         for (size_t i = loc_id; i < array_size; i += wg_size) {
                           sum_acc[loc_id] += a_acc[i] + b_acc[i];
                         }
                     });
    });
    // Kernel 3 of 3.
    q.submit([&](sycl::handler &h) {
      sycl::accessor a_acc(a_buf, h, sycl::read_only);
      sycl::accessor b_acc(b_buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf3, h, sycl::write_only, sycl::no_init);
      h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
                     [=](sycl::nd_item<1> index) {
                       size_t loc_id = index.get_local_id();
                       sum_acc[loc_id] = 0;
                       for (int j = 0; j < 1000; j++)
                         for (size_t i = loc_id; i < array_size; i += wg_size) {
                           sum_acc[loc_id] += a_acc[i] + b_acc[i];
                         }
                     });
    });
  }
  // Wait for all iter*3 submissions before stopping the clock.
  q.wait();
  auto end = std::chrono::steady_clock::now();
  std::cout << "multi_queue completed on device - took "
            << (end - start).count() << " u-secs\n";
  // check results
  return ((end - start).count());
} // end multi_queue
// Fill every element of the array with the value 1.
void InitializeArray(IntArray &a) {
  for (auto &element : a)
    element = 1;
}
IntArray a, b; // file-scope inputs; static storage is zero-initialized
int main() {
sycl::queue q(sycl::default_selector_v);
InitializeArray(a);
InitializeArray(b);
std::cout << "Running on device: "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
std::cout << "Vector size: " << a.size() << "\n";
// begin in-order submission
sycl::property_list q_prop{sycl::property::queue::in_order()};
std::cout << "In order queue: Jitting+Execution time\n";
sycl::queue q1(sycl::default_selector_v, q_prop);
multi_queue(q1, a, b);
usleep(500 * 1000);
std::cout << "In order queue: Execution time\n";
multi_queue(q1, a, b);
// end in-order submission
// begin out-of-order submission
sycl::queue q2(sycl::default_selector_v);
std::cout << "Out of order queue: Jitting+Execution time\n";
multi_queue(q2, a, b);
usleep(500 * 1000);
std::cout << "Out of order queue: Execution time\n";
multi_queue(q2, a, b);
// end out-of-order submission
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/buffers/buf-kern2.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <stdio.h>
constexpr int N = 25;
constexpr int STEPS = 100000;
int main() {
int AData[N];
int BData[N];
int CData[N];
sycl::queue Q;
// Create 3 buffers, each holding N integers
sycl::buffer<int> ABuf(&AData[0], N);
sycl::buffer<int> BBuf(&BData[0], N);
sycl::buffer<int> CBuf(&CData[0], N);
Q.submit([&](auto &h) {
// Create device accessors.
// The property no_init lets the runtime know that the
// previous contents of the buffer can be discarded.
sycl::accessor aA(ABuf, h, sycl::write_only, sycl::no_init);
sycl::accessor aB(BBuf, h, sycl::write_only, sycl::no_init);
h.parallel_for(N, [=](auto i) {
aA[i] = 10;
aB[i] = 20;
});
});
for (int j = 0; j < STEPS; j++) {
Q.submit([&](auto &h) {
// Create device accessors.
sycl::accessor aA(ABuf, h);
sycl::accessor aB(BBuf, h);
sycl::accessor aC(CBuf, h);
h.parallel_for(N, [=](auto i) {
aC[i] = (aA[i] < aB[i]) ? -1 : 1;
aA[i] += aC[i];
aB[i] -= aC[i];
});
});
} // end for
// Create host accessors.
const sycl::host_accessor haA(ABuf);
const sycl::host_accessor haB(BBuf);
printf("%d %d\n", haA[N / 2], haB[N / 2]);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/buffers/buf-kern1.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <stdio.h>
constexpr int N = 25;
constexpr int STEPS = 100000;
int main() {
int AData[N];
int BData[N];
int CData[N];
sycl::queue Q;
// Create 2 buffers, each holding N integers
sycl::buffer<int> ABuf(&AData[0], N);
sycl::buffer<int> BBuf(&BData[0], N);
Q.submit([&](auto &h) {
// Create device accessors.
// The property no_init lets the runtime know that the
// previous contents of the buffer can be discarded.
sycl::accessor aA(ABuf, h, sycl::write_only, sycl::no_init);
sycl::accessor aB(BBuf, h, sycl::write_only, sycl::no_init);
h.parallel_for(N, [=](auto i) {
aA[i] = 10;
aB[i] = 20;
});
});
for (int j = 0; j < STEPS; j++) {
sycl::buffer<int> CBuf(&CData[0], N);
Q.submit([&](auto &h) {
// Create device accessors.
sycl::accessor aA(ABuf, h);
sycl::accessor aB(BBuf, h);
sycl::accessor aC(CBuf, h);
h.parallel_for(N, [=](auto i) {
aC[i] = (aA[i] < aB[i]) ? -1 : 1;
aA[i] += aC[i];
aB[i] -= aC[i];
});
});
} // end for
// Create host accessors.
const sycl::host_accessor haA(ABuf);
const sycl::host_accessor haB(BBuf);
printf("%d %d\n", haA[N / 2], haB[N / 2]);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/matrix/matrix.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "multiply.hpp"
#include <iostream>
typedef unsigned long long UINT64;
#define xstr(s) x_str(s)
#define x_str(s) #s
using namespace std;
// routine to initialize an array with data
// Fill a with the linear ramp a[i][j] = row*i + col*j + off.
void InitArr(TYPE row, TYPE col, TYPE off, TYPE a[][NUM]) {
  for (int i = 0; i < NUM; ++i)
    for (int j = 0; j < NUM; ++j)
      a[i][j] = row * i + col * j + off;
}
// routine to print out contents of small arrays
// Print a NUM x NUM array, tab-separated, preceded by its name.
// Intended for small arrays only.
void PrintArr(char *name, TYPE Array[][NUM]) {
  cout << "\n" << name << "\n";
  for (int row = 0; row < NUM; ++row) {
    for (int col = 0; col < NUM; ++col)
      cout << Array[row][col] << "\t";
    cout << endl;
  }
}
// Carve four 256-byte-aligned Array pointers (each shifted by a distinct
// offset) out of over-sized char buffers, then time one parallel matrix
// multiply. Fix: the pointers are printed via static_cast<void*>; the
// original streamed the raw char*, which makes operator<< read the
// uninitialized buffer as a NUL-terminated string (undefined behavior).
int main() {
  char *buf1, *buf2, *buf3, *buf4;
  char *addr1, *addr2, *addr3, *addr4;
  Array *a, *b, *c, *t;
  int Offset_Addr1 = 128, Offset_Addr2 = 192, Offset_Addr3 = 0,
      Offset_Addr4 = 64;
  // malloc arrays space
  buf1 = new char[NUM * NUM * (sizeof(double)) + 1024];
  cout << "Address of buf1 = " << static_cast<void *>(buf1) << endl;
  addr1 = buf1 + 256 - ((UINT64)buf1 % 256) + (UINT64)Offset_Addr1;
  cout << "Offset of buf1 = " << static_cast<void *>(addr1) << endl;
  buf2 = new char[NUM * NUM * (sizeof(double)) + 1024];
  cout << "Address of buf2 = " << static_cast<void *>(buf2) << endl;
  addr2 = buf2 + 256 - ((UINT64)buf2 % 256) + (UINT64)Offset_Addr2;
  cout << "Offset of buf2 = " << static_cast<void *>(addr2) << endl;
  buf3 = new char[NUM * NUM * (sizeof(double)) + 1024];
  cout << "Address of buf3 = " << static_cast<void *>(buf3) << endl;
  addr3 = buf3 + 256 - ((UINT64)buf3 % 256) + (UINT64)Offset_Addr3;
  cout << "Offset of buf3 = " << static_cast<void *>(addr3) << endl;
  buf4 = new char[NUM * NUM * (sizeof(double)) + 1024];
  cout << "Address of buf4 = " << static_cast<void *>(buf4) << endl;
  addr4 = buf4 + 256 - ((UINT64)buf4 % 256) + (UINT64)Offset_Addr4;
  cout << "Offset of buf4 = " << static_cast<void *>(addr4) << endl;
  a = (Array *)addr1;
  b = (Array *)addr2;
  c = (Array *)addr3;
  t = (Array *)addr4;
  // initialize the arrays with data
  InitArr(3, -2, 1, a);
  InitArr(-2, 1, 3, b);
  cout << "Using multiply kernel: " << xstr(MULTIPLY) << "\n";
  // start timing the matrix multiply code
  TimeInterval matrix_time;
  ParallelMultiply(NUM, a, b, c, t);
  double matrix_elapsed = matrix_time.Elapsed();
  cout << "Elapsed Time: " << matrix_elapsed << "s\n";
  // free memory
  delete[] buf1;
  delete[] buf2;
  delete[] buf3;
  delete[] buf4;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/matrix/multiply.hpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
constexpr int MAXTHREADS = 16;
constexpr int NUM = 1024;
constexpr int MATRIXTILESIZE = 16;
constexpr int WPT = 8;
#include <CL/sycl.hpp>
// exception handler
/*
The exception_list parameter is an iterable list of std::exception_ptr objects.
But those pointers are not always directly readable.
So, we rethrow the pointer, catch it, and then we have the exception itself.
Note: depending upon the operation there may be several exceptions.
*/
auto exception_handler = [](sycl::exception_list exceptionList) {
for (std::exception_ptr const &e : exceptionList) {
try {
std::rethrow_exception(e);
} catch (cl::sycl::exception const &e) {
std::terminate(); // exit the process immediately.
}
}
};
typedef float TYPE;
typedef TYPE Array[NUM];
// Wall-clock stopwatch that starts timing at construction.
class TimeInterval {
public:
  TimeInterval() : start_(std::chrono::steady_clock::now()) {}
  // Seconds elapsed since this object was constructed.
  double Elapsed() {
    return std::chrono::duration_cast<Duration>(
               std::chrono::steady_clock::now() - start_)
        .count();
  }

private:
  using Duration = std::chrono::duration<double>;
  std::chrono::steady_clock::time_point start_;
};
// Select which multiply kernel to use via the following macro so that the
// kernel being used can be reported when the test is run.
#define MULTIPLY multiply1_2
extern void multiply1(int msize, int tidx, int numt, TYPE a[][NUM],
TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]);
extern void multiply1_1(int msize, int tidx, int numt, TYPE a[][NUM],
TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]);
extern void multiply1_2(int msize, int tidx, int numt, TYPE a[][NUM],
TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]);
extern void ParallelMultiply(int msize, TYPE a[][NUM], TYPE b[][NUM],
TYPE c[][NUM], TYPE t[][NUM]);
|
hpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/matrix/multiply.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// matrix multiply routines
#include "multiply.hpp"
#include <CL/sycl.hpp>
#include <array>
using namespace std;
template <typename T> class Matrix1;
template <typename T> class Matrix1_1;
template <typename T> class Matrix1_2;
// Basic matrix multiply
// Basic matrix multiply: one work item per output element, accumulating
// straight into the C accessor (global memory) on every k iteration.
// msize/tidx/numt are unused; they keep the signature identical across
// all multiply variants selected via the MULTIPLY macro.
// Fix: removed the unused function-scope `int i, j, k;` declaration.
void multiply1(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM],
               TYPE c[][NUM], TYPE t[][NUM]) {
  // Declare a deviceQueue
  sycl::queue q(sycl::default_selector_v, exception_handler);
  cout << "Running on " << q.get_device().get_info<sycl::info::device::name>()
       << "\n";
  // Declare a 2 dimensional range
  sycl::range<2> matrix_range{NUM, NUM};
  // Declare 3 buffers and Initialize them
  sycl::buffer<TYPE, 2> bufferA((TYPE *)a, matrix_range);
  sycl::buffer<TYPE, 2> bufferB((TYPE *)b, matrix_range);
  sycl::buffer<TYPE, 2> bufferC((TYPE *)c, matrix_range);
  // Submit our job to the queue
  q.submit([&](auto &h) {
    // Declare 3 accessors to our buffers. The first 2 read-only and the
    // last read-write.
    sycl::accessor accessorA(bufferA, h, sycl::read_only);
    sycl::accessor accessorB(bufferB, h, sycl::read_only);
    sycl::accessor accessorC(bufferC, h);
    // Execute matrix multiply in parallel over our matrix_range
    // ind is an index into this range
    h.parallel_for(matrix_range, [=](sycl::id<2> ind) {
      int k;
      for (k = 0; k < NUM; k++) {
        // Perform computation ind[0] is row, ind[1] is col
        accessorC[ind[0]][ind[1]] +=
            accessorA[ind[0]][k] * accessorB[k][ind[1]];
      }
    });
  }).wait_and_throw();
} // multiply1
// Replaces accessorC reference with a local variable
// Like multiply1, but each work item accumulates into a private local
// variable and writes C once, avoiding a global-memory read-modify-write
// per k iteration.
// Fix: removed the unused function-scope `int i, j, k;` declaration.
void multiply1_1(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM],
                 TYPE c[][NUM], TYPE t[][NUM]) {
  // Declare a deviceQueue
  sycl::queue q(sycl::default_selector_v, exception_handler);
  cout << "Running on " << q.get_device().get_info<sycl::info::device::name>()
       << "\n";
  // Declare a 2 dimensional range
  sycl::range<2> matrix_range{NUM, NUM};
  // Declare 3 buffers and Initialize them
  sycl::buffer<TYPE, 2> bufferA((TYPE *)a, matrix_range);
  sycl::buffer<TYPE, 2> bufferB((TYPE *)b, matrix_range);
  sycl::buffer<TYPE, 2> bufferC((TYPE *)c, matrix_range);
  // Submit our job to the queue
  q.submit([&](auto &h) {
    // Declare 3 accessors to our buffers. The first 2 read-only and the
    // last read-write.
    sycl::accessor accessorA(bufferA, h, sycl::read_only);
    sycl::accessor accessorB(bufferB, h, sycl::read_only);
    sycl::accessor accessorC(bufferC, h);
    // Execute matrix multiply in parallel over our matrix_range
    // ind is an index into this range
    h.parallel_for(matrix_range, [=](sycl::id<2> ind) {
      int k;
      TYPE acc = 0.0;
      for (k = 0; k < NUM; k++) {
        // Perform computation ind[0] is row, ind[1] is col
        acc += accessorA[ind[0]][k] * accessorB[k][ind[1]];
      }
      accessorC[ind[0]][ind[1]] = acc;
    });
  }).wait_and_throw();
}
// Replaces accessorC reference with a local variable and adds matrix tiling
// Tiled matrix multiply: each work group stages MATRIXTILESIZE-square
// tiles of A and B in work-group local memory, with barriers separating
// the load and compute phases; each work item accumulates privately and
// writes C once.
// Fixes: removed the unused function-scope `int i, j, k;` declaration and
// replaced the legacy cl::sycl::nd_item spelling with sycl::nd_item to
// match the rest of the file.
void multiply1_2(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM],
                 TYPE c[][NUM], TYPE t[][NUM]) {
  // Declare a deviceQueue
  sycl::queue q(sycl::default_selector_v, exception_handler);
  cout << "Running on " << q.get_device().get_info<sycl::info::device::name>()
       << "\n";
  // Declare a 2 dimensional range
  sycl::range<2> matrix_range{NUM, NUM};
  sycl::range<2> tile_range{MATRIXTILESIZE, MATRIXTILESIZE};
  // Declare 3 buffers and Initialize them
  sycl::buffer<TYPE, 2> bufferA((TYPE *)a, matrix_range);
  sycl::buffer<TYPE, 2> bufferB((TYPE *)b, matrix_range);
  sycl::buffer<TYPE, 2> bufferC((TYPE *)c, matrix_range);
  // Submit our job to the queue
  q.submit([&](auto &h) {
    // Declare 3 accessors to our buffers. The first 2 read-only and the
    // last read-write.
    sycl::accessor accessorA(bufferA, h, sycl::read_only);
    sycl::accessor accessorB(bufferB, h, sycl::read_only);
    sycl::accessor accessorC(bufferC, h);
    // Create matrix tiles in work-group local memory.
    sycl::local_accessor<TYPE, 2> aTile(
        sycl::range<2>(MATRIXTILESIZE, MATRIXTILESIZE), h);
    sycl::local_accessor<TYPE, 2> bTile(
        sycl::range<2>(MATRIXTILESIZE, MATRIXTILESIZE), h);
    // Execute matrix multiply in parallel over our matrix_range
    h.parallel_for(sycl::nd_range<2>(matrix_range, tile_range),
                   [=](sycl::nd_item<2> it) {
                     int k;
                     const int numTiles = NUM / MATRIXTILESIZE;
                     const int row = it.get_local_id(0);
                     const int col = it.get_local_id(1);
                     const int globalRow =
                         MATRIXTILESIZE * it.get_group(0) + row;
                     const int globalCol =
                         MATRIXTILESIZE * it.get_group(1) + col;
                     TYPE acc = 0.0;
                     // Note: the loop variable t shadows the (unused)
                     // scratch-matrix parameter t.
                     for (int t = 0; t < numTiles; t++) {
                       const int tiledRow = MATRIXTILESIZE * t + row;
                       const int tiledCol = MATRIXTILESIZE * t + col;
                       aTile[row][col] = accessorA[globalRow][tiledCol];
                       bTile[row][col] = accessorB[tiledRow][globalCol];
                       it.barrier(sycl::access::fence_space::local_space);
                       for (k = 0; k < MATRIXTILESIZE; k++) {
                         // Perform computation ind[0] is row, ind[1] is col
                         acc += aTile[row][k] * bTile[k][col];
                       }
                       it.barrier(sycl::access::fence_space::local_space);
                     }
                     accessorC[globalRow][globalCol] = acc;
                   });
  }).wait_and_throw();
} // multiply1_2
// Invoke the kernel selected at compile time by the MULTIPLY macro
// (see multiply.hpp).
void ParallelMultiply(int msize, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM],
                      TYPE t[][NUM]) {
  MULTIPLY(NUM, MAXTHREADS, 0, a, b, c, t);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/local-global-sync/atomics_reduction.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <chrono>
#include <iostream>
#include <string>
#include <unistd.h>
#include <vector>
// Summation of 256k 'one' values
constexpr size_t N = 1024 * 256;
// Number of repetitions
constexpr int repetitions = 10000;
// expected vlaue of sum
int sum_expected = N;
// Queue async-exception handler: surface each stored exception by
// rethrowing it, then abort the process.
static auto exception_handler = [](sycl::exception_list eList) {
  for (std::exception_ptr const &ptr : eList) {
    try {
      std::rethrow_exception(ptr);
    } catch (std::exception const &ex) {
#if DEBUG
      std::cout << "Failure" << std::endl;
#endif
      std::terminate();
    }
  }
};
// Wall-clock stopwatch started at construction; Elapsed() reports seconds.
class Timer {
public:
  Timer() : start_(std::chrono::steady_clock::now()) {}
  // Seconds since construction.
  double Elapsed() {
    return std::chrono::duration_cast<Duration>(
               std::chrono::steady_clock::now() - start_)
        .count();
  }

private:
  using Duration = std::chrono::duration<double>;
  std::chrono::steady_clock::time_point start_;
};
// Baseline reduction: a single work item (global id 0) walks the whole
// buffer serially; all other work items return immediately. Reports the
// average time over `iter` runs.
// Fix: removed the unused `glob_id` local from the zeroing kernel.
int reductionIntSerial(sycl::queue &q, std::vector<int> &data, int iter) {
  const size_t data_size = data.size();
  int sum = 0;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  // initialize data on the device
  q.submit([&](auto &h) {
    sycl::accessor buf_acc(buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(data_size, [=](auto index) { buf_acc[index] = 1; });
  });
  q.wait();
  double elapsed = 0;
  for (int i = 0; i < iter; i++) {
    // Zero the result cell outside the timed region.
    q.submit([&](auto &h) {
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(1, [=](auto index) { sum_acc[0] = 0; });
    });
    Timer timer;
    // reductionIntSerial main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(data_size, [=](auto index) {
        int glob_id = index[0];
        if (glob_id == 0) {
          int sum = 0;
          // NOTE(review): this loop uses the global constant N rather
          // than data_size — identical here; confirm before reusing.
          for (int i = 0; i < N; i++)
            sum += buf_acc[i];
          sum_acc[0] = sum;
        }
      });
    });
    // reductionIntSerial main end
    q.wait();
    sycl::host_accessor h_acc(sum_buf);
    sum = h_acc[0];
    elapsed += timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ReductionIntSerial = " << elapsed << "s"
              << " sum = " << sum << "\n";
  else
    std::cout << "ERROR: ReductionIntSerial Expected " << sum_expected
              << " but got " << sum << "\n";
  return sum;
} // end reductionIntSerial
// Single-work-group tree reduction: each work item first accumulates a
// strided slice into work-group local memory, then pairwise halving with
// barriers combines the partial sums; work item 0 writes the result.
// Fix: removed the unused `num_work_groups` local.
int reductionIntBarrier(sycl::queue &q, std::vector<int> &data, int iter) {
  const size_t data_size = data.size();
  int sum = 0;
  int work_group_size = 512;
  int num_work_items = work_group_size;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  // initialize data on the device
  q.submit([&](auto &h) {
    sycl::accessor buf_acc(buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(data_size, [=](auto index) { buf_acc[index] = 1; });
  });
  double elapsed = 0;
  for (int i = 0; i < iter; i++) {
    // reductionIntBarrier main begin
    Timer timer;
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      sycl::local_accessor<int, 1> scratch(work_group_size, h);
      h.parallel_for(sycl::nd_range<1>{work_group_size, work_group_size},
                     [=](sycl::nd_item<1> item) {
                       size_t loc_id = item.get_local_id(0);
                       // Per-work-item partial sum over a strided slice.
                       int sum = 0;
                       for (int i = loc_id; i < data_size; i += num_work_items)
                         sum += buf_acc[i];
                       scratch[loc_id] = sum;
                       // Pairwise tree reduction in local memory.
                       for (int i = work_group_size / 2; i > 0; i >>= 1) {
                         item.barrier(sycl::access::fence_space::local_space);
                         if (loc_id < i)
                           scratch[loc_id] += scratch[loc_id + i];
                       }
                       if (loc_id == 0)
                         sum_acc[0] = scratch[0];
                     });
    });
    // reductionIntBarrier main end
    q.wait();
    sycl::host_accessor h_acc(sum_buf);
    sum = h_acc[0];
    elapsed += timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ReductionIntBarrier = " << elapsed << "s"
              << " sum = " << sum << "\n";
  else
    std::cout << "ERROR: ReductionIntBarrier Expected " << sum_expected
              << " but got " << sum << "\n";
  return sum;
} // end reductionIntBarrier
// Atomic reduction: every work item does one relaxed fetch_add into the
// single global result cell; contention on that cell is what this
// variant measures.
int reductionIntAtomic(sycl::queue &q, std::vector<int> &data, int iter) {
  const size_t data_size = data.size();
  int sum = 0;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  // Fill the input with ones on the device.
  q.submit([&](auto &h) {
    sycl::accessor buf_acc(buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(data_size, [=](auto index) { buf_acc[index] = 1; });
  });
  q.wait();
  double elapsed = 0;
  for (int i = 0; i < iter; i++) {
    // Zero the result cell outside the timed region.
    q.submit([&](auto &h) {
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(1, [=](auto index) { sum_acc[index] = 0; });
    });
    Timer timer;
    // reductionIntAtomic main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(data_size, [=](auto index) {
        size_t gid = index[0];
        auto atomic_sum =
            sycl::atomic_ref<int, sycl::memory_order::relaxed,
                             sycl::memory_scope::device,
                             sycl::access::address_space::global_space>(
                sum_acc[0]);
        atomic_sum.fetch_add(buf_acc[gid]);
      });
    });
    // reductionIntAtomic main end
    q.wait();
    sycl::host_accessor h_acc(sum_buf);
    sum = h_acc[0];
    elapsed += timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ReductionIntAtomic = " << elapsed << "s"
              << " sum = " << sum << "\n";
  else
    std::cout << "ERROR: ReductionIntAtomic Expected " << sum_expected
              << " but got " << sum << "\n";
  return sum;
} // end reductionIntAtomic
// Run the three reduction variants (serial, atomic, barrier) 1000 times
// each and report the average kernel time.
int main(int argc, char *argv[]) {
  sycl::queue q{sycl::default_selector_v, exception_handler};
  const auto device_name = q.get_device().get_info<sycl::info::device::name>();
  std::cout << device_name << "\n";
  std::vector<int> data(N, 1);
  reductionIntSerial(q, data, 1000);
  reductionIntAtomic(q, data, 1000);
  reductionIntBarrier(q, data, 1000);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/libraries-kernel/naive_matmul.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <cfloat>
#include <chrono>
#include <iostream>
// Naive triple-loop CPU matrix multiply benchmark: C = A * B where A is
// M x K of ones and B is K x N of ones, so every C element must equal K.
// Fix: the original cleanup ran only `delete[] A/B/C`, leaking every row
// allocated with `new float[...]`; the rows are now freed first.
int main(int argc, char **argv) {
  if (argc != 3) {
    std::cout << "Usage: " << argv[0] << " <m> <k>\n";
    exit(1);
  }
  // Set matrix dimensions
  unsigned int m, k, n, M, K, N;
  M = N = std::stoi(argv[1]);
  K = std::stoi(argv[2]);
  // Allocate matrices A, B, and C as arrays of row pointers
  float **A = new float *[M];
  for (m = 0; m < M; m++)
    A[m] = new float[K];
  float **B = new float *[K];
  for (k = 0; k < K; k++)
    B[k] = new float[N];
  float **C = new float *[M];
  for (m = 0; m < M; m++)
    C[m] = new float[N];
  // Initialize matrices A and B
  for (m = 0; m < M; m++)
    for (k = 0; k < K; k++)
      A[m][k] = 1.0;
  for (k = 0; k < K; k++)
    for (n = 0; n < N; n++)
      B[k][n] = 1.0;
  auto start_time = std::chrono::system_clock::now(); // Start timer
  // Multiply matrices A and B
  for (m = 0; m < M; m++) {
    for (n = 0; n < N; n++) {
      C[m][n] = 0.0;
      for (k = 0; k < K; k++) {
        C[m][n] += A[m][k] * B[k][n];
      }
    }
  } // End matrix multiplication
  auto end_time = std::chrono::system_clock::now(); // Stop timer
  std::chrono::duration<double> elapsed_time = end_time - start_time;
  // Check for correctness: every element should equal K
  bool errors(false);
  for (m = 0; m < M; m++) {
    for (n = 0; n < N; n++) {
      if (std::abs(C[m][n] - K) > FLT_MIN) {
        errors = true;
        break;
      }
    }
  }
  if (errors)
    std::cout << "Program completed with errors." << std::endl;
  else {
    std::cout << "Program completed without errors." << std::endl;
    std::cout << "Naive C++ multiplication of " << M << " x " << K << " and "
              << K << " x " << N << " matrices took " << elapsed_time.count()
              << " seconds." << std::endl;
  }
  // Cleanup: delete every row before the row-pointer arrays.
  for (m = 0; m < M; m++) {
    delete[] A[m];
    delete[] C[m];
  }
  for (k = 0; k < K; k++)
    delete[] B[k];
  delete[] A;
  delete[] B;
  delete[] C;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/libraries-kernel/naive_matmul_sycl.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "CL/sycl.hpp"
#include <cfloat>
#include <iostream>
// Naive SYCL matrix multiply over USM shared memory: one work item per
// C element. A (M x K) and B (K x N) are filled with ones, so every C
// element must equal K.
// Fix: the original freed only the outer row-pointer arrays, leaking
// every per-row malloc_shared allocation; the rows are now freed first.
int main(int argc, char **argv) {
  // Enter matrix dimensions
  unsigned int m, k, n, M, K, N;
  if (argc < 3) {
    std::cout << "Usage: naive_matmul_sycl M K" << std::endl;
    return -1;
  }
  M = N = std::stoi(argv[1]);
  K = std::stoi(argv[2]);
  // Initialize SYCL queue
  sycl::queue Q(sycl::default_selector_v);
  auto sycl_device = Q.get_device();
  auto sycl_context = Q.get_context();
  std::cout << "Running on: "
            << Q.get_device().get_info<sycl::info::device::name>() << std::endl;
  // Allocate matrices A, B, and C in USM (array-of-row-pointers layout)
  auto A = sycl::malloc_shared<float *>(M, sycl_device, sycl_context);
  for (m = 0; m < M; m++)
    A[m] = sycl::malloc_shared<float>(K, sycl_device, sycl_context);
  auto B = sycl::malloc_shared<float *>(K, sycl_device, sycl_context);
  for (k = 0; k < K; k++)
    B[k] = sycl::malloc_shared<float>(N, sycl_device, sycl_context);
  auto C = sycl::malloc_shared<float *>(M, sycl_device, sycl_context);
  for (m = 0; m < M; m++)
    C[m] = sycl::malloc_shared<float>(N, sycl_device, sycl_context);
  // Initialize matrices A, B, and C
  for (m = 0; m < M; m++)
    for (k = 0; k < K; k++)
      A[m][k] = 1.0;
  for (k = 0; k < K; k++)
    for (n = 0; n < N; n++)
      B[k][n] = 1.0;
  for (m = 0; m < M; m++)
    for (n = 0; n < N; n++)
      C[m][n] = 0.0;
  // NOTE(review): std::chrono is used without a direct <chrono> include;
  // presumably pulled in transitively by CL/sycl.hpp — confirm.
  auto start_time = std::chrono::system_clock::now(); // Start timer
  // Offload matrix multiplication kernel
  Q.parallel_for(sycl::range<2>{M, N}, [=](sycl::id<2> id) {
    unsigned int m = id[0];
    unsigned int n = id[1];
    float sum = 0.0;
    for (unsigned int k = 0; k < K; k++)
      sum += A[m][k] * B[k][n];
    C[m][n] = sum;
  }).wait(); // End matrix multiplication
  auto end_time = std::chrono::system_clock::now(); // Stop timer
  std::chrono::duration<double> elapsed_time = end_time - start_time;
  // Check for correctness: every element should equal K
  bool errors(false);
  for (m = 0; m < M; m++) {
    for (n = 0; n < N; n++) {
      if (std::abs(C[m][n] - K) > FLT_MIN) {
        errors = true;
        break;
      }
    }
  }
  if (errors)
    std::cout << "Program completed with errors." << std::endl;
  else {
    std::cout << "Program completed without errors." << std::endl;
    std::cout << "Naive DPC++ multiplication of " << M << " x " << K << " and "
              << K << " x " << N << " matrices took " << elapsed_time.count()
              << " seconds." << std::endl;
  }
  // Cleanup: free every row before the row-pointer arrays.
  for (m = 0; m < M; m++) {
    sycl::free(A[m], Q);
    sycl::free(C[m], Q);
  }
  for (k = 0; k < K; k++)
    sycl::free(B[k], Q);
  sycl::free(A, Q);
  sycl::free(B, Q);
  sycl::free(C, Q);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/libraries-kernel/matmul_onemkl.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "CL/sycl.hpp"
#include "mkl.h"
#include "oneapi/mkl/blas.hpp"
#include <cfloat>
#include <iostream>
// Multiply two ones-matrices with oneMKL SGEMM on the default device and
// verify every element of C equals K.
int main(int argc, char **argv) {
  // Enter matrix dimensions
  unsigned int M, K, N;
  if (argc < 3) {
    std::cout << "Usage: matmul_onemkl N K" << std::endl;
    return -1;
  }
  M = N = std::stoi(argv[1]);
  K = std::stoi(argv[2]);
  // Initialize SYCL queue
  sycl::queue Q(sycl::default_selector_v);
  auto sycl_device = Q.get_device();
  auto sycl_context = Q.get_context();
  std::cout << "Running on: "
            << Q.get_device().get_info<sycl::info::device::name>() << std::endl;
  // M*K, K*N, and M*N matrices in unified shared memory.
  auto A = sycl::malloc_shared<float>(M * K, sycl_device, sycl_context);
  auto B = sycl::malloc_shared<float>(K * N, sycl_device, sycl_context);
  auto C = sycl::malloc_shared<float>(M * N, sycl_device, sycl_context);
  // Fill A and B with ones.
  for (unsigned int i = 0; i < M * K; i++)
    A[i] = 1.0;
  for (unsigned int i = 0; i < K * N; i++)
    B[i] = 1.0;
  auto start_time = std::chrono::system_clock::now(); // Start timer
  // C = 1.0 * A * B + 0.0 * C, no transposes, no input dependencies.
  float alpha = 1.0, beta = 0.0;
  oneapi::mkl::transpose transA = oneapi::mkl::transpose::nontrans;
  oneapi::mkl::transpose transB = oneapi::mkl::transpose::nontrans;
  std::vector<sycl::event> gemm_dependencies;
  sycl::event gemm_done =
      oneapi::mkl::blas::gemm(Q, transA, transB, M, N, K, alpha, A, M, B, K,
                              beta, C, M, gemm_dependencies);
  gemm_done.wait();
  auto end_time = std::chrono::system_clock::now(); // Stop timer
  std::chrono::duration<double> elapsed_time = end_time - start_time;
  // Check for correctness and report run time
  bool errors(false);
  for (unsigned int i = 0; i < M * N; i++)
    if (std::abs(C[i] - K) > FLT_MIN) {
      errors = true;
      break;
    }
  if (errors)
    std::cout << "Program completed with errors." << std::endl;
  else {
    std::cout << "Program completed without errors." << std::endl;
    std::cout << "oneMKL SGEMM of " << M << " x " << K << " and " << K << " x "
              << N << " matrices took " << elapsed_time.count() << " seconds."
              << std::endl;
  }
  // Cleanup
  sycl::free(A, Q);
  sycl::free(B, Q);
  sycl::free(C, Q);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/exec-model/simple.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
// NOTE: illustrative fragment for the guide, not a complete translation
// unit (the kernel body and trailing semicolons are intentionally elided).
auto command_group =
    [&](auto &cgh) {
      cgh.parallel_for(sycl::range<3>(64, 64, 64), // global range
                       [=](item<3> it) {
                         // (kernel code)
                       })
    }
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/exec-model/vaddsync.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <array>
#include <chrono>
#include <iostream>
auto d_selector = sycl::default_selector_v; // device selector used by main
// Array type and data size for this example.
constexpr size_t array_size = 3 * 5 * 7 * (1 << 18);
typedef std::array<int, array_size> IntArray;
#define mysize (1 << 18)
// VectorAdd3
// Element-wise sum of a and b into sum using `groups` work groups of
// wg_size work items with the required sub-group size sg_size. Each group
// covers a fixed mysize-element slice, striding by wg_size, and the whole
// add is repeated `iter` times. Returns the raw steady_clock tick count
// (the "u-secs" label reflects the unconverted count() value).
template <int groups, int wg_size, int sg_size>
int VectorAdd3(sycl::queue &q, const IntArray &a, const IntArray &b,
               IntArray &sum, int iter) {
  sycl::range num_items{a.size()};
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  sycl::buffer sum_buf(sum.data(), num_items);
  size_t num_groups = groups;
  auto start = std::chrono::steady_clock::now();
  q.submit([&](auto &h) {
    // Inputs are read-only; the output discards prior contents.
    sycl::accessor a_acc(a_buf, h, sycl::read_only);
    sycl::accessor b_acc(b_buf, h, sycl::read_only);
    sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(
        sycl::nd_range<1>(num_groups * wg_size, wg_size),
        [=](sycl::nd_item<1> item) [[intel::reqd_sub_group_size(sg_size)]] {
          size_t group = item.get_group()[0];
          size_t lane = item.get_local_id();
          size_t first = group * mysize;
          size_t last = first + mysize;
          for (int rep = 0; rep < iter; rep++)
            for (size_t i = first + lane; i < last; i += wg_size)
              sum_acc[i] = a_acc[i] + b_acc[i];
        });
  });
  q.wait();
  auto end = std::chrono::steady_clock::now();
  std::cout << "VectorAdd3<" << groups << "> completed on device - took "
            << (end - start).count() << " u-secs\n";
  return ((end - start).count());
} // end VectorAdd3
// VectorAdd4
// Identical to VectorAdd3 except each work item executes a local-space
// barrier before the add loop; the pair exists to compare the barrier's
// cost. Returns the raw steady_clock tick count.
template <int groups, int wg_size, int sg_size>
int VectorAdd4(sycl::queue &q, const IntArray &a, const IntArray &b,
               IntArray &sum, int iter) {
  sycl::range num_items{a.size()};
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  sycl::buffer sum_buf(sum.data(), num_items);
  size_t num_groups = groups;
  auto start = std::chrono::steady_clock::now();
  q.submit([&](auto &h) {
    // Inputs are read-only; the output discards prior contents.
    sycl::accessor a_acc(a_buf, h, sycl::read_only);
    sycl::accessor b_acc(b_buf, h, sycl::read_only);
    sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(
        sycl::nd_range<1>(num_groups * wg_size, wg_size),
        [=](sycl::nd_item<1> item) [[intel::reqd_sub_group_size(sg_size)]] {
          item.barrier(sycl::access::fence_space::local_space);
          size_t group = item.get_group()[0];
          size_t lane = item.get_local_id();
          size_t first = group * mysize;
          size_t last = first + mysize;
          for (int rep = 0; rep < iter; rep++) {
            for (size_t i = first + lane; i < last; i += wg_size)
              sum_acc[i] = a_acc[i] + b_acc[i];
          }
        });
  });
  q.wait();
  auto end = std::chrono::steady_clock::now();
  std::cout << "VectorAdd4<" << groups << "> completed on device - took "
            << (end - start).count() << " u-secs\n";
  return ((end - start).count());
} // end VectorAdd4
// Fill the array with ascending indices: a[i] == i.
void InitializeArray(IntArray &a) {
  int value = 0;
  for (auto &element : a)
    element = value++;
}
// Reset every element of the array to zero.
void Initialize(IntArray &a) {
  for (auto &element : a)
    element = 0;
}
// Operand and result arrays; kept at namespace scope (presumably too large
// for the stack — TODO confirm against array_size) and zero-initialized.
IntArray a, b, sum;
// Validates both kernels once, then times VectorAdd3 (no barrier) against
// VectorAdd4 (with barrier) for two work-group configurations. Note only the
// first mysize elements are checked for correctness.
int main() {
  sycl::queue q(d_selector);
  InitializeArray(a);
  InitializeArray(b);
  std::cout << "Running on device: "
            << q.get_device().get_info<sycl::info::device::name>() << "\n";
  std::cout << "Vector size: " << a.size() << "\n";
  // check results
  Initialize(sum);
  VectorAdd3<6, 320, 8>(q, a, b, sum, 1);
  for (int i = 0; i < mysize; i++)
    if (sum[i] != 2 * i) { // a[i] == b[i] == i, so sum[i] must be 2*i
      std::cout << "add3 Did not match\n";
    }
  Initialize(sum);
  VectorAdd4<6, 320, 8>(q, a, b, sum, 1);
  for (int i = 0; i < mysize; i++)
    if (sum[i] != 2 * i) {
      std::cout << "add4 Did not match\n";
    }
  // group1: 8 groups of 320 work items, 10000 repetitions each
  Initialize(sum);
  VectorAdd3<8, 320, 8>(q, a, b, sum, 10000);
  Initialize(sum);
  VectorAdd4<8, 320, 8>(q, a, b, sum, 10000);
  // end group1
  // group2: 24 groups of 224 work items, 10000 repetitions each
  Initialize(sum);
  VectorAdd3<24, 224, 8>(q, a, b, sum, 10000);
  Initialize(sum);
  VectorAdd4<24, 224, 8>(q, a, b, sum, 10000);
  // end group2
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/exec-model/barrier.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
// Documentation snippet (not standalone code): demonstrates a barrier inside
// a kernel. NOTE(review): `R` is a placeholder, `nd_range`/`access` are
// unqualified, and trailing semicolons are omitted — this block is extracted
// into guide text and does not compile as-is.
auto command_group =
    [&](auto &cgh) {
      cgh.parallel_for(nd_range(sycl::range(64, 64, 128), // global range
                                sycl::range(1, R, 128) // local range
                                ),
                       [=](sycl::nd_item<3> item) {
                         // (kernel code)
                         // Internal synchronization
                         item.barrier(access::fence_space::global_space);
                         // (kernel code)
                       })
    }
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/exec-model/local.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
// Documentation snippet (not standalone code): demonstrates a local-memory
// accessor shared by the work items of a group. NOTE(review): `R`, the
// `ngroup<3>` parameter type, and the `= ...` assignment are placeholders
// used in the guide text; this block does not compile as-is.
auto command_group =
    [&](auto &cgh) {
      // local memory variables shared among work items
      sycl::accessor<int, 1, sycl::access::mode::read_write,
                     sycl::access::target::local>
          myLocal(sycl::range(R), cgh);
      cgh.parallel_for(nd_range(sycl::range<3>(64, 64, 128), // global range
                                sycl::range<3>(1, R, 128) // local range
                                ),
                       [=](ngroup<3> myGroup) {
                         // (work group code)
                         myLocal[myGroup.get_local_id()[1]] = ...
                       })
    }
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/exec-model/vec-add.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <array>
#include <chrono>
#include <iostream>
// Device selector used by main(); the default picks the best available device.
auto d_selector = sycl::default_selector_v;
// Array type and data size for this example.
constexpr size_t array_size = 3 * 5 * 7 * (1 << 17);
typedef std::array<int, array_size> IntArray;
// Per-work-group slice length used by VectorAdd2's kernel; also the number of
// leading elements validated in main().
#define mysize (1 << 17)
// Simplest variant: one work item per element; the identical add is repeated
// `iter` times inside the kernel so the work is large enough to time.
// Returns the elapsed time in steady_clock ticks.
int VectorAdd1(sycl::queue &q, const IntArray &a, const IntArray &b,
               IntArray &sum, int iter) {
  sycl::range num_items{a.size()};
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  sycl::buffer sum_buf(sum.data(), num_items);
  auto start = std::chrono::steady_clock::now();
  // fix: the event returned by submit() was stored in an unused local `e`
  q.submit([&](auto &h) {
    // Input accessors
    sycl::accessor a_acc(a_buf, h, sycl::read_only);
    sycl::accessor b_acc(b_buf, h, sycl::read_only);
    // Output accessor
    sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(num_items, [=](auto i) {
      for (int j = 0; j < iter; j++)
        sum_acc[i] = a_acc[i] + b_acc[i];
    });
  });
  q.wait();
  auto end = std::chrono::steady_clock::now();
  // NOTE(review): count() is steady_clock ticks (typically ns), not µs,
  // despite the "u-secs" label; values are only compared relatively.
  std::cout << "VectorAdd1 completed on device - took " << (end - start).count()
            << " u-secs\n";
  return ((end - start).count());
} // end VectorAdd1
// Adds a and b into sum using `groups` work groups of 512 work items; each
// group strides over its own mysize-element slice, and the add is repeated
// `iter` times for timing. Returns the elapsed time in steady_clock ticks.
template <int groups>
int VectorAdd2(sycl::queue &q, const IntArray &a, const IntArray &b,
               IntArray &sum, int iter) {
  sycl::range num_items{a.size()};
  sycl::buffer a_buf(a);
  sycl::buffer b_buf(b);
  sycl::buffer sum_buf(sum.data(), num_items);
  size_t num_groups = groups;
  size_t wg_size = 512;
  // TODO: query the device's max work-group size instead of hard-coding 512
  auto start = std::chrono::steady_clock::now();
  q.submit([&](auto &h) {
    // Input accessors
    sycl::accessor a_acc(a_buf, h, sycl::read_only);
    sycl::accessor b_acc(b_buf, h, sycl::read_only);
    // Output accessor
    sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(
        sycl::nd_range<1>(num_groups * wg_size, wg_size),
        [=](sycl::nd_item<1> index) [[intel::reqd_sub_group_size(32)]] {
          size_t grp_id = index.get_group()[0];
          size_t loc_id = index.get_local_id();
          // Each group covers its own contiguous mysize-element slice.
          size_t start = grp_id * mysize;
          size_t end = start + mysize;
          for (int j = 0; j < iter; j++)
            for (size_t i = start + loc_id; i < end; i += wg_size) {
              sum_acc[i] = a_acc[i] + b_acc[i];
            }
        });
  });
  q.wait();
  auto end = std::chrono::steady_clock::now();
  // NOTE(review): ticks (typically ns) labeled "u-secs"; compared relatively.
  std::cout << "VectorAdd2<" << groups << "> completed on device - took "
            << (end - start).count() << " u-secs\n";
  return ((end - start).count());
} // end VectorAdd2
// Fill the array with ascending indices: a[i] == i.
void InitializeArray(IntArray &a) {
  int value = 0;
  for (auto &element : a)
    element = value++;
}
// Reset every element of the array to zero.
void Initialize(IntArray &a) {
  a.fill(0); // IntArray is std::array, so fill() is available
}
// Operand and result arrays; at namespace scope because array_size elements
// (~52 MiB each) are far too large for the stack. Zero-initialized at start.
IntArray a, b, sum;
// Validates VectorAdd1/VectorAdd2 once each, then times them across a sweep
// of work-group counts; each kernel prints its own timing.
int main() {
  sycl::queue q(d_selector);
  InitializeArray(a);
  InitializeArray(b);
  std::cout << "Running on device: "
            << q.get_device().get_info<sycl::info::device::name>() << "\n";
  std::cout << "Vector size: " << a.size() << "\n";
  // check results (a[i] == b[i] == i, so sum[i] must equal 2*i)
  Initialize(sum);
  VectorAdd1(q, a, b, sum, 1);
  for (int i = 0; i < mysize; i++)
    if (sum[i] != 2 * i) {
      std::cout << "add1 Did not match\n";
    }
  Initialize(sum);
  VectorAdd2<1>(q, a, b, sum, 1);
  for (int i = 0; i < mysize; i++)
    if (sum[i] != 2 * i) {
      std::cout << "add2 Did not match\n";
    }
  // time the kernels; fix: the local `t` that collected the return values
  // was assigned repeatedly but never read, so it has been removed
  Initialize(sum);
  VectorAdd1(q, a, b, sum, 1000);
  Initialize(sum);
  VectorAdd2<1>(q, a, b, sum, 1000);
  VectorAdd2<2>(q, a, b, sum, 1000);
  VectorAdd2<3>(q, a, b, sum, 1000);
  VectorAdd2<4>(q, a, b, sum, 1000);
  VectorAdd2<5>(q, a, b, sum, 1000);
  VectorAdd2<6>(q, a, b, sum, 1000);
  VectorAdd2<7>(q, a, b, sum, 1000);
  VectorAdd2<8>(q, a, b, sum, 1000);
  VectorAdd2<12>(q, a, b, sum, 1000);
  VectorAdd2<16>(q, a, b, sum, 1000);
  VectorAdd2<20>(q, a, b, sum, 1000);
  VectorAdd2<24>(q, a, b, sum, 1000);
  VectorAdd2<28>(q, a, b, sum, 1000);
  VectorAdd2<32>(q, a, b, sum, 1000);
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/reduction/reduction_1.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <chrono>
#include <iostream>
#include <string>
#include <unistd.h>
#include <vector>
// Summation of 10M 'one' values
constexpr size_t N = (10 * 1024 * 1024);
// Loop-index sentinel marking the (untimed) warm-up iteration.
constexpr int warm_up_token = -1;
// expected value of sum (narrowing size_t -> int is safe: N fits in int)
int sum_expected = N;
// Async exception handler for sycl::queue: rethrows each captured exception
// to identify it, then terminates — any asynchronous error is fatal here.
static auto exception_handler = [](sycl::exception_list eList) {
  for (std::exception_ptr const &e : eList) {
    try {
      std::rethrow_exception(e);
    } catch (std::exception const &e) {
#if DEBUG
      std::cout << "Failure" << std::endl;
#endif
      std::terminate();
    }
  }
};
// Simple stopwatch: starts at construction, Elapsed() reports seconds since.
class Timer {
public:
  Timer() : start_(std::chrono::steady_clock::now()) {}

  // Seconds (as double) elapsed since this Timer was constructed.
  double Elapsed() {
    const auto elapsed_time = std::chrono::steady_clock::now() - start_;
    return std::chrono::duration_cast<Duration>(elapsed_time).count();
  }

private:
  using Duration = std::chrono::duration<double>;
  std::chrono::steady_clock::time_point start_;
};
// Overwrites every element of flush_buf on the device; invoked between timed
// iterations, presumably to evict previously-touched data from device caches
// so each measurement starts cold — TODO confirm.
void flush_cache(sycl::queue &q, sycl::buffer<int> &flush_buf,
                 const size_t flush_size) {
  auto ev = q.submit([&](auto &h) {
    sycl::accessor flush_acc(flush_buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(flush_size, [=](auto index) { flush_acc[index] = 1; });
  });
  ev.wait_and_throw();
}
// Baseline: single-threaded host summation of `data`, repeated `iter` times.
// Prints the average time per iteration (expects iter >= 1) and returns sum.
int ComputeSerial(std::vector<int> &data,
                  [[maybe_unused]] std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  Timer timer;
  // fix: initialize so `sum` is well-defined even when iter <= 0 (it was
  // read uninitialized in that case)
  int sum = 0;
  // ComputeSerial main begin
  for (int it = 0; it < iter; it++) {
    sum = 0;
    for (size_t i = 0; i < data_size; ++i) {
      sum += data[i];
    }
  }
  // ComputeSerial main end
  double elapsed = timer.Elapsed() / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeSerial = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeSerial Expected " << sum_expected << " but got "
              << sum << std::endl;
  return sum;
} // end ComputeSerial
// Reduction via a single global atomic: every work item adds its element
// directly into sum_acc[0]. Prints the average time over `iter` timed
// iterations (a warm-up pass is run first and excluded) and returns the sum.
int ComputeParallel1(sycl::queue &q, std::vector<int> &data,
                     std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum = 0;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    q.submit([&](auto &h) {
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(1, [=](auto) { sum_acc[0] = 0; });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeParallel1 main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(data_size, [=](auto index) {
        size_t glob_id = index[0];
        auto v = sycl::atomic_ref<int, sycl::memory_order::relaxed,
                                  sycl::memory_scope::device,
                                  sycl::access::address_space::global_space>(
            sum_acc[0]);
        v.fetch_add(buf_acc[glob_id]);
      });
      // ComputeParallel1 main end
    });
    q.wait();
    {
      // ensure limited life-time of host accessor since it blocks the queue
      sycl::host_accessor h_acc(sum_buf);
      sum = h_acc[0];
    }
    // do not measure time of warm-up iteration to exclude JIT compilation time
    // (fix: compare the loop index `i`, not `iter`, against warm_up_token —
    // the old test never fired, so the warm-up time was always counted)
    elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeParallel1 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeParallel1 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeParallel1
// Reduction with one work item per compute unit: each item serially sums a
// contiguous BATCH of elements into its own slot; the host adds the partial
// sums. Prints the average time over `iter` iterations and returns the sum.
int ComputeParallel2(sycl::queue &q, std::vector<int> &data,
                     std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  int num_processing_elements =
      q.get_device().get_info<sycl::info::device::max_compute_units>();
  // ceiling division so the last item's batch may be short
  int BATCH = (N + num_processing_elements - 1) / num_processing_elements;
  std::cout << "Num work items = " << num_processing_elements << std::endl;
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> accum_buf(num_processing_elements);
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    // init the accumulator on device
    q.submit([&](auto &h) {
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_processing_elements,
                     [=](auto index) { accum_acc[index] = 0; });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeParallel2 main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_processing_elements, [=](auto index) {
        size_t glob_id = index[0];
        size_t start = glob_id * BATCH;
        size_t end = (glob_id + 1) * BATCH;
        if (end > N)
          end = N;
        int sum = 0;
        for (size_t i = start; i < end; ++i)
          sum += buf_acc[i];
        accum_acc[glob_id] = sum;
      });
    });
    // ComputeParallel2 main end
    q.wait();
    {
      sum = 0;
      sycl::host_accessor h_acc(accum_buf);
      for (int i = 0; i < num_processing_elements; ++i)
        sum += h_acc[i];
      // fix: compare the loop index `i` (not `iter`) so the warm-up
      // iteration's time is excluded as intended
      elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
    }
  }
  // fix: report the average like every other Compute* variant; the division
  // by `iter` was missing here
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeParallel2 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeParallel2 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeParallel2
// One-stage tree reduction: each 256-item work group reduces its slice in
// local memory; the host then sums the per-group partials. Note `flush` is
// taken by value (copied) — kept as-is for interface compatibility.
int ComputeTreeReduction1(sycl::queue &q, std::vector<int> &data,
                          std::vector<int> flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  int work_group_size = 256;
  int num_work_items = data_size;
  int num_work_groups = num_work_items / work_group_size;
  int max_work_group_size =
      q.get_device().get_info<sycl::info::device::max_work_group_size>();
  if (work_group_size > max_work_group_size) {
    std::cout << "WARNING: Skipping one stage reduction example "
              << "as the device does not support required work_group_size"
              << std::endl;
    return 0;
  }
  std::cout << "One Stage Reduction with " << num_work_items << std::endl;
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> accum_buf(num_work_groups);
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    q.submit([&](auto &h) {
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_work_groups,
                     [=](auto index) { accum_acc[index] = 0; });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeTreeReduction1 main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      sycl::local_accessor<int, 1> scratch(work_group_size, h);
      h.parallel_for(sycl::nd_range<1>(num_work_items, work_group_size),
                     [=](sycl::nd_item<1> item) {
                       size_t global_id = item.get_global_id(0);
                       int local_id = item.get_local_id(0);
                       int group_id = item.get_group(0);
                       if (global_id < data_size)
                         scratch[local_id] = buf_acc[global_id];
                       else
                         scratch[local_id] = 0;
                       // Do a tree reduction on items in work-group
                       for (int i = work_group_size / 2; i > 0; i >>= 1) {
                         item.barrier(sycl::access::fence_space::local_space);
                         if (local_id < i)
                           scratch[local_id] += scratch[local_id + i];
                       }
                       if (local_id == 0)
                         accum_acc[group_id] = scratch[0];
                     });
    });
    // ComputeTreeReduction1 main end
    q.wait();
    {
      sycl::host_accessor h_acc(accum_buf);
      sum = 0;
      for (int i = 0; i < num_work_groups; ++i)
        sum += h_acc[i];
      // fix: compare the loop index `i` (not `iter`) so the warm-up
      // iteration's time is excluded as intended
      elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
    }
  }
  // fix: average over the timed iterations, consistent with the other
  // variants; the division by `iter` was missing here
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeTreeReduction1 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeTreeReduction1 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeTreeReduction
// Two-stage tree reduction: stage 1 reduces the data into one partial per
// work group; stage 2 reduces those partials again; the host sums the final
// (much smaller) partial array. Note `flush` is taken by value (copied).
int ComputeTreeReduction2(sycl::queue &q, std::vector<int> &data,
                          std::vector<int> flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  int work_group_size = 256;
  int num_work_items1 = data_size;
  int num_work_groups1 = num_work_items1 / work_group_size;
  int num_work_items2 = num_work_groups1;
  int num_work_groups2 = num_work_items2 / work_group_size;
  int max_work_group_size =
      q.get_device().get_info<sycl::info::device::max_work_group_size>();
  if (work_group_size > max_work_group_size) {
    std::cout << "WARNING: Skipping two stage reduction example "
              << "as the device does not support required work_group_size"
              << std::endl;
    return 0;
  }
  std::cout << "Two Stage Reduction with " << num_work_items1
            << " in stage 1 and " << num_work_items2 << " in stage2"
            << std::endl;
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> accum1_buf(num_work_groups1);
  sycl::buffer<int> accum2_buf(num_work_groups2);
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    q.submit([&](auto &h) {
      sycl::accessor accum1_acc(accum1_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_work_groups1,
                     [=](auto index) { accum1_acc[index] = 0; });
    });
    q.submit([&](auto &h) {
      sycl::accessor accum2_acc(accum2_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_work_groups2,
                     [=](auto index) { accum2_acc[index] = 0; });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeTreeReduction2 main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor accum_acc(accum1_buf, h, sycl::write_only, sycl::no_init);
      sycl::local_accessor<int, 1> scratch(work_group_size, h);
      h.parallel_for(sycl::nd_range<1>(num_work_items1, work_group_size),
                     [=](sycl::nd_item<1> item) {
                       size_t global_id = item.get_global_id(0);
                       int local_id = item.get_local_id(0);
                       int group_id = item.get_group(0);
                       if (global_id < data_size)
                         scratch[local_id] = buf_acc[global_id];
                       else
                         scratch[local_id] = 0;
                       // Do a tree reduction on items in work-group
                       for (int i = work_group_size / 2; i > 0; i >>= 1) {
                         item.barrier(sycl::access::fence_space::local_space);
                         if (local_id < i)
                           scratch[local_id] += scratch[local_id + i];
                       }
                       if (local_id == 0)
                         accum_acc[group_id] = scratch[0];
                     });
    });
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(accum1_buf, h, sycl::read_only);
      sycl::accessor accum_acc(accum2_buf, h, sycl::write_only, sycl::no_init);
      sycl::local_accessor<int, 1> scratch(work_group_size, h);
      h.parallel_for(sycl::nd_range<1>(num_work_items2, work_group_size),
                     [=](sycl::nd_item<1> item) {
                       size_t global_id = item.get_global_id(0);
                       int local_id = item.get_local_id(0);
                       int group_id = item.get_group(0);
                       if (global_id < static_cast<size_t>(num_work_items2))
                         scratch[local_id] = buf_acc[global_id];
                       else
                         scratch[local_id] = 0;
                       // Do a tree reduction on items in work-group
                       for (int i = work_group_size / 2; i > 0; i >>= 1) {
                         item.barrier(sycl::access::fence_space::local_space);
                         if (local_id < i)
                           scratch[local_id] += scratch[local_id + i];
                       }
                       if (local_id == 0)
                         accum_acc[group_id] = scratch[0];
                     });
    });
    // ComputeTreeReduction2 main end
    q.wait();
    {
      sycl::host_accessor h_acc(accum2_buf);
      sum = 0;
      for (int i = 0; i < num_work_groups2; ++i)
        sum += h_acc[i];
      // fix: compare the loop index `i` (not `iter`) so the warm-up
      // iteration's time is excluded as intended
      elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
    }
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeTreeReduction2 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeTreeReduction2 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeTreeReduction2
// Like ComputeParallel2 but with (compute units * native int vector width)
// work items, each striding through the data; the host sums the partials.
int ComputeParallel3(sycl::queue &q, std::vector<int> &data,
                     std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  int num_processing_elements =
      q.get_device().get_info<sycl::info::device::max_compute_units>();
  int vec_size =
      q.get_device().get_info<sycl::info::device::native_vector_width_int>();
  int num_work_items = num_processing_elements * vec_size;
  std::cout << "Num work items = " << num_work_items << std::endl;
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> accum_buf(num_work_items);
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    q.submit([&](auto &h) {
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_work_items, [=](auto index) { accum_acc[index] = 0; });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeParallel3 main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_work_items, [=](auto index) {
        size_t glob_id = index[0];
        int sum = 0;
        for (size_t i = glob_id; i < data_size; i += num_work_items)
          sum += buf_acc[i];
        accum_acc[glob_id] = sum;
      });
    });
    // ComputeParallel3 main end
    q.wait();
    {
      sum = 0;
      sycl::host_accessor h_acc(accum_buf);
      for (int i = 0; i < num_work_items; ++i)
        sum += h_acc[i];
      // fix: compare the loop index `i` (not `iter`) so the warm-up
      // iteration's time is excluded as intended
      elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
    }
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeParallel3 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeParallel3 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeParallel3
// Like ComputeParallel3 but oversubscribed by a work-group-size factor (256)
// to give the scheduler more parallel slack; host sums the partials.
int ComputeParallel4(sycl::queue &q, std::vector<int> &data,
                     std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum;
  int work_group_size = 256;
  int max_work_group_size =
      q.get_device().get_info<sycl::info::device::max_work_group_size>();
  if (work_group_size > max_work_group_size) {
    std::cout << "WARNING: Skipping ComputeParallel4 example "
              << "as the device does not support required work_group_size"
              << std::endl;
    return 0;
  }
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  int num_processing_elements =
      q.get_device().get_info<sycl::info::device::max_compute_units>();
  int vec_size =
      q.get_device().get_info<sycl::info::device::native_vector_width_int>();
  int num_work_items = num_processing_elements * vec_size * work_group_size;
  std::cout << "Num work items = " << num_work_items << std::endl;
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> accum_buf(num_work_items);
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    q.submit([&](auto &h) {
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_work_items, [=](auto index) { accum_acc[index] = 0; });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeParallel4 main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_work_items, [=](auto index) {
        size_t glob_id = index[0];
        int sum = 0;
        for (size_t i = glob_id; i < data_size; i += num_work_items)
          sum += buf_acc[i];
        accum_acc[glob_id] = sum;
      });
    });
    // ComputeParallel4 main end
    q.wait();
    {
      sum = 0;
      sycl::host_accessor h_acc(accum_buf);
      for (int i = 0; i < num_work_items; ++i)
        sum += h_acc[i];
      // fix: compare the loop index `i` (not `iter`) so the warm-up
      // iteration's time is excluded as intended
      elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
    }
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeParallel4 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeParallel4 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeParallel4
// Uses the compiler/runtime built-in sycl::reduction over the whole data set;
// one element per work item. Prints the average time and returns the sum.
int ComputeParallel5(sycl::queue &q, std::vector<int> &data,
                     std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum = 0;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush.size(), props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  std::cout << "Compiler built-in reduction Operator" << std::endl;
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    q.submit([&](auto &h) {
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(1, [=](auto index) { sum_acc[index] = 0; });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeParallel5 main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      auto sumr = sycl::reduction(sum_buf, h, sycl::plus<>());
      h.parallel_for(sycl::nd_range<1>{data_size, 256}, sumr,
                     [=](sycl::nd_item<1> item, auto &sumr_arg) {
                       int glob_id = item.get_global_id(0);
                       sumr_arg += buf_acc[glob_id];
                     });
    });
    // ComputeParallel5 main end
    q.wait();
    {
      sycl::host_accessor h_acc(sum_buf);
      sum = h_acc[0];
    }
    // fix: compare the loop index `i` (not `iter`) so the warm-up
    // iteration's time is excluded as intended
    elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeParallel5 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeParallel5 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeParallel5
// Blocked reduction: each work item sums elements_per_work_item values using
// a block/offset addressing scheme, then work-group partials are combined by
// a SERIAL loop on item 0 (contrast with the tree loop in ComputeParallel7).
int ComputeParallel6(sycl::queue &q, std::vector<int> &data,
                     std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum;
  int work_group_size = 256;
  int max_work_group_size =
      q.get_device().get_info<sycl::info::device::max_work_group_size>();
  if (work_group_size > max_work_group_size) {
    std::cout << "WARNING: Skipping ComputeParallel6 example "
              << "as the device does not support required work_group_size"
              << std::endl;
    return 0;
  }
  int log2elements_per_block = 13;
  int elements_per_block = (1 << log2elements_per_block); // 8192
  int log2workitems_per_block = 8;
  int workitems_per_block = (1 << log2workitems_per_block); // 256
  int elements_per_work_item = elements_per_block / workitems_per_block;
  int mask = ~(~0 << log2workitems_per_block);
  int num_work_items = data_size / elements_per_work_item;
  int num_work_groups = num_work_items / work_group_size;
  std::cout << "Num work items = " << num_work_items << std::endl;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> accum_buf(num_work_groups);
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    q.submit([&](auto &h) {
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_work_groups,
                     [=](auto index) { accum_acc[index] = 0; });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeParallel6 main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      sycl::local_accessor<int, 1> scratch(work_group_size, h);
      h.parallel_for(sycl::nd_range<1>{num_work_items, work_group_size},
                     [=](sycl::nd_item<1> item) {
                       size_t glob_id = item.get_global_id(0);
                       size_t group_id = item.get_group(0);
                       size_t loc_id = item.get_local_id(0);
                       int offset = ((glob_id >> log2workitems_per_block)
                                     << log2elements_per_block) +
                                    (glob_id & mask);
                       int sum = 0;
                       for (int i = 0; i < elements_per_work_item; ++i)
                         sum +=
                             buf_acc[(i << log2workitems_per_block) + offset];
                       scratch[loc_id] = sum;
                       // Serial Reduction
                       item.barrier(sycl::access::fence_space::local_space);
                       if (loc_id == 0) {
                         int sum = 0;
                         for (int i = 0; i < work_group_size; ++i)
                           sum += scratch[i];
                         accum_acc[group_id] = sum;
                       }
                     });
    });
    // ComputeParallel6 main end
    q.wait();
    {
      sum = 0;
      sycl::host_accessor h_acc(accum_buf);
      for (int i = 0; i < num_work_groups; ++i)
        sum += h_acc[i];
      // fix: compare the loop index `i` (not `iter`) so the warm-up
      // iteration's time is excluded as intended
      elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
    }
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeParallel6 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeParallel6 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeParallel6
// Same blocked per-item summation as ComputeParallel6, but the work-group
// partials are combined with a parallel TREE reduction in local memory.
int ComputeParallel7(sycl::queue &q, std::vector<int> &data,
                     std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum;
  int work_group_size = 256;
  int max_work_group_size =
      q.get_device().get_info<sycl::info::device::max_work_group_size>();
  if (work_group_size > max_work_group_size) {
    std::cout << "WARNING: Skipping ComputeParallel7 example "
              << "as the device does not support required work_group_size"
              << std::endl;
    return 0;
  }
  int log2elements_per_block = 13;
  int elements_per_block = (1 << log2elements_per_block); // 8192
  int log2workitems_per_block = 8;
  int workitems_per_block = (1 << log2workitems_per_block); // 256
  int elements_per_work_item = elements_per_block / workitems_per_block;
  int mask = ~(~0 << log2workitems_per_block);
  int num_work_items = data_size / elements_per_work_item;
  int num_work_groups = num_work_items / work_group_size;
  std::cout << "Num work items = " << num_work_items << std::endl;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> accum_buf(num_work_groups);
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    q.submit([&](auto &h) {
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_work_groups,
                     [=](auto index) { accum_acc[index] = 0; });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeParallel7 main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      sycl::local_accessor<int, 1> scratch(work_group_size, h);
      h.parallel_for(sycl::nd_range<1>{num_work_items, work_group_size},
                     [=](sycl::nd_item<1> item) {
                       size_t glob_id = item.get_global_id(0);
                       size_t group_id = item.get_group(0);
                       size_t loc_id = item.get_local_id(0);
                       int offset = ((glob_id >> log2workitems_per_block)
                                     << log2elements_per_block) +
                                    (glob_id & mask);
                       int sum = 0;
                       for (int i = 0; i < elements_per_work_item; ++i)
                         sum +=
                             buf_acc[(i << log2workitems_per_block) + offset];
                       scratch[loc_id] = sum;
                       // tree reduction
                       item.barrier(sycl::access::fence_space::local_space);
                       for (int i = work_group_size / 2; i > 0; i >>= 1) {
                         item.barrier(sycl::access::fence_space::local_space);
                         if (loc_id < static_cast<size_t>(i))
                           scratch[loc_id] += scratch[loc_id + i];
                       }
                       if (loc_id == 0)
                         accum_acc[group_id] = scratch[0];
                     });
    });
    // ComputeParallel7 main end
    q.wait();
    {
      sum = 0;
      sycl::host_accessor h_acc(accum_buf);
      for (int i = 0; i < num_work_groups; ++i)
        sum += h_acc[i];
      // fix: compare the loop index `i` (not `iter`) so the warm-up
      // iteration's time is excluded as intended
      elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
    }
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeParallel7 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeParallel7 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeParallel7
// Built-in sycl::reduction combined with the blocked per-item summation:
// each work item first sums elements_per_work_item values, then feeds its
// partial into the reduction. Prints the average time and returns the sum.
int ComputeParallel8(sycl::queue &q, std::vector<int> &data,
                     std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int work_group_size = 512;
  int max_work_group_size =
      q.get_device().get_info<sycl::info::device::max_work_group_size>();
  if (work_group_size > max_work_group_size) {
    std::cout << "WARNING: Skipping ComputeParallel8 example "
              << "as the device does not support required work_group_size"
              << std::endl;
    return 0;
  }
  int log2elements_per_block = 13;
  int elements_per_block = (1 << log2elements_per_block); // 8192
  int log2workitems_per_block = 8;
  int workitems_per_block = (1 << log2workitems_per_block); // 256
  int elements_per_work_item = elements_per_block / workitems_per_block;
  int mask = ~(~0 << log2workitems_per_block);
  int num_work_items = data_size / elements_per_work_item;
  int sum = 0;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush.size(), props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  std::cout << "Compiler built-in reduction Operator with increased work"
            << std::endl;
  std::cout << "Elements per item = " << elements_per_work_item << std::endl;
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    q.submit([&](auto &h) {
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(1, [=](auto index) { sum_acc[index] = 0; });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeParallel8 main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      auto sumr = sycl::reduction(sum_buf, h, sycl::plus<>());
      h.parallel_for(sycl::nd_range<1>{num_work_items, work_group_size}, sumr,
                     [=](sycl::nd_item<1> item, auto &sumr_arg) {
                       size_t glob_id = item.get_global_id(0);
                       int offset = ((glob_id >> log2workitems_per_block)
                                     << log2elements_per_block) +
                                    (glob_id & mask);
                       int sum = 0;
                       for (int i = 0; i < elements_per_work_item; ++i)
                         sum +=
                             buf_acc[(i << log2workitems_per_block) + offset];
                       sumr_arg += sum;
                     });
    });
    // ComputeParallel8 main end
    q.wait();
    {
      sycl::host_accessor h_acc(sum_buf);
      sum = h_acc[0];
    }
    // fix: compare the loop index `i` (not `iter`) so the warm-up
    // iteration's time is excluded as intended
    elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeParallel8 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeParallel8 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeParallel8
// Reduction with explicit sub-group block loads: each work-item gathers
// elements_per_work_item (64) values into a sycl::vec<int, 8> via
// sg.load<8>() (each load covers 16 lanes x 8 ints = 128 consecutive ints
// per sub-group), work-groups tree-reduce those vectors in local memory,
// and the host combines the per-group vector partials.
// Runs `iter` timed repetitions (cache flushed, warm-up excluded) and
// prints the average; returns the sum, or 0 if the device cannot supply a
// work-group size of 512.
int ComputeParallel9(sycl::queue &q, std::vector<int> &data,
                     std::vector<int> &flush, int iter) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  // BUG FIX: initialize so an early skip (or iter <= warm_up_token) does not
  // return an indeterminate value.
  int sum = 0;
  int work_group_size = 512;
  int max_work_group_size =
      q.get_device().get_info<sycl::info::device::max_work_group_size>();
  if (work_group_size > max_work_group_size) {
    std::cout << "WARNING: Skipping ComputeParallel9 example "
              << "as the device does not support required work_group_size"
              << std::endl;
    return 0;
  }
  int log2elements_per_work_item = 6;
  // BUG FIX (comment only): 1 << 6 is 64, not 256 as the original claimed.
  int elements_per_work_item = (1 << log2elements_per_work_item); // 64
  int num_work_items = data_size / elements_per_work_item;
  int num_work_groups = num_work_items / work_group_size;
  std::cout << "Num work items = " << num_work_items << std::endl;
  std::cout << "Num work groups = " << num_work_groups << std::endl;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> res_buf(&sum, 1);
  sycl::buffer<sycl::vec<int, 8>> accum_buf(num_work_groups);
  double elapsed = 0;
  for (int i = warm_up_token; i < iter; ++i) {
    // Zero the per-group vector accumulators on the device.
    q.submit([&](auto &h) {
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_work_groups, [=](auto index) {
        sycl::vec<int, 8> x{0, 0, 0, 0, 0, 0, 0, 0};
        accum_acc[index] = x;
      });
    });
    flush_cache(q, flush_buf, flush_size);
    Timer timer;
    // ComputeParallel9 main begin
    q.submit([&](auto &h) {
      const sycl::accessor buf_acc(buf, h);
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      sycl::local_accessor<sycl::vec<int, 8>, 1> scratch(work_group_size, h);
      h.parallel_for(
          sycl::nd_range<1>{num_work_items, work_group_size},
          [=](sycl::nd_item<1> item) [[intel::reqd_sub_group_size(16)]] {
            size_t group_id = item.get_group(0);
            size_t loc_id = item.get_local_id(0);
            sycl::sub_group sg = item.get_sub_group();
            sycl::vec<int, 8> sum{0, 0, 0, 0, 0, 0, 0, 0};
            // First element this sub-group covers in the input.
            int base = (group_id * work_group_size +
                        sg.get_group_id()[0] * sg.get_local_range()[0]) *
                       elements_per_work_item;
            // 8 block loads of 128 ints each = 64 ints per lane.
            for (int i = 0; i < elements_per_work_item / 8; ++i) {
              auto buf_ptr = sycl::address_space_cast<
                  sycl::access::address_space::global_space,
                  sycl::access::decorated::yes>(&buf_acc[base + i * 128]);
              sum += sg.load<8>(buf_ptr);
            }
            scratch[loc_id] = sum;
            // Power-of-two tree reduction in local memory.
            for (int i = work_group_size / 2; i > 0; i >>= 1) {
              item.barrier(sycl::access::fence_space::local_space);
              if (loc_id < static_cast<size_t>(i))
                scratch[loc_id] += scratch[loc_id + i];
            }
            if (loc_id == 0)
              accum_acc[group_id] = scratch[0];
          });
    });
    // ComputeParallel9 main end
    q.wait();
    {
      // Combine per-group vector partials, then collapse the 8 lanes.
      sycl::host_accessor h_acc(accum_buf);
      sycl::vec<int, 8> res{0, 0, 0, 0, 0, 0, 0, 0};
      for (int i = 0; i < num_work_groups; ++i)
        res += h_acc[i];
      sum = 0;
      for (int i = 0; i < 8; ++i)
        sum += res[i];
    }
    // BUG FIX: exclude only the warm-up iteration (i == warm_up_token);
    // the original compared `iter` to the token, which never matches.
    elapsed += (i == warm_up_token) ? 0 : timer.Elapsed();
  }
  elapsed = elapsed / iter;
  if (sum == sum_expected)
    std::cout << "SUCCESS: Time ComputeParallel9 = " << elapsed << "s"
              << " sum = " << sum << std::endl;
  else
    std::cout << "ERROR: ComputeParallel9 Expected " << sum_expected
              << " but got " << sum << std::endl;
  return sum;
} // end ComputeParallel9
// Driver: runs the serial baseline followed by every parallel reduction
// variant on the same input, each for the same number of repetitions.
int main(void) {
  sycl::queue q{sycl::default_selector_v, exception_handler};
  std::cout << q.get_device().get_info<sycl::info::device::name>() << std::endl;
  // Input data plus a same-sized scratch vector used to flush caches.
  std::vector<int> data(N, 1);
  std::vector<int> extra(N, 1);
  const int reps = 16;
  ComputeSerial(data, extra, reps);
  ComputeParallel1(q, data, extra, reps);
  ComputeParallel2(q, data, extra, reps);
  ComputeTreeReduction1(q, data, extra, reps);
  ComputeTreeReduction2(q, data, extra, reps);
  ComputeParallel3(q, data, extra, reps);
  ComputeParallel4(q, data, extra, reps);
  ComputeParallel5(q, data, extra, reps);
  ComputeParallel6(q, data, extra, reps);
  ComputeParallel7(q, data, extra, reps);
  ComputeParallel8(q, data, extra, reps);
  ComputeParallel9(q, data, extra, reps);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/work-group-size/vec-copy.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <chrono>
#include <iostream>
#include <string>
#include <unistd.h>
#include <vector>
// Copy of 32M 'one' values
constexpr size_t N = (32 * 1024 * 1024);
typedef unsigned int uint;
// Verify every element of `res` equals 1, reporting each mismatch; only on
// full success print the elapsed time labelled with `msg`.
void check_result(double elapsed, std::string msg, std::vector<int> &res) {
  bool all_ones = true;
  for (uint idx = 0; idx < N; ++idx) {
    if (res[idx] == 1)
      continue;
    all_ones = false;
    std::cout << "ERROR: Mismatch at " << idx << "\n";
  }
  if (all_ones)
    std::cout << "SUCCESS: Time " << msg << " = " << elapsed << "s\n";
}
// Asynchronous-exception handler handed to the SYCL queue: rethrows each
// captured exception so it can be inspected, then terminates the process on
// any failure (optionally logging first when built with DEBUG).
static auto exception_handler = [](sycl::exception_list eList) {
  for (std::exception_ptr const &e : eList) {
    try {
      std::rethrow_exception(e);
    } catch (std::exception const &e) {
#if DEBUG
      std::cout << "Failure" << std::endl;
#endif
      std::terminate();
    }
  }
};
// Wall-clock stopwatch: starts at construction; Elapsed() reports the
// seconds since construction as a double.
class Timer {
public:
  Timer() : start_(std::chrono::steady_clock::now()) {}
  double Elapsed() {
    const auto stop = std::chrono::steady_clock::now();
    const Seconds delta = stop - start_;
    return delta.count();
  }

private:
  using Seconds = std::chrono::duration<double>;
  std::chrono::steady_clock::time_point start_;
};
// Copies `src` to `dst` on the device `iter` times with the given
// work-group size, timing each kernel run (the cache is dirtied via `flush`
// first), then verifies the copy and prints the average time.
void vec_copy(sycl::queue &q, std::vector<int> &src, std::vector<int> &dst,
              std::vector<int> &flush, int iter, int work_group_size) {
  const size_t data_size = src.size();
  const size_t flush_size = flush.size();
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  // One work-item per element.
  int num_work_items = data_size;
  double elapsed = 0;
  // Buffers are scoped so dst is written back before check_result reads it.
  {
    sycl::buffer<int> src_buf(src.data(), data_size, props);
    sycl::buffer<int> dst_buf(dst.data(), data_size, props);
    sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
    for (int i = 0; i < iter; i++) {
      // flush the cache
      q.submit([&](auto &h) {
        sycl::accessor flush_acc(flush_buf, h, sycl::write_only, sycl::no_init);
        h.parallel_for(flush_size, [=](auto index) { flush_acc[index] = 1; });
      });
      Timer timer;
      // Timed element-wise copy kernel (sub-group width pinned to 16).
      q.submit([&](auto &h) {
        sycl::accessor src_acc(src_buf, h, sycl::read_only);
        sycl::accessor dst_acc(dst_buf, h, sycl::write_only, sycl::no_init);
        h.parallel_for(sycl::nd_range<1>(num_work_items, work_group_size),
                       [=](sycl::nd_item<1> item)
                           [[intel::reqd_sub_group_size(16)]] {
                             int glob_id = item.get_global_id();
                             dst_acc[glob_id] = src_acc[glob_id];
                           });
      });
      q.wait();
      elapsed += timer.Elapsed();
    }
  }
  elapsed = elapsed / iter;
  std::string msg = "with work-group-size=" + std::to_string(work_group_size);
  check_result(elapsed, msg, dst);
} // vec_copy end
// Driver: times the device copy at work-group sizes of 1x, 2x, 4x, 8x and
// 16x the sub-group width.
int main(void) {
  sycl::queue q{sycl::gpu_selector_v, exception_handler};
  std::cout << q.get_device().get_info<sycl::info::device::name>() << "\n";
  std::vector<int> src(N, 1);
  std::vector<int> dst(N, 0);
  std::vector<int> extra(N, 1);
  // call begin
  const int vec_size = 16;
  for (int multiplier : {1, 2, 4, 8, 16})
    vec_copy(q, src, dst, extra, 16, multiplier * vec_size);
  // call end
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/work-group-size/reduction-wg-size.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <chrono>
#include <iostream>
#include <string>
#include <unistd.h>
#include <vector>
// Summation of 10M 'one' values
constexpr size_t N = (10 * 1024 * 1024);
// expected value of sum
int sum_expected = N;
// Fill the first `data_size` elements of the device buffer with ones so the
// reduction has a known expected sum; blocks until the fill completes.
void init_data(sycl::queue &q, sycl::buffer<int> &buf, int data_size) {
  // initialize data on the device
  q.submit([&](auto &cgh) {
    sycl::accessor out(buf, cgh, sycl::write_only, sycl::no_init);
    cgh.parallel_for(data_size, [=](auto idx) { out[idx] = 1; });
  });
  q.wait();
}
// Compare the computed sum against the file-level expected total and print
// either the timing (on success) or an error diagnostic.
void check_result(double elapsed, std::string msg, int sum) {
  if (sum != sum_expected) {
    std::cout << "ERROR: Expected " << sum_expected << " but got " << sum
              << "\n";
    return;
  }
  std::cout << "SUCCESS: Time is " << elapsed << "s" << msg << "\n";
}
// Asynchronous-exception handler handed to the SYCL queue: rethrows each
// captured exception so it can be inspected, then terminates the process on
// any failure (optionally logging first when built with DEBUG).
static auto exception_handler = [](sycl::exception_list eList) {
  for (std::exception_ptr const &e : eList) {
    try {
      std::rethrow_exception(e);
    } catch (std::exception const &e) {
#if DEBUG
      std::cout << "Failure" << std::endl;
#endif
      std::terminate();
    }
  }
};
// Wall-clock stopwatch: starts at construction; Elapsed() reports the
// seconds since construction as a double.
class Timer {
public:
  Timer() : start_(std::chrono::steady_clock::now()) {}
  double Elapsed() {
    const auto stop = std::chrono::steady_clock::now();
    const Seconds delta = stop - start_;
    return delta.count();
  }

private:
  using Seconds = std::chrono::duration<double>;
  std::chrono::steady_clock::time_point start_;
};
// Sums `data` on the device with a two-stage reduction: each work-item
// accumulates a strided slice of the input, a work-group tree-reduces those
// partials in local memory, and work-group leaders atomically add their
// group's total into sum_buf.  Repeats `iter` times (cache flushed before
// each run) and prints the average time via check_result.
void reduction(sycl::queue &q, std::vector<int> &data, std::vector<int> &flush,
               int iter, int work_group_size) {
  const size_t data_size = data.size();
  const size_t flush_size = flush.size();
  int sum = 0;
  const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
  // int vec_size =
  //     q.get_device().get_info<sycl::info::device::native_vector_width_int>();
  // Total work-items; each one covers work_group_size elements via the
  // strided loop inside the kernel.
  int num_work_items = data_size / work_group_size;
  sycl::buffer<int> buf(data.data(), data_size, props);
  sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
  sycl::buffer<int> sum_buf(&sum, 1, props);
  init_data(q, buf, data_size);
  double elapsed = 0;
  for (int i = 0; i < iter; i++) {
    // Zero the device-side result before each run.
    q.submit([&](auto &h) {
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(1, [=](auto index) { sum_acc[index] = 0; });
    });
    // flush the cache
    q.submit([&](auto &h) {
      sycl::accessor flush_acc(flush_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(flush_size, [=](auto index) { flush_acc[index] = 1; });
    });
    Timer timer;
    // reductionMapToHWVector main begin
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::local_accessor<int, 1> scratch(work_group_size, h);
      sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(
          sycl::nd_range<1>(num_work_items, work_group_size),
          [=](sycl::nd_item<1> item) [[intel::reqd_sub_group_size(16)]] {
            // Device-scope atomic over the single-element result buffer.
            auto v =
                sycl::atomic_ref<int, sycl::memory_order::relaxed,
                                 sycl::memory_scope::device,
                                 sycl::access::address_space::global_space>(
                    sum_acc[0]);
            int sum = 0;
            int glob_id = item.get_global_id();
            int loc_id = item.get_local_id();
            // Grid-wide strided accumulation: each item sums every
            // num_work_items-th element starting at its global id.
            for (unsigned int i = glob_id; i < data_size; i += num_work_items)
              sum += buf_acc[i];
            scratch[loc_id] = sum;
            // Power-of-two tree reduction in local memory; the barrier
            // before each halving keeps reads and writes ordered.
            for (int i = work_group_size / 2; i > 0; i >>= 1) {
              item.barrier(sycl::access::fence_space::local_space);
              if (loc_id < i)
                scratch[loc_id] += scratch[loc_id + i];
            }
            // Work-group leader publishes the group total.
            if (loc_id == 0)
              v.fetch_add(scratch[0]);
          });
    });
    q.wait();
    elapsed += timer.Elapsed();
    sycl::host_accessor h_acc(sum_buf);
    sum = h_acc[0];
  }
  elapsed = elapsed / iter;
  std::string msg = "with work-groups=" + std::to_string(work_group_size);
  check_result(elapsed, msg, sum);
} // reduction end
// Driver: times the reduction at the smallest useful work-group size (one
// sub-group) and again at the device maximum.
int main(void) {
  sycl::queue q{sycl::gpu_selector_v, exception_handler};
  std::cout << q.get_device().get_info<sycl::info::device::name>() << "\n";
  std::vector<int> data(N, 1);
  std::vector<int> extra(N, 1);
  // call begin
  const int vec_size = 16;
  reduction(q, data, extra, 16, vec_size);
  const int max_wg =
      q.get_device().get_info<sycl::info::device::max_work_group_size>();
  reduction(q, data, extra, 16, max_wg);
  // call end
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/libraries-fcorr/fcorr_1d_usm.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <iostream>
#include <mkl.h>
#include <oneapi/mkl/dfti.hpp>
#include <oneapi/mkl/rng.hpp>
#include <oneapi/mkl/vm.hpp>
// Computes the circular cross-correlation of two 1-D signals via the FFT
// (corr = IDFT(DFT(sig1) * conj(DFT(sig2)))) using oneMKL on USM shared
// memory, then reports the shift giving the maximum correlation.
int main(int argc, char **argv) {
  // Signal length from the command line; forced even and at least 32.
  unsigned int N = (argc == 1) ? 32 : std::stoi(argv[1]);
  if ((N % 2) != 0)
    N++;
  if (N < 32)
    N = 32;
  // Initialize SYCL queue
  sycl::queue Q(sycl::default_selector_v);
  auto sycl_device = Q.get_device();
  auto sycl_context = Q.get_context();
  std::cout << "Running on: "
            << Q.get_device().get_info<sycl::info::device::name>() << std::endl;
  // Initialize signal and correlation arrays.
  // N + 2 floats: room for the packed complex spectrum (presumably N/2 + 1
  // complex values) of an in-place real FFT — TODO confirm layout.
  auto sig1 = sycl::malloc_shared<float>(N + 2, sycl_device, sycl_context);
  auto sig2 = sycl::malloc_shared<float>(N + 2, sycl_device, sycl_context);
  auto corr = sycl::malloc_shared<float>(N + 2, sycl_device, sycl_context);
  // Initialize input signals with artificial data
  std::uint32_t seed = (unsigned)time(NULL); // Get RNG seed value
  oneapi::mkl::rng::mcg31m1 engine(Q, seed); // Initialize RNG engine
  // Set RNG distribution
  oneapi::mkl::rng::uniform<float, oneapi::mkl::rng::uniform_method::standard>
      rng_distribution(-0.00005, 0.00005);
  // Warning: These statements run on the device.
  auto evt1 =
      oneapi::mkl::rng::generate(rng_distribution, engine, N, sig1); // Noise
  auto evt2 = oneapi::mkl::rng::generate(rng_distribution, engine, N, sig2);
  evt1.wait();
  evt2.wait();
  // Warning: These statements run on the host, so sig1 and sig2 will have to be
  // updated on the device.
  // Embed a 3-sample pulse in each signal; sig2's pulse sits N/2 away from
  // sig1's, which determines the expected best-correlation shift.
  sig1[N - N / 4 - 1] = 1.0;
  sig1[N - N / 4] = 1.0;
  sig1[N - N / 4 + 1] = 1.0; // Signal
  sig2[N / 4 - 1] = 1.0;
  sig2[N / 4] = 1.0;
  sig2[N / 4 + 1] = 1.0;
  clock_t start_time = clock(); // Start timer
  // Initialize FFT descriptor
  oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE,
                               oneapi::mkl::dft::domain::REAL>
      transform_plan(N);
  transform_plan.commit(Q);
  // Perform forward transforms on real arrays
  evt1 = oneapi::mkl::dft::compute_forward(transform_plan, sig1);
  evt2 = oneapi::mkl::dft::compute_forward(transform_plan, sig2);
  // Compute: DFT(sig1) * CONJG(DFT(sig2))
  oneapi::mkl::vm::mulbyconj(
      Q, N / 2, reinterpret_cast<std::complex<float> *>(sig1),
      reinterpret_cast<std::complex<float> *>(sig2),
      reinterpret_cast<std::complex<float> *>(corr), {evt1, evt2})
      .wait();
  // Perform backward transform on complex correlation array
  oneapi::mkl::dft::compute_backward(transform_plan, corr).wait();
  clock_t end_time = clock(); // Stop timer
  std::cout << "The 1D correlation (N = " << N << ") took "
            << float(end_time - start_time) / CLOCKS_PER_SEC << " seconds."
            << std::endl;
  // Find the shift that gives maximum correlation value
  float max_corr = 0.0;
  int shift = 0;
  for (unsigned int idx = 0; idx < N; idx++) {
    if (corr[idx] > max_corr) {
      max_corr = corr[idx];
      shift = idx;
    }
  }
  // Map shifts in the upper half-range to equivalent negative shifts.
  int _N = static_cast<int>(N);
  shift =
      (shift > _N / 2) ? shift - _N : shift; // Treat the signals as circularly
                                             // shifted versions of each other.
  std::cout << "Shift the second signal " << shift
            << " elements relative to the first signal to get a maximum, "
               "normalized correlation score of "
            << max_corr / N << "." << std::endl;
  // Cleanup
  sycl::free(sig1, sycl_context);
  sycl::free(sig2, sycl_context);
  sycl::free(corr, sycl_context);
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/libraries-fcorr/fcorr_1d_buffers.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <iostream>
#include <mkl.h>
#include <oneapi/mkl/dfti.hpp>
#include <oneapi/mkl/rng.hpp>
#include <oneapi/mkl/vm.hpp>
// Computes the circular cross-correlation of two 1-D signals via the FFT
// (corr = IDFT(DFT(sig1) * conj(DFT(sig2)))) using oneMKL with SYCL
// buffers, then reports the shift giving the maximum correlation.
int main(int argc, char **argv) {
  // Signal length from the command line; forced even and at least 32.
  unsigned int N = (argc == 1) ? 32 : std::stoi(argv[1]);
  if ((N % 2) != 0)
    N++;
  if (N < 32)
    N = 32;
  // Initialize SYCL queue
  sycl::queue Q(sycl::default_selector_v);
  std::cout << "Running on: "
            << Q.get_device().get_info<sycl::info::device::name>() << std::endl;
  // Create buffers for signal data. This will only be used on the device.
  // N + 2 floats leave room for the packed complex spectrum of a real FFT.
  sycl::buffer<float> sig1_buf{N + 2};
  sycl::buffer<float> sig2_buf{N + 2};
  // Declare container to hold the correlation result (computed on the device,
  // used on the host)
  std::vector<float> corr(N + 2);
  // Open new scope to trigger update of correlation result
  {
    sycl::buffer<float> corr_buf(corr);
    // Initialize the input signals with artificial data
    std::uint32_t seed = (unsigned)time(NULL); // Get RNG seed value
    oneapi::mkl::rng::mcg31m1 engine(Q, seed); // Initialize RNG engine
    // Set RNG distribution
    oneapi::mkl::rng::uniform<float, oneapi::mkl::rng::uniform_method::standard>
        rng_distribution(-0.00005, 0.00005);
    oneapi::mkl::rng::generate(rng_distribution, engine, N, sig1_buf); // Noise
    oneapi::mkl::rng::generate(rng_distribution, engine, N, sig2_buf);
    // Embed a 3-sample pulse in each signal, on the device (the buffers
    // have no host copy to update).
    Q.submit([&](sycl::handler &h) {
      sycl::accessor sig1_acc{sig1_buf, h, sycl::write_only};
      sycl::accessor sig2_acc{sig2_buf, h, sycl::write_only};
      h.single_task<>([=]() {
        sig1_acc[N - N / 4 - 1] = 1.0;
        sig1_acc[N - N / 4] = 1.0;
        sig1_acc[N - N / 4 + 1] = 1.0; // Signal
        sig2_acc[N / 4 - 1] = 1.0;
        sig2_acc[N / 4] = 1.0;
        sig2_acc[N / 4 + 1] = 1.0;
      });
    }); // End signal initialization
    clock_t start_time = clock(); // Start timer
    // Initialize FFT descriptor
    oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE,
                                 oneapi::mkl::dft::domain::REAL>
        transform_plan(N);
    transform_plan.commit(Q);
    // Perform forward transforms on real arrays
    oneapi::mkl::dft::compute_forward(transform_plan, sig1_buf);
    oneapi::mkl::dft::compute_forward(transform_plan, sig2_buf);
    // Compute: DFT(sig1) * CONJG(DFT(sig2))
    // Reinterpret the packed float spectra as complex buffers for the
    // element-wise multiply-by-conjugate.
    auto sig1_buf_cplx =
        sig1_buf.template reinterpret<std::complex<float>, 1>((N + 2) / 2);
    auto sig2_buf_cplx =
        sig2_buf.template reinterpret<std::complex<float>, 1>((N + 2) / 2);
    auto corr_buf_cplx =
        corr_buf.template reinterpret<std::complex<float>, 1>((N + 2) / 2);
    oneapi::mkl::vm::mulbyconj(Q, N / 2, sig1_buf_cplx, sig2_buf_cplx,
                               corr_buf_cplx);
    // Perform backward transform on complex correlation array
    oneapi::mkl::dft::compute_backward(transform_plan, corr_buf);
    clock_t end_time = clock(); // Stop timer
    std::cout << "The 1D correlation (N = " << N << ") took "
              << float(end_time - start_time) / CLOCKS_PER_SEC << " seconds."
              << std::endl;
  } // Buffer holding correlation result is now out of scope, forcing update of
    // host container
  // Find the shift that gives maximum correlation value
  float max_corr = 0.0;
  int shift = 0;
  for (unsigned int idx = 0; idx < N; idx++) {
    if (corr[idx] > max_corr) {
      max_corr = corr[idx];
      shift = idx;
    }
  }
  // Map shifts in the upper half-range to equivalent negative shifts.
  int _N = static_cast<int>(N);
  shift =
      (shift > _N / 2) ? shift - _N : shift; // Treat the signals as circularly
                                             // shifted versions of each other.
  std::cout << "Shift the second signal " << shift
            << " elements relative to the first signal to get a maximum, "
               "normalized correlation score of "
            << max_corr / N << "." << std::endl;
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/libraries-fcorr/fcorr_1d_usm_fixed.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#include <iostream>
#include <mkl.h>
#include <oneapi/mkl/dfti.hpp>
#include <oneapi/mkl/rng.hpp>
#include <oneapi/mkl/vm.hpp>
// Fixed variant of the USM correlation sample: identical pipeline
// (corr = IDFT(DFT(sig1) * conj(DFT(sig2)))), but the pulse insertion is
// performed in a device single_task instead of host writes, so the shared
// allocations are not modified from the host between device operations.
int main(int argc, char **argv) {
  // Signal length from the command line; forced even and at least 32.
  unsigned int N = (argc == 1) ? 32 : std::stoi(argv[1]);
  if ((N % 2) != 0)
    N++;
  if (N < 32)
    N = 32;
  // Initialize SYCL queue
  sycl::queue Q(sycl::default_selector_v);
  auto sycl_device = Q.get_device();
  auto sycl_context = Q.get_context();
  std::cout << "Running on: "
            << Q.get_device().get_info<sycl::info::device::name>() << std::endl;
  // Initialize signal and correlation arrays.
  // N + 2 floats leave room for the packed complex spectrum of a real FFT.
  auto sig1 = sycl::malloc_shared<float>(N + 2, sycl_device, sycl_context);
  auto sig2 = sycl::malloc_shared<float>(N + 2, sycl_device, sycl_context);
  auto corr = sycl::malloc_shared<float>(N + 2, sycl_device, sycl_context);
  // Initialize input signals with artificial data
  std::uint32_t seed = (unsigned)time(NULL); // Get RNG seed value
  oneapi::mkl::rng::mcg31m1 engine(Q, seed); // Initialize RNG engine
  // Set RNG distribution
  oneapi::mkl::rng::uniform<float, oneapi::mkl::rng::uniform_method::standard>
      rng_distribution(-0.00005, 0.00005);
  auto evt1 =
      oneapi::mkl::rng::generate(rng_distribution, engine, N, sig1); // Noise
  auto evt2 = oneapi::mkl::rng::generate(rng_distribution, engine, N, sig2);
  evt1.wait();
  evt2.wait();
  // Embed a 3-sample pulse in each signal, on the device.
  Q.single_task<>([=]() {
    sig1[N - N / 4 - 1] = 1.0;
    sig1[N - N / 4] = 1.0;
    sig1[N - N / 4 + 1] = 1.0; // Signal
    sig2[N / 4 - 1] = 1.0;
    sig2[N / 4] = 1.0;
    sig2[N / 4 + 1] = 1.0;
  }).wait();
  clock_t start_time = clock(); // Start timer
  // Initialize FFT descriptor
  oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE,
                               oneapi::mkl::dft::domain::REAL>
      transform_plan(N);
  transform_plan.commit(Q);
  // Perform forward transforms on real arrays
  evt1 = oneapi::mkl::dft::compute_forward(transform_plan, sig1);
  evt2 = oneapi::mkl::dft::compute_forward(transform_plan, sig2);
  // Compute: DFT(sig1) * CONJG(DFT(sig2))
  oneapi::mkl::vm::mulbyconj(
      Q, N / 2, reinterpret_cast<std::complex<float> *>(sig1),
      reinterpret_cast<std::complex<float> *>(sig2),
      reinterpret_cast<std::complex<float> *>(corr), {evt1, evt2})
      .wait();
  // Perform backward transform on complex correlation array
  oneapi::mkl::dft::compute_backward(transform_plan, corr).wait();
  clock_t end_time = clock(); // Stop timer
  std::cout << "The 1D correlation (N = " << N << ") took "
            << float(end_time - start_time) / CLOCKS_PER_SEC << " seconds."
            << std::endl;
  // Find the shift that gives maximum correlation value
  float max_corr = 0.0;
  int shift = 0;
  for (unsigned int idx = 0; idx < N; idx++) {
    if (corr[idx] > max_corr) {
      max_corr = corr[idx];
      shift = idx;
    }
  }
  // Map shifts in the upper half-range to equivalent negative shifts.
  int _N = static_cast<int>(N);
  shift =
      (shift > _N / 2) ? shift - _N : shift; // Treat the signals as circularly
                                             // shifted versions of each other.
  std::cout << "Shift the second signal " << shift
            << " elements relative to the first signal to get a maximum, "
               "normalized correlation score of "
            << max_corr / N << "." << std::endl;
  // Cleanup
  sycl::free(sig1, sycl_context);
  sycl::free(sig2, sycl_context);
  sycl::free(corr, sycl_context);
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/overlap-data-transfers/reduction.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <chrono>
#include <iostream>
#include <string>
#include <vector>
// Summation of 10M 'one' values
constexpr size_t N = (10 * 1024 * 1024);
// Number of repetitions
constexpr int repetitions = 16;
// Asynchronous-exception handler handed to the SYCL queue: rethrows each
// captured exception so it can be inspected, then terminates the process on
// any failure (optionally logging first when built with DEBUG).
static auto exception_handler = [](sycl::exception_list eList) {
  for (std::exception_ptr const &e : eList) {
    try {
      std::rethrow_exception(e);
    } catch (std::exception const &e) {
#if DEBUG
      std::cout << "Failure" << std::endl;
#endif
      std::terminate();
    }
  }
};
// Wall-clock stopwatch: starts at construction; Elapsed() reports the
// seconds since construction as a double.
class Timer {
public:
  Timer() : start_(std::chrono::steady_clock::now()) {}
  double Elapsed() {
    const auto stop = std::chrono::steady_clock::now();
    const Seconds delta = stop - start_;
    return delta.count();
  }

private:
  using Seconds = std::chrono::duration<double>;
  std::chrono::steady_clock::time_point start_;
};
// Host-side baseline: accumulate every element of `data` in order and
// return the total (0 for an empty vector).
float ComputeSerial(std::vector<float> &data) {
  float total = 0;
  for (const float value : data)
    total += value;
  return total;
} // end ComputeSerial
// Sums `data` on the device: the array is split into contiguous BATCH-sized
// chunks, one per processing element; each work-item serially accumulates
// its chunk into accum[], and the host adds up the partial sums.
// Returns the total (0 for empty input).
float ComputeParallel1(sycl::queue &q, std::vector<float> &data) {
  const size_t data_size = data.size();
  float sum = 0;
  // Per-work-item partial-sum scratch; allocated once and reused across
  // calls (deliberately never freed, as in the original sample).
  static float *accum = 0;
  if (data_size > 0) {
    const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
    int num_EUs =
        q.get_device().get_info<sycl::info::device::max_compute_units>();
    int vec_size =
        q.get_device()
            .get_info<sycl::info::device::native_vector_width_float>();
    int num_processing_elements = num_EUs * vec_size;
    // Ceiling division: elements each work-item accumulates serially.
    int BATCH = (N + num_processing_elements - 1) / num_processing_elements;
    // BUG FIX: allocate the accumulator BEFORE wrapping it in a
    // use_host_ptr buffer; the original constructed accum_buf around a null
    // pointer on the first call.
    if (!accum)
      accum = new float[num_processing_elements];
    sycl::buffer<float> buf(data.data(), data.size(), props);
    sycl::buffer<float> accum_buf(accum, num_processing_elements, props);
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_processing_elements, [=](auto index) {
        size_t glob_id = index[0];
        size_t start = glob_id * BATCH;
        size_t end = (glob_id + 1) * BATCH;
        // Last chunk may be short.
        if (end > N)
          end = N;
        float sum = 0.0;
        for (size_t i = start; i < end; i++)
          sum += buf_acc[i];
        accum_acc[glob_id] = sum;
      });
    });
    q.wait();
    // Final combine of the per-work-item partials on the host.
    sycl::host_accessor h_acc(accum_buf);
    for (int i = 0; i < num_processing_elements; i++)
      sum += h_acc[i];
  }
  return sum;
} // end ComputeParallel1
// Like ComputeParallel1, but the final combine of the per-work-item partial
// sums also runs on the device (a single-work-item kernel writing res_buf),
// so the host only receives the scalar result when the buffers go out of
// scope.  Returns the total (0 for empty input).
float ComputeParallel2(sycl::queue &q, std::vector<float> &data) {
  const size_t data_size = data.size();
  float sum = 0;
  // Per-work-item partial-sum scratch; allocated once and reused across
  // calls (deliberately never freed, as in the original sample).
  static float *accum = 0;
  if (data_size > 0) {
    const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
    int num_EUs =
        q.get_device().get_info<sycl::info::device::max_compute_units>();
    int vec_size =
        q.get_device()
            .get_info<sycl::info::device::native_vector_width_float>();
    int num_processing_elements = num_EUs * vec_size;
    // Ceiling division: elements each work-item accumulates serially.
    int BATCH = (N + num_processing_elements - 1) / num_processing_elements;
    // BUG FIX: allocate the accumulator BEFORE wrapping it in a
    // use_host_ptr buffer; the original constructed accum_buf around a null
    // pointer on the first call.
    if (!accum)
      accum = new float[num_processing_elements];
    sycl::buffer<float> buf(data.data(), data.size(), props);
    sycl::buffer<float> accum_buf(accum, num_processing_elements, props);
    sycl::buffer<float> res_buf(&sum, 1, props);
    q.submit([&](auto &h) {
      sycl::accessor buf_acc(buf, h, sycl::read_only);
      sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(num_processing_elements, [=](auto index) {
        size_t glob_id = index[0];
        size_t start = glob_id * BATCH;
        size_t end = (glob_id + 1) * BATCH;
        // Last chunk may be short.
        if (end > N)
          end = N;
        float sum = 0.0;
        for (size_t i = start; i < end; i++)
          sum += buf_acc[i];
        accum_acc[glob_id] = sum;
      });
    });
    q.submit([&](auto &h) {
      // BUG FIX: the accessor must be built from the buffer accum_buf, not
      // from the raw host pointer accum as in the original.
      sycl::accessor accum_acc(accum_buf, h, sycl::read_only);
      sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(1, [=](auto index) {
        res_acc[index] = 0;
        for (int i = 0; i < num_processing_elements; i++)
          res_acc[index] += accum_acc[i];
      });
    });
  }
  // Buffers go out of scope and data gets transferred from device to host
  return sum;
} // end ComputeParallel2
// Times the serial and both parallel summations over `repetitions` runs,
// flushing the cache via the oversized `extra` vector before each timed
// call, and prints the average time plus the computed sum for each variant.
int main(int argc, char *argv[]) {
  // BUG FIX: the original used the unqualified, deprecated
  // `default_selector{}`; use the SYCL 2020 selector, matching the queue
  // construction style of the related samples.
  sycl::queue q{sycl::default_selector_v, exception_handler};
  std::vector<float> data(N, 1.0f);
  std::vector<float> extra(4 * N, 1.0f);
  float sum_s = 0, sum = 0;
  double elapsed_s = 0;
  for (int k = 0; k < repetitions; ++k) {
    // Flush the cache
    (void)ComputeSerial(extra);
    Timer timer_s;
    // Time the summation
    sum_s = ComputeSerial(data);
    elapsed_s += timer_s.Elapsed();
  }
  elapsed_s /= repetitions;
  std::cout << "Time Serial = " << elapsed_s << "s"
            << " sum = " << sum_s << "\n";
  double elapsed_p1 = 0;
  for (int k = 0; k < repetitions; ++k) {
    // Flush the cache
    (void)ComputeParallel1(q, extra);
    Timer timer_s;
    // Time the summation
    sum = ComputeParallel1(q, data);
    elapsed_p1 += timer_s.Elapsed();
  }
  elapsed_p1 /= repetitions;
  std::cout << "Time parallel1 = " << elapsed_p1 << "s"
            << " sum = " << sum << "\n";
  double elapsed_p2 = 0;
  for (int k = 0; k < repetitions; ++k) {
    // Flush the cache
    (void)ComputeParallel2(q, extra);
    Timer timer_s;
    // Time the summation
    sum = ComputeParallel2(q, data);
    elapsed_p2 += timer_s.Elapsed();
  }
  elapsed_p2 /= repetitions;
  std::cout << "Time parallel2 = " << elapsed_p2 << "s"
            << " sum = " << sum << "\n";
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/overlap-data-transfers/overlap.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Snippet begin
#include <CL/sycl.hpp>
#define NITERS 10
#define KERNEL_ITERS 10000
#define NUM_CHUNKS 10
#define CHUNK_SIZE 10000000
// Wall-clock stopwatch: starts at construction; Elapsed() reports the
// seconds since construction as a double.
class Timer {
public:
  Timer() : start_(std::chrono::steady_clock::now()) {}
  double Elapsed() {
    const auto stop = std::chrono::steady_clock::now();
    const Seconds delta = stop - start_;
    return delta.count();
  }

private:
  using Seconds = std::chrono::duration<double>;
  std::chrono::steady_clock::time_point start_;
};
// Streams num_chunks independent chunks through the device iter times:
// per chunk, copy-in -> kernel (adds KERNEL_ITERS to each element) ->
// copy-out, with dependencies only within a chunk so transfers and compute
// on different chunks can overlap.  Verifies results and prints the average
// per-iteration wall time.
int main() {
  const int num_chunks = NUM_CHUNKS;
  const int chunk_size = CHUNK_SIZE;
  const int iter = NITERS;
  sycl::queue q;
  // Allocate and initialize host data
  float *host_data[num_chunks];
  for (int c = 0; c < num_chunks; c++) {
    host_data[c] = sycl::malloc_host<float>(chunk_size, q);
    float val = c;
    for (int i = 0; i < chunk_size; i++)
      host_data[c][i] = val;
  }
  std::cout << "Allocated host data\n";
  // Allocate and initialize device memory
  float *device_data[num_chunks];
  for (int c = 0; c < num_chunks; c++) {
    device_data[c] = sycl::malloc_device<float>(chunk_size, q);
    float val = 1000.0;
    q.fill<float>(device_data[c], val, chunk_size);
  }
  q.wait();
  std::cout << "Allocated device data\n";
  Timer timer;
  for (int it = 0; it < iter; it++) {
    for (int c = 0; c < num_chunks; c++) {
      auto add_one = [=](auto id) {
        for (int i = 0; i < KERNEL_ITERS; i++)
          device_data[c][id] += 1.0;
      };
      // Copy-in not dependent on previous event
      auto copy_in =
          q.memcpy(device_data[c], host_data[c], sizeof(float) * chunk_size);
      // Compute waits for copy_in
      auto compute = q.parallel_for(chunk_size, copy_in, add_one);
      auto cg = [=](auto &h) {
        h.depends_on(compute);
        h.memcpy(host_data[c], device_data[c], sizeof(float) * chunk_size);
      };
      // Copy out waits for compute
      auto copy_out = q.submit(cg);
    }
    q.wait();
  }
  auto elapsed = timer.Elapsed() / iter;
  // Each outer iteration adds KERNEL_ITERS to every element, starting
  // from the chunk index c.
  for (int c = 0; c < num_chunks; c++) {
    for (int i = 0; i < chunk_size; i++) {
      if (host_data[c][i] != (float)((c + KERNEL_ITERS * iter))) {
        // BUG FIX: report the actual expected value; the original printed
        // the hard-coded 10000 (KERNEL_ITERS only, ignoring iter and c).
        std::cout << "Mismatch for chunk: " << c << " position: " << i
                  << " expected: " << c + KERNEL_ITERS * iter
                  << " got: " << host_data[c][i] << "\n";
        break;
      }
    }
  }
  // BUG FIX: Timer::Elapsed() returns seconds (std::chrono::duration<double>),
  // not microseconds; label the unit accordingly.
  std::cout << "Time = " << elapsed << " secs\n";
  // Release the USM allocations (the original leaked them until exit).
  for (int c = 0; c < num_chunks; c++) {
    sycl::free(host_data[c], q);
    sycl::free(device_data[c], q);
  }
}
// Snippet end
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/CXX/OpenMP/hello_omp.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iostream>
#include <omp.h>
#include <sstream>
// Spawns an OpenMP parallel region in which every thread announces its
// thread id, framed by serial greeting and farewell lines.
int main(int argc, char *argv[]) {
  std::cout << "Hello, OpenMP C World!" << std::endl;
#pragma omp parallel
  {
    // Build the whole line locally first so the single write to std::cout
    // is not interleaved character-by-character with other threads.
    std::ostringstream line;
    line << " I am thread " << omp_get_thread_num() << std::endl;
    std::cout << line.str();
  }
  std::cout << "All done, bye." << std::endl;
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/CXX/IPO/main.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "ipo_cxx_lib.h"
#include <iostream>
// Demonstrates cross-translation-unit IPO: argc serves as a handy runtime
// integer fed to the library function plus3.
int main(int argc, char *argv[]) {
  const int result = plus3(argc);
  std::cout << argc << " + 3 = " << result << std::endl;
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/CXX/IPO/ipo_cxx_lib.h
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#pragma once
// Returns x + 3 (defined in ipo_cxx_lib1.cpp).
int plus3(int x);
|
h
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/CXX/IPO/ipo_cxx_lib1.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "ipo_cxx_lib.h"
// Add three to the given integer and return the sum.
int plus3(int x) {
  const int kOffset = 3;
  return kOffset + x;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/SYCL/target_link_library/simple.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
using namespace sycl;
static const int N = 16;
int main() {
// # define queue which has default device associated for offload
queue q;
std::cout << "Device: " << q.get_device().get_info<info::device::name>()
<< std::endl;
// # Unified Shared Memory Allocation enables data access on host and device
int *data = malloc_shared<int>(N, q);
std::cout << "allocated shared memory" << std::endl;
// # Initialization
for (int i = 0; i < N; i++) {
data[i] = i;
}
// # Offload parallel computation to device
q.parallel_for(range<1>(N), [=](id<1> i) { data[i] *= 2; }).wait();
// # Print Output
for (int i = 0; i < N; i++)
std::cout << data[i] << std::endl;
free(data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/SYCL/add_sycl_to_target/simple.cpp
|
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
//
// The main program has no SYCL calls. It just implements the main program
// logic and calls routines defined elsewhere to do the real work.
//
#include "offload.h"
#include <iostream>
#include <vector>
// Program logic only: size the result vector (optionally from argv[1]),
// delegate the actual computation to do_work() (defined elsewhere), and
// print what came back.
int main(int argc, char *argv[]) {
  int n_items = 16;
  if (argc > 1) {
    n_items = atoi(argv[1]);  // caller may override the default size
  }
  std::vector<int> ans(n_items);
  do_work(ans);
  // # Print Output
  for (const auto &value : ans) {
    std::cout << value << std::endl;
  }
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/SYCL/add_sycl_to_target/offload.h
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef OFFLOAD_H
#define OFFLOAD_H
#include <vector>
// Fills `ans` with 2*i for each index i, computed on an offload device
// (see offload.cpp). The vector's size chooses the amount of work.
extern void do_work(std::vector<int> &ans);
#endif
|
h
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/SYCL/add_sycl_to_target/offload.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "offload.h"
#include <sycl/sycl.hpp>
#include <iostream>
#include <vector>
// Fill `ans` with 2*i for each index i, computing on the default SYCL device.
//
// The vector's size selects the amount of work; an empty vector results in a
// zero-sized kernel range. Data travels through USM shared memory so both
// host initialization and device computation touch the same allocation.
void do_work(std::vector<int> &ans) {
  // # define queue which has default device associated for offload
  sycl::queue q;
  std::cout << "Using device: "
            << q.get_device().get_info<sycl::info::device::name>() << std::endl;
  const size_t n_items = ans.size();
  // # Unified Shared Memory Allocation enables data access on host and device
  int *data = sycl::malloc_shared<int>(n_items, q);
  assert(data);
  // # Initialization (size_t index avoids the signed/unsigned comparison the
  // original `int i < ans.size()` loops produced)
  for (size_t i = 0; i < n_items; i++) {
    data[i] = static_cast<int>(i);
  }
  // # Offload parallel computation to device
  q.parallel_for(sycl::range<1>(n_items), [=](sycl::id<1> i) {
    data[i] *= 2;
  }).wait();
  // Copy results back into the caller's vector.
  for (size_t i = 0; i < n_items; ++i) {
    ans[i] = data[i];
  }
  sycl::free(data, q);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/C/OpenMP/hello_omp.c
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <omp.h>
#include <stdio.h>
/* Greet from the initial thread, let every OpenMP thread announce its id,
 * then say goodbye. */
int main(int argc, char *argv[]) {
  printf("Hello, OpenMP C World!\n");
#pragma omp parallel
  {
    printf(" I am thread %d\n", omp_get_thread_num());
  }
  printf("All done, bye.\n");
  return 0;
}
|
c
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/C/IPO/main.c
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "ipo_c_lib.h"
#include <stdio.h>
/* IPO demo driver: add 3 to argc via the library function plus3() and
 * report the result. */
int main(int argc, char *argv[]) {
  const int sum = plus3(argc);
  printf("%d + 3 = %d\n", argc, sum);
  return 0;
}
|
c
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/C/IPO/ipo_c_lib1.c
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "ipo_c_lib.h"
#include <stdio.h>
/* Return the input increased by three. */
int plus3(int x)
{
    return 3 + x;
}
|
c
|
oneAPI-samples
|
data/projects/oneAPI-samples/Templates/cmake/C/IPO/ipo_c_lib.h
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef IPO_C_LIB1_H
#define IPO_C_LIB1_H
/* Returns x + 3 (defined in ipo_c_lib1.c). */
int plus3(int x);
#endif
|
h
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/MPI/mpi_send_gpu/src/mpi_send_gpu_sycl.cpp
|
/*==============================================================
* Copyright © 2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
* ============================================================= */
/* Description:
* Sending GPU buffer from <ACTIVE_RANK> to the host buffer of another rank.
*
* How to run:
* mpiexec -n 2 -genv I_MPI_OFFLOAD=1 -genv LIBOMPTARGET_PLUGIN=level0 ./mpi_send_gpu_buf_sycl
*/
#include <mpi.h>
#include <sycl.hpp>
#include <stdio.h>
#define ACTIVE_RANK 1
/* Print the received buffer and validate each element against
 * values[i] * (ACTIVE_RANK + 1); aborts the whole MPI job on the first
 * mismatch. */
void VerifyResult(int *result, int *values, unsigned num_values, int rank)
{
    printf("[%d] result: ", rank);
    for (unsigned idx = 0; idx < num_values; ++idx) {
        const int expected = values[idx] * (ACTIVE_RANK + 1);
        printf("%d ", result[idx]);
        if (result[idx] == expected)
            continue;
        /* Mismatch: finish the partial output line, report, and bail out. */
        printf("\n");
        fflush(stdout);
        fprintf(stderr, "[%d] VALIDATION ERROR (expected %d)\n", rank, expected);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    printf("\n");
}
/* Exercise GPU-aware MPI between exactly two ranks: rank ACTIVE_RANK scales
 * a Fibonacci payload on the device and sends it three times (to a host
 * receive buffer, to a device receive buffer, and finally from its host
 * copy to a device receive buffer); the other rank receives and validates
 * each transfer via VerifyResult(). */
int main(int argc, char **argv) {
  int nranks, rank;
  const unsigned num_values = 10;
  sycl::queue q(sycl::gpu_selector_v);
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nranks);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  /* The sample is hard-wired for exactly two ranks. */
  if (nranks != 2) {
    if (rank == 0) fprintf(stderr, "run mpiexec with -n 2\n");
    MPI_Finalize();
    return 1;
  }
  /* Reference payload: a small Fibonacci sequence, identical on both ranks
     so the receiver can validate without extra communication. */
  int values[num_values];
  values[0] = 0;
  values[1] = 1;
  for (unsigned i = 2; i < num_values; i++) {
    values[i] = values[i - 2] + values[i - 1];
  }
  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == ACTIVE_RANK) {
    int *device_values = sycl::malloc_device < int >(num_values, q);
    int *host_values = sycl::malloc_host < int >(num_values, q);
    q.memcpy(device_values, values, sizeof(values)).wait();
    /* Scale each element by (rank + 1) on the device; the receiver checks
       for values[i] * (ACTIVE_RANK + 1). */
    q.submit([&](auto & h) {
      h.parallel_for(sycl::range(num_values), [=] (auto index) {
        device_values[index[0]] *= (rank + 1);
      });
    }).wait();
    q.memcpy(host_values, device_values, sizeof(values)).wait();
    /* Send device buffer to another rank (host receive buffer) */
    printf("[%d] Sending GPU buffer %p to rank %d\n", rank, device_values, 1 - rank); fflush(stdout);
    MPI_Send(device_values, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD);
    /* Send device buffer to another rank (device receive buffer) */
    printf("[%d] Sending GPU buffer %p to rank %d\n", rank, device_values, 1 - rank); fflush(stdout);
    MPI_Send(device_values, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD);
    /* Send host buffer to another rank (device receive buffer) */
    printf("[%d] Sending host buffer %p to rank %d\n", rank, host_values, 1 - rank); fflush(stdout);
    MPI_Send(host_values, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD);
  } else {
    int result [num_values];
    int *device_result = sycl::malloc_device < int >(num_values, q);
    int *host_result = sycl::malloc_host < int >(num_values, q);
    /* Receive values from device buffer and store them in the host buffer */
    printf("[%d] Receiving data from rank %d to host buffer\n", rank, 1 - rank); fflush(stdout);
    MPI_Recv(host_result, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    q.memcpy(result, host_result, sizeof(result)).wait();
    VerifyResult(result, values, num_values, rank);
    /* Receive values from device buffer and store them in the device buffer */
    printf("[%d] Receiving data from rank %d to GPU buffer\n", rank, 1 - rank); fflush(stdout);
    MPI_Recv(device_result, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    q.memcpy(result, device_result, sizeof(result)).wait();
    VerifyResult(result, values, num_values, rank);
    /* Receive values from the host buffer and store them in the device buffer
       (this matches the sender's third MPI_Send of host_values) */
    printf("[%d] Receiving data from rank %d to GPU buffer\n", rank, 1 - rank); fflush(stdout);
    MPI_Recv(device_result, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    q.memcpy(result, device_result, sizeof(result)).wait();
    VerifyResult(result, values, num_values, rank);
    printf("[%d] SUCCESS\n", rank);
  }
  MPI_Finalize();
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/MPI/mpi_send_gpu/src/mpi_send_gpu_omp.c
|
/*==============================================================
* Copyright © 2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
* ============================================================= */
/* Description:
* Sending GPU buffer from <ACTIVE_RANK> to the host buffer of another rank.
*
* How to run:
* mpiexec -n 2 -genv I_MPI_OFFLOAD=1 -genv LIBOMPTARGET_PLUGIN=level0 ./mpi_send_gpu_buf_omp
*/
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#define ACTIVE_RANK 1
/* Echo the received buffer and check every element against the expected
 * value values[i] * (ACTIVE_RANK + 1). The first mismatch aborts the
 * entire MPI job. */
void VerifyResult(int *result, int *values, unsigned num_values, int rank)
{
    printf("[%d] result: ", rank);
    for (unsigned k = 0; k < num_values; ++k) {
        const int want = values[k] * (ACTIVE_RANK + 1);
        printf("%d ", result[k]);
        if (result[k] != want) {
            /* Terminate the in-progress line before reporting the error. */
            printf("\n");
            fflush(stdout);
            fprintf(stderr, "[%d] VALIDATION ERROR (expected %d)\n", rank, want);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }
    printf("\n");
}
/* Exercise GPU-aware MPI with OpenMP offload between exactly two ranks:
 * rank ACTIVE_RANK scales a Fibonacci payload on the device and performs
 * three sends (device->host, device->device, host->device receive buffer);
 * the other rank receives each transfer and validates via VerifyResult(). */
int main(int argc, char **argv) {
  int nranks, rank;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nranks);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  /* The sample is hard-wired for exactly two ranks. */
  if (nranks != 2) {
    if (rank == 0) fprintf(stderr, "run mpiexec with -n 2\n");
    MPI_Finalize();
    return 1;
  }
  const unsigned num_values = 10;
  int *values = (int *) malloc (num_values * sizeof(int));
  if (values == NULL) {
    fprintf(stderr, "[%d] could not allocate memory\n", rank);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  /* Reference payload: a small Fibonacci sequence, identical on both ranks
     so the receiver can validate without extra communication. */
  values[0] = 0;
  values[1] = 1;
  for (unsigned i = 2; i < num_values; i++) {
    values[i] = values[i - 2] + values[i - 1];
  }
  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == ACTIVE_RANK) {
    /* Copy `rank` and `values` from host to device; inside this region
       `values` refers to the device copy (use_device_ptr). */
    #pragma omp target data map(to: rank, values[0:num_values]) use_device_ptr(values)
    {
      /* Compute something on device */
      #pragma omp target parallel for is_device_ptr(values)
      for (unsigned i = 0; i < num_values; ++i) {
        values[i] *= (rank + 1);
      }
      /* Send device buffer to another rank (host receive buffer) */
      printf("[%d] Sending GPU buffer %p to rank %d\n", rank, values, 1 - rank); fflush(stdout);
      MPI_Send(values, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD);
      /* Send device buffer to another rank (device receive buffer) */
      printf("[%d] Sending GPU buffer %p to rank %d\n", rank, values, 1 - rank); fflush(stdout);
      MPI_Send(values, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD);
    }
    /* Outside the target-data region `values` is the host pointer again. */
    /* Send host buffer to another rank (device receive buffer) */
    printf("[%d] Sending host buffer %p to rank %d\n", rank, values, 1 - rank); fflush(stdout);
    MPI_Send(values, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD);
  } else {
    int *result = (int *) malloc (num_values * sizeof(int));
    if (result == NULL) {
      fprintf(stderr, "[%d] could not allocate memory\n", rank);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
    /* Receive values from device buffer and store them in the host buffer */
    printf("[%d] Receiving data from rank %d to host buffer\n", rank, 1 - rank); fflush(stdout);
    MPI_Recv(result, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    VerifyResult(result, values, num_values, rank);
    /* Copy `rank` and `result` from host to device and backward */
    #pragma omp target data map(to: rank, result[0:num_values]) map(from: result[0:num_values]) use_device_ptr(result)
    {
      /* Reset receive buffer */
      #pragma omp target parallel for is_device_ptr(result)
      for (unsigned i = 0; i < num_values; ++i) {
        result[i] = 0;
      }
      /* Receive values from device buffer and store them in the device buffer */
      printf("[%d] Receiving data from rank %d to GPU buffer\n", rank, 1 - rank); fflush(stdout);
      MPI_Recv(result, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    VerifyResult(result, values, num_values, rank);
    /* Copy `rank` and `result` from host to device and backward */
    #pragma omp target data map(to: rank, result[0:num_values]) map(from: result[0:num_values]) use_device_ptr(result)
    {
      /* Reset receive buffer */
      #pragma omp target parallel for is_device_ptr(result)
      for (unsigned i = 0; i < num_values; ++i) {
        result[i] = 0;
      }
      /* Receive values from host buffer and store them in the device buffer */
      printf("[%d] Receiving data from rank %d to GPU buffer\n", rank, 1 - rank); fflush(stdout);
      MPI_Recv(result, num_values, MPI_INT, 1 - rank, 123, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    VerifyResult(result, values, num_values, rank);
    printf("[%d] SUCCESS\n", rank);
    free(result);
  }
  free(values);
  MPI_Finalize();
  return 0;
}
|
c
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/MPI/jacobian_solver/src/02_jacobian_device_mpi_one-sided_gpu_aware/mpi3_onesided_jacobian_gpu_sycl.cpp
|
/*==============================================================
* Copyright © 2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
* ============================================================= */
/* Distributed Jacobian computation sample using OpenMP GPU offload and MPI-3 one-sided.
*/
#include "mpi.h"
#include <sycl.hpp>
#include <vector>
#include <iostream>
const int Nx = 16384; /* Grid size */
const int Ny = Nx;
const int Niter = 100; /* Number of algorithm iterations */
const int NormIteration = 10; /* Recalculate norm after given number of iterations. 0 to disable norm calculation */
const int PrintTime = 1; /* Output overall time of compute/communication part */
struct subarray {
int rank, comm_size; /* MPI rank and communicator size */
int x_size, y_size; /* Subarray size excluding border rows and columns */
MPI_Aint l_nbh_offt; /* Offset predecessor data to update */
};
#define ROW_SIZE(S) ((S).x_size + 2)
#define XY_2_IDX(X,Y,S) (((Y)+1)*ROW_SIZE(S)+((X)+1))
/* Subroutine to create and initialize initial state of input subarrays.
 * Assembles the local grid (zero interior, fixed boundary values) in a host
 * staging buffer, copies it into two freshly allocated device buffers, and
 * releases the staging buffer. */
void InitDeviceArrays(double **A_dev_1, double **A_dev_2, sycl::queue q, struct subarray *sub)
{
  size_t total_size = (sub->x_size + 2) * (sub->y_size + 2);
  /* Host staging buffer used only to assemble the initial state. */
  double *A = sycl::malloc_host < double >(total_size, q);
  *A_dev_1 = sycl::malloc_device < double >(total_size, q);
  *A_dev_2 = sycl::malloc_device < double >(total_size, q);
  /* Zero the whole local grid, including the halo rows/columns. */
  for (int i = 0; i < (sub->y_size + 2); i++)
    for (int j = 0; j < (sub->x_size + 2); j++)
      A[i * (sub->x_size + 2) + j] = 0.0;
  if (sub->rank == 0) /* set top boundary */
    for (int i = 1; i <= sub->x_size; i++)
      A[i] = 1.0;
  if (sub->rank == (sub->comm_size - 1)) /* set bottom boundary */
    for (int i = 1; i <= sub->x_size; i++)
      A[(sub->x_size + 2) * (sub->y_size + 1) + i] = 10.0;
  for (int i = 1; i <= sub->y_size; i++) {
    int row_offt = i * (sub->x_size + 2);
    A[row_offt] = 1.0; /* set left boundary */
    A[row_offt + sub->x_size + 1] = 1.0; /* set right boundary */
  }
  /* Move input arrays to device */
  q.memcpy(*A_dev_1, A, sizeof(double) * total_size);
  q.memcpy(*A_dev_2, A, sizeof(double) * total_size);
  q.wait();
  /* Fix: release the host staging buffer — it was previously leaked
   * (the OpenMP variant of this sample frees its staging buffer). */
  sycl::free(A, q);
}
/* Setup subarray size and layout processed by current rank.
 * Rows (Ny) are split evenly across ranks; the first `tail` ranks absorb
 * one extra row each when comm_size does not divide Ny. l_nbh_offt is the
 * offset inside the predecessor's RMA window where this rank's top border
 * row must be written. */
void GetMySubarray(struct subarray *sub)
{
  MPI_Comm_size(MPI_COMM_WORLD, &sub->comm_size);
  MPI_Comm_rank(MPI_COMM_WORLD, &sub->rank);
  sub->y_size = Ny / sub->comm_size;
  sub->x_size = Nx;
  sub->l_nbh_offt = (sub->x_size + 2) * (sub->y_size + 1) + 1;
  /* Fix: the leftover row count is Ny % comm_size. The previous
   * `y_size % comm_size` (quotient modulo communicator size) is wrong
   * whenever comm_size does not divide Ny, e.g. Ny=10, comm_size=3. */
  int tail = Ny % sub->comm_size;
  if (tail != 0) {
    if (sub->rank < tail)
      sub->y_size++;
    /* If the predecessor also holds an extra row, shift our put offset
     * down by one full row. */
    if ((sub->rank > 0) && ((sub->rank - 1) < tail))
      sub->l_nbh_offt += (sub->x_size + 2);
  }
}
/* Distributed Jacobian driver: two device buffers are double-buffered per
 * iteration; each one is exposed through its own MPI RMA window so the halo
 * rows can be MPI_Put directly into the neighbours' device memory while the
 * interior is recomputed on the GPU. */
int main(int argc, char *argv[])
{
  double t_start;
  struct subarray my_subarray = { };
  double *A_device[2] = { };
  double norm = 0.0;
  MPI_Win win[2] = { MPI_WIN_NULL, MPI_WIN_NULL };
  /* Initialization of runtime and initial state of data */
  sycl::queue q(sycl::gpu_selector_v);
  MPI_Init(&argc, &argv);
  GetMySubarray(&my_subarray);
  InitDeviceArrays(&A_device[0], &A_device[1], q, &my_subarray);
  /* Create RMA window using device memory */
  MPI_Win_create(A_device[0],
                 sizeof(double) * (my_subarray.x_size + 2) * (my_subarray.y_size + 2),
                 sizeof(double), MPI_INFO_NULL, MPI_COMM_WORLD, &win[0]);
  MPI_Win_create(A_device[1],
                 sizeof(double) * (my_subarray.x_size + 2) * (my_subarray.y_size + 2),
                 sizeof(double), MPI_INFO_NULL, MPI_COMM_WORLD, &win[1]);
  /* Start RMA exposure epoch */
  MPI_Win_fence(0, win[0]);
  MPI_Win_fence(0, win[1]);
  if (PrintTime) {
    t_start = MPI_Wtime();
  }
  for (int i = 0; i < Niter; ++i) {
    /* Double buffering: read from `a`, write into `a_out`; the window that
       wraps `a_out` (cwin) is the communication target this iteration. */
    MPI_Win cwin = win[(i + 1) % 2];
    double *a = A_device[i % 2];
    double *a_out = A_device[(i + 1) % 2];
    {
      /* Calculate values on borders to initiate communications early */
      q.submit([&](auto & h) {
        h.parallel_for(sycl::range(my_subarray.x_size), [ =] (auto index) {
          int column = index[0];
          int idx = XY_2_IDX(column, 0, my_subarray);
          a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1]
                               + a[idx - ROW_SIZE(my_subarray)]
                               + a[idx + ROW_SIZE(my_subarray)]);
          idx = XY_2_IDX(column, my_subarray.y_size - 1, my_subarray);
          a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1]
                               + a[idx - ROW_SIZE(my_subarray)]
                               + a[idx + ROW_SIZE(my_subarray)]);
        });
      }).wait();
    }
    /* Perform 1D halo-exchange with neighbours: push our freshly computed
       border rows straight into the neighbours' device windows. */
    if (my_subarray.rank != 0) {
      int idx = XY_2_IDX(0, 0, my_subarray);
      MPI_Put(&a_out[idx], my_subarray.x_size, MPI_DOUBLE,
              my_subarray.rank - 1, my_subarray.l_nbh_offt,
              my_subarray.x_size, MPI_DOUBLE, cwin);
    }
    if (my_subarray.rank != (my_subarray.comm_size - 1)) {
      int idx = XY_2_IDX(0, my_subarray.y_size - 1, my_subarray);
      MPI_Put(&a_out[idx], my_subarray.x_size, MPI_DOUBLE,
              my_subarray.rank + 1, 1,
              my_subarray.x_size, MPI_DOUBLE, cwin);
    }
    /* Recalculate internal points in parallel with communications */
    {
      q.submit([&](auto & h) {
        h.parallel_for(sycl::range(my_subarray.y_size - 2, my_subarray.x_size), [ =] (auto index) {
          int idx = XY_2_IDX(index[1], index[0] + 1, my_subarray);
          a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1]
                               + a[idx - ROW_SIZE(my_subarray)]
                               + a[idx + ROW_SIZE(my_subarray)]);
        });
      }).wait();
    }
    /* Calculate and report norm value after given number of iterations */
    if ((NormIteration > 0) && ((NormIteration - 1) == i % NormIteration)) {
      double rank_norm = 0.0;
      {
        /* Sum of squared per-point deltas via a SYCL reduction; the buffer
           destructor writes the result back into rank_norm. */
        sycl::buffer<double> norm_buf(&rank_norm, 1);
        q.submit([&](auto & h) {
          auto sumr = sycl::reduction(norm_buf, h, sycl::plus<>());
          h.parallel_for(sycl::range(my_subarray.x_size, my_subarray.y_size), sumr, [=] (auto index, auto &v) {
            int idx = XY_2_IDX(index[1], index[0], my_subarray);
            double diff = a_out[idx] - a[idx];
            v += (diff * diff);
          });
        }).wait();
      }
      /* Get global norm value */
      MPI_Reduce(&rank_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
      if (my_subarray.rank == 0) {
        printf("NORM value on iteration %d: %f\n", i+1, sqrt(norm));
      }
    }
    /* Ensure all communications complete before next iteration */
    MPI_Win_fence(0, cwin);
  }
  if (PrintTime) {
    double avg_time;
    double rank_time;
    rank_time = MPI_Wtime() - t_start;
    /* Average the per-rank wall time across the communicator. */
    MPI_Reduce(&rank_time, &avg_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (my_subarray.rank == 0) {
      avg_time = avg_time/my_subarray.comm_size;
      printf("Average solver time: %f(sec)\n", avg_time);
    }
  }
  if (my_subarray.rank == 0) {
    printf("SUCCESS\n");
  }
  MPI_Win_free(&win[1]);
  MPI_Win_free(&win[0]);
  MPI_Finalize();
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/MPI/jacobian_solver/src/02_jacobian_device_mpi_one-sided_gpu_aware/mpi3_onesided_jacobian_gpu_openmp.c
|
/*==============================================================
* Copyright © 2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
* ============================================================= */
/* Distributed Jacobian computation sample using OpenMP GPU offload and MPI-3 one-sided.
*/
#include "mpi.h"
#include <omp.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
const int Nx = 16384; /* Grid size */
const int Ny = Nx;
const int Niter = 100; /* Number of algorithm iterations */
const int NormIteration = 10; /* Recalculate norm after given number of iterations. 0 to disable norm calculation */
const int PrintTime = 1; /* Output overall time of compute/communication part */
struct subarray {
int rank, comm_size; /* MPI rank and communicator size */
int x_size, y_size; /* Subarray size excluding border rows and columns */
MPI_Aint l_nbh_offt; /* Offset predecessor data to update */
};
#define ROW_SIZE(S) ((S).x_size + 2)
#define XY_2_IDX(X,Y,S) (((Y)+1)*ROW_SIZE(S)+((X)+1))
/* Subroutine to create and initialize initial state of input subarrays */
void InitDeviceArrays(double **A_dev_1, double **A_dev_2, struct subarray *sub)
{
size_t total_size = (sub->x_size + 2) * (sub->y_size + 2);
int device_id = omp_get_default_device();
int host_id = omp_get_initial_device();
double *A = (double*) malloc(total_size * sizeof(double));
*A_dev_1 = (double*) omp_target_alloc_device(total_size * sizeof(double), device_id);
*A_dev_2 = (double*) omp_target_alloc_device(total_size * sizeof(double), device_id);
for (int i = 0; i < (sub->y_size + 2); i++)
for (int j = 0; j < (sub->x_size + 2); j++)
A[i * (sub->x_size + 2) + j] = 0.0;
if (sub->rank == 0) /* set top boundary */
for (int i = 1; i <= sub->x_size; i++)
A[i] = 1.0; /* set bottom boundary */
if (sub->rank == (sub->comm_size - 1))
for (int i = 1; i <= sub->x_size; i++)
A[(sub->x_size + 2) * (sub->y_size + 1) + i] = 10.0;
for (int i = 1; i <= sub->y_size; i++) {
int row_offt = i * (sub->x_size + 2);
A[row_offt] = 1.0; /* set left boundary */
A[row_offt + sub->x_size + 1] = 1.0; /* set right boundary */
}
/* Move input arrays to device */
omp_target_memcpy(*A_dev_1, A, sizeof(double) * total_size, 0, 0, device_id, host_id);
omp_target_memcpy(*A_dev_2, A, sizeof(double) * total_size, 0, 0, device_id, host_id);
free(A);
}
/* Setup subarray size and layout processed by current rank.
 * Ny rows are divided evenly; the first `tail` ranks take one extra row
 * when the division is uneven. l_nbh_offt is the offset inside the
 * predecessor's RMA window receiving this rank's top border row. */
void GetMySubarray(struct subarray *sub)
{
  MPI_Comm_size(MPI_COMM_WORLD, &sub->comm_size);
  MPI_Comm_rank(MPI_COMM_WORLD, &sub->rank);
  sub->y_size = Ny / sub->comm_size;
  sub->x_size = Nx;
  sub->l_nbh_offt = (sub->x_size + 2) * (sub->y_size + 1) + 1;
  /* Fix: leftover rows are Ny % comm_size, not y_size % comm_size (the
   * quotient modulo the communicator size is meaningless when comm_size
   * does not divide Ny). */
  int tail = Ny % sub->comm_size;
  if (tail != 0) {
    if (sub->rank < tail)
      sub->y_size++;
    /* Predecessor with an extra row shifts our put offset by one row. */
    if ((sub->rank > 0) && ((sub->rank - 1) < tail))
      sub->l_nbh_offt += (sub->x_size + 2);
  }
}
/* Distributed Jacobian driver (OpenMP offload + MPI-3 one-sided):
 * two device buffers are double-buffered per iteration, each wrapped in its
 * own RMA window so halo rows can be MPI_Put into the neighbours' device
 * memory while the interior is recomputed on the device. */
int main(int argc, char *argv[])
{
  double t_start;
  struct subarray my_subarray = { };
  double *A_device1 = NULL;
  double *A_device2 = NULL;
  MPI_Win win[2] = { MPI_WIN_NULL, MPI_WIN_NULL };
  /* Initialization of runtime and initial state of data */
  MPI_Init(&argc, &argv);
  GetMySubarray(&my_subarray);
  InitDeviceArrays(&A_device1, &A_device2, &my_subarray);
  /* Create RMA window using device memory */
  MPI_Win_create(A_device1,
                 sizeof(double) * (my_subarray.x_size + 2) * (my_subarray.y_size + 2),
                 sizeof(double), MPI_INFO_NULL, MPI_COMM_WORLD, &win[0]);
  MPI_Win_create(A_device2,
                 sizeof(double) * (my_subarray.x_size + 2) * (my_subarray.y_size + 2),
                 sizeof(double), MPI_INFO_NULL, MPI_COMM_WORLD, &win[1]);
  /* Start RMA exposure epoch */
  MPI_Win_fence(0, win[0]);
  MPI_Win_fence(0, win[1]);
  if (PrintTime) {
    t_start = MPI_Wtime();
  }
  #pragma omp target data map(to: Niter, my_subarray, win[0:2], NormIteration) use_device_ptr(A_device1, A_device2)
  {
    for (int i = 0; i < Niter; ++i) {
      /* Double buffering: read from `a`, write into `a_out`; cwin is the
         window wrapping `a_out`, the communication target this iteration. */
      MPI_Win cwin = win[(i + 1) % 2];
      double *a = (i % 2) ? A_device1 : A_device2;
      double *a_out = ((1 + i) % 2) ? A_device1 : A_device2;
      /* Offload compute loop to the device */
      #pragma omp target teams distribute parallel for is_device_ptr(a, a_out)
      /* Calculate values on borders to initiate communications early */
      for (int _column = 0; _column < my_subarray.x_size; ++_column) {
        int idx = XY_2_IDX(_column, 0, my_subarray);
        a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1] + a[idx - ROW_SIZE(my_subarray)] +
                             a[idx + ROW_SIZE(my_subarray)]);
        idx = XY_2_IDX(_column, my_subarray.y_size - 1, my_subarray);
        a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1] + a[idx - ROW_SIZE(my_subarray)] +
                             a[idx + ROW_SIZE(my_subarray)]);
      }
      /* Perform halo-exchange with neighbours: push the freshly computed
         border rows straight into the neighbours' device windows. */
      if (my_subarray.rank != 0) {
        int idx = XY_2_IDX(0, 0, my_subarray);
        MPI_Put(&a_out[idx], my_subarray.x_size, MPI_DOUBLE,
                my_subarray.rank - 1, my_subarray.l_nbh_offt,
                my_subarray.x_size, MPI_DOUBLE, cwin);
      }
      if (my_subarray.rank != (my_subarray.comm_size - 1)) {
        int idx = XY_2_IDX(0, my_subarray.y_size - 1, my_subarray);
        MPI_Put(&a_out[idx], my_subarray.x_size, MPI_DOUBLE,
                my_subarray.rank + 1, 1,
                my_subarray.x_size, MPI_DOUBLE, cwin);
      }
      /* Offload compute loop to the device */
      #pragma omp target teams distribute parallel for is_device_ptr(a, a_out) collapse(2)
      /* Recalculate internal points in parallel with communication */
      for (int row = 1; row < my_subarray.y_size - 1; ++row) {
        for (int column = 0; column < my_subarray.x_size; ++column) {
          int idx = XY_2_IDX(column, row, my_subarray);
          a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1] + a[idx - ROW_SIZE(my_subarray)]
                               + a[idx + ROW_SIZE(my_subarray)]);
        }
      }
      if ((NormIteration > 0) && ((NormIteration - 1) == i % NormIteration)) {
        double result_norm = 0.0;
        double norm = 0.0;
        /* Offload compute loop to the device */
        #pragma omp target teams distribute parallel for is_device_ptr(a, a_out) reduction(+:norm) collapse(2)
        /* Calculate and report norm value after given number of iterations */
        for (int row = 0; row < my_subarray.y_size; ++row) {
          for (int column = 0; column < my_subarray.x_size; ++column) {
            int idx = XY_2_IDX(column, row, my_subarray);
            double diff = a_out[idx] - a[idx];
            norm += diff*diff;
          }
        }
        /* Combine the per-rank partial norms on rank 0. */
        MPI_Reduce(&norm, &result_norm, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
        if (my_subarray.rank == 0) {
          printf("NORM value on iteration %d: %f\n", i+1, sqrt(result_norm));
        }
      }
      /* Ensure all communications are complete before next iteration */
      MPI_Win_fence(0, cwin);
    }
  }
  if (PrintTime) {
    double avg_time;
    double rank_time;
    rank_time = MPI_Wtime() - t_start;
    /* Average the per-rank wall time across the communicator. */
    MPI_Reduce(&rank_time, &avg_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (my_subarray.rank == 0) {
      avg_time = avg_time/my_subarray.comm_size;
      printf("Average solver time: %f(sec)\n", avg_time);
    }
  }
  if (my_subarray.rank == 0) {
    printf("SUCCESS\n");
  }
  MPI_Win_free(&win[1]);
  MPI_Win_free(&win[0]);
  MPI_Finalize();
  return 0;
}
|
c
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/MPI/jacobian_solver/src/01_jacobian_host_mpi_one-sided/mpi3_onesided_jacobian.c
|
/*==============================================================
* Copyright © 2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
* ============================================================= */
/* Distributed Jacobian computation sample using MPI-3 one-sided communications.
*/
#include "mpi.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
const int Nx = 16384; /* Grid size */
const int Ny = Nx;
const int Niter = 100; /* Number of algorithm iterations */
const int NormIteration = 10; /* Recalculate norm after given number of iterations. 0 to disable norm calculation */
const int PrintTime = 1; /* Output overall time of compute/communication part */
struct subarray {
int rank, comm_size; /* MPI rank and communicator size */
int x_size, y_size; /* Subarray size excluding border rows and columns */
MPI_Aint l_nbh_offt; /* Offset predecessor data to update */
};
#define ROW_SIZE(S) ((S).x_size + 2)
#define XY_2_IDX(X,Y,S) (((Y)+1)*ROW_SIZE(S)+((X)+1))
/* Allocate the two working copies of the local subarray and fill them with
 * the initial state: zero interior plus fixed boundary values (top row 1.0
 * on the first rank, bottom row 10.0 on the last rank, sides 1.0 on all). */
void InitArrays(double **A_dev_1, double **A_dev_2, struct subarray *sub)
{
  const int row_len = sub->x_size + 2;          /* columns incl. halo */
  size_t total_size = (sub->x_size + 2) * (sub->y_size + 2);
  double *init = (double *) malloc(total_size * sizeof(double));
  *A_dev_1 = (double *) malloc(total_size * sizeof(double));
  *A_dev_2 = (double *) malloc(total_size * sizeof(double));
  /* Start from an all-zero grid. */
  for (int r = 0; r < (sub->y_size + 2); r++)
    for (int c = 0; c < row_len; c++)
      init[r * row_len + c] = 0.0;
  /* Top boundary: first rank only. */
  if (sub->rank == 0)
    for (int c = 1; c <= sub->x_size; c++)
      init[c] = 1.0;
  /* Bottom boundary: last rank only. */
  if (sub->rank == (sub->comm_size - 1))
    for (int c = 1; c <= sub->x_size; c++)
      init[row_len * (sub->y_size + 1) + c] = 10.0;
  /* Side boundaries on every rank. */
  for (int r = 1; r <= sub->y_size; r++) {
    init[r * row_len] = 1.0;
    init[r * row_len + sub->x_size + 1] = 1.0;
  }
  /* Publish the assembled state into both working buffers. */
  memcpy(*A_dev_1, init, sizeof(double) * total_size);
  memcpy(*A_dev_2, init, sizeof(double) * total_size);
  free(init);
}
/* Setup subarray size and layout processed by current rank.
 * Ny rows are split evenly; the first `tail` ranks absorb one extra row
 * when comm_size does not divide Ny. l_nbh_offt locates, inside the
 * predecessor's RMA window, the row that receives this rank's top border. */
void GetMySubarray(struct subarray *sub)
{
  MPI_Comm_size(MPI_COMM_WORLD, &sub->comm_size);
  MPI_Comm_rank(MPI_COMM_WORLD, &sub->rank);
  sub->y_size = Ny / sub->comm_size;
  sub->x_size = Nx;
  sub->l_nbh_offt = (sub->x_size + 2) * (sub->y_size + 1) + 1;
  /* Fix: remaining rows are Ny % comm_size; the previous
   * `y_size % comm_size` loses rows whenever comm_size does not divide Ny. */
  int tail = Ny % sub->comm_size;
  if (tail != 0) {
    if (sub->rank < tail)
      sub->y_size++;
    /* Account for the predecessor's possible extra row. */
    if ((sub->rank > 0) && ((sub->rank - 1) < tail))
      sub->l_nbh_offt += (sub->x_size + 2);
  }
}
/* Host-only distributed Jacobian driver: two host buffers are
 * double-buffered per iteration, each exposed via its own MPI RMA window so
 * the halo rows can be MPI_Put directly into the neighbours' memory while
 * the interior is recomputed. */
int main(int argc, char *argv[])
{
  double t_start;
  struct subarray my_subarray = { };
  double *A_1 = NULL;
  double *A_2 = NULL;
  MPI_Win win[2] = { MPI_WIN_NULL, MPI_WIN_NULL };
  /* Initialization of runtime and initial state of data */
  MPI_Init(&argc, &argv);
  GetMySubarray(&my_subarray);
  InitArrays(&A_1, &A_2, &my_subarray);
  /* Create RMA window using host memory */
  MPI_Win_create(A_1,
                 sizeof(double) * (my_subarray.x_size + 2) * (my_subarray.y_size + 2),
                 sizeof(double), MPI_INFO_NULL, MPI_COMM_WORLD, &win[0]);
  MPI_Win_create(A_2,
                 sizeof(double) * (my_subarray.x_size + 2) * (my_subarray.y_size + 2),
                 sizeof(double), MPI_INFO_NULL, MPI_COMM_WORLD, &win[1]);
  /* Start RMA exposure epoch */
  MPI_Win_fence(0, win[0]);
  MPI_Win_fence(0, win[1]);
  if (PrintTime) {
    t_start = MPI_Wtime();
  }
  for (int i = 0; i < Niter; ++i) {
    /* Double buffering: read from `a`, write into `a_out`; cwin wraps
       `a_out` and is the communication target this iteration. */
    MPI_Win cwin = win[(i + 1) % 2];
    double *a = (i % 2) ? A_1 : A_2;
    double *a_out = ((1 + i) % 2) ? A_1 : A_2;
    /* Calculate values on borders to initiate communications early */
    for (int _column = 0; _column < my_subarray.x_size; ++_column) {
      int idx = XY_2_IDX(_column, 0, my_subarray);
      a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1] + a[idx - ROW_SIZE(my_subarray)] +
                           a[idx + ROW_SIZE(my_subarray)]);
      idx = XY_2_IDX(_column, my_subarray.y_size - 1, my_subarray);
      a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1] + a[idx - ROW_SIZE(my_subarray)] +
                           a[idx + ROW_SIZE(my_subarray)]);
    }
    /* Perform halo-exchange with neighbours: push the freshly computed
       border rows into the neighbours' windows. */
    if (my_subarray.rank != 0) {
      int idx = XY_2_IDX(0, 0, my_subarray);
      MPI_Put(&a_out[idx], my_subarray.x_size, MPI_DOUBLE,
              my_subarray.rank - 1, my_subarray.l_nbh_offt,
              my_subarray.x_size, MPI_DOUBLE, cwin);
    }
    if (my_subarray.rank != (my_subarray.comm_size - 1)) {
      int idx = XY_2_IDX(0, my_subarray.y_size - 1, my_subarray);
      MPI_Put(&a_out[idx], my_subarray.x_size, MPI_DOUBLE,
              my_subarray.rank + 1, 1,
              my_subarray.x_size, MPI_DOUBLE, cwin);
    }
    /* Recalculate internal points in parallel with communication */
    for (int row = 1; row < my_subarray.y_size - 1; ++row) {
      for (int column = 0; column < my_subarray.x_size; ++column) {
        int idx = XY_2_IDX(column, row, my_subarray);
        a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1] + a[idx - ROW_SIZE(my_subarray)]
                             + a[idx + ROW_SIZE(my_subarray)]);
      }
    }
    /* Calculate norm value after given number of iterations */
    if ((NormIteration > 0) && ((NormIteration - 1) == i % NormIteration)) {
      double result_norm = 0.0;
      double norm = 0.0;
      /* Sum of squared per-point deltas between the two buffers. */
      for (int row = 0; row < my_subarray.y_size; ++row) {
        for (int column = 0; column < my_subarray.x_size; ++column) {
          int idx = XY_2_IDX(column, row, my_subarray);
          double diff = a_out[idx] - a[idx];
          norm += diff*diff;
        }
      }
      /* Combine the per-rank partial norms on rank 0. */
      MPI_Reduce(&norm, &result_norm, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
      if (my_subarray.rank == 0) {
        printf("NORM value on iteration %d: %f\n", i+1, sqrt(result_norm));
      }
    }
    /* Ensure all communications are complete before next iteration */
    MPI_Win_fence(0, cwin);
  }
  if (PrintTime) {
    double avg_time;
    double rank_time;
    rank_time = MPI_Wtime() - t_start;
    /* Average the per-rank wall time across the communicator. */
    MPI_Reduce(&rank_time, &avg_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (my_subarray.rank == 0) {
      avg_time = avg_time/my_subarray.comm_size;
      printf("Average solver time: %f(sec)\n", avg_time);
    }
  }
  if (my_subarray.rank == 0) {
    printf("SUCCESS\n");
  }
  MPI_Win_free(&win[1]);
  MPI_Win_free(&win[0]);
  MPI_Finalize();
  return 0;
}
|
c
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/MPI/jacobian_solver/src/03_jacobian_device_mpi_one-sided_device_initiated/mpi3_onesided_jacobian_gpu_sycl_device_initiated.cpp
|
/*==============================================================
* Copyright © 2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
* ============================================================= */
/* Distributed Jacobian computation sample using SYCL GPU offload and MPI-3 one-sided,
 * with device-initiated communications.
 */
#include "mpi.h"
#include <sycl.hpp>
#include <vector>
#include <iostream>
const int Nx = 16384; /* Grid size */
const int Ny = Nx;
const int Niter = 100; /* Number of algorithm iterations */
const int NormIteration = 10; /* Recalculate norm after given number of iterations. 0 to disable norm calculation */
const int PrintTime = 1; /* Output overall time of compute/communication part */
/* Per-rank decomposition descriptor: each rank owns a horizontal stripe of the
 * global Nx x Ny grid, stored with one halo row/column on every side. */
struct subarray {
    int rank, comm_size; /* MPI rank and communicator size */
    int x_size, y_size;  /* Subarray size excluding border rows and columns */
    MPI_Aint l_nbh_offt; /* Offset predecessor data to update: displacement in the
                          * predecessor's RMA window where this rank's top border
                          * row is deposited by MPI_Put */
};
/* Padded row length, including the two halo columns. */
#define ROW_SIZE(S) ((S).x_size + 2)
/* Linear index of interior point (X, Y), skipping the halo row and column. */
#define XY_2_IDX(X,Y,S) (((Y)+1)*ROW_SIZE(S)+((X)+1))
/* Subroutine to create and initialize initial state of input subarrays.
 *
 * Allocates a host staging array plus the two device ping-pong arrays,
 * zero-fills everything, applies the boundary conditions on the host
 * (top global row = 1.0 on rank 0, bottom global row = 10.0 on the last
 * rank, left/right columns = 1.0 on every rank), then copies the same
 * initial state into both device arrays.
 * Note: the host staging buffer A is not freed here; it lives for the
 * remainder of the process (sample code). */
void InitDeviceArrays(double **A_dev_1, double **A_dev_2, sycl::queue q, struct subarray *sub)
{
    size_t total_size = (sub->x_size + 2) * (sub->y_size + 2);
    double *A = sycl::malloc_host < double >(total_size, q);
    *A_dev_1 = sycl::malloc_device < double >(total_size, q);
    *A_dev_2 = sycl::malloc_device < double >(total_size, q);
    /* Zero-fill the whole padded array, halo rows/columns included */
    for (int i = 0; i < (sub->y_size + 2); i++)
        for (int j = 0; j < (sub->x_size + 2); j++)
            A[i * (sub->x_size + 2) + j] = 0.0;
    if (sub->rank == 0) /* set top boundary */
        for (int i = 1; i <= sub->x_size; i++)
            A[i] = 1.0;
    if (sub->rank == (sub->comm_size - 1)) /* set bottom boundary */
        for (int i = 1; i <= sub->x_size; i++)
            A[(sub->x_size + 2) * (sub->y_size + 1) + i] = 10.0;
    for (int i = 1; i <= sub->y_size; i++) {
        int row_offt = i * (sub->x_size + 2);
        A[row_offt] = 1.0; /* set left boundary */
        A[row_offt + sub->x_size + 1] = 1.0; /* set right boundary */
    }
    /* Move input arrays to device */
    q.memcpy(*A_dev_1, A, sizeof(double) * total_size);
    q.memcpy(*A_dev_2, A, sizeof(double) * total_size);
    q.wait();
}
/* Setup subarray size and layout processed by current rank */
/* Setup subarray size and layout processed by current rank.
 *
 * The Ny grid rows are distributed as evenly as possible: every rank gets
 * Ny / comm_size rows and the first (Ny % comm_size) ranks receive one
 * extra row.  l_nbh_offt is the displacement inside the predecessor's RMA
 * window at which this rank deposits its top border row (the predecessor's
 * bottom halo row, skipping the left halo column). */
void GetMySubarray(struct subarray *sub)
{
    MPI_Comm_size(MPI_COMM_WORLD, &sub->comm_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &sub->rank);
    sub->y_size = Ny / sub->comm_size;
    sub->x_size = Nx;
    sub->l_nbh_offt = (sub->x_size + 2) * (sub->y_size + 1) + 1;
    /* BUGFIX: the remainder rows must be computed from the total row count
     * (Ny), not from the per-rank base size: with "y_size % comm_size" the
     * per-rank sizes did not sum to Ny unless Ny happened to be divisible
     * by comm_size. */
    int tail = Ny % sub->comm_size;
    if (tail != 0) {
        if (sub->rank < tail)
            sub->y_size++;
        /* The predecessor owns one extra row, so its bottom halo row sits
         * one padded row further down. */
        if ((sub->rank > 0) && ((sub->rank - 1) < tail))
            sub->l_nbh_offt += (sub->x_size + 2);
    }
}
/*
 * Distributed Jacobi iteration driver (SYCL, device-initiated MPI-3
 * one-sided).  Two device arrays ping-pong each iteration; the halo
 * exchange is issued with MPI_Put from inside the SYCL kernel.
 */
int main(int argc, char *argv[])
{
    double t_start;
    struct subarray my_subarray = { };
    double *A_device[2] = { };
    MPI_Win win[2] = { MPI_WIN_NULL, MPI_WIN_NULL };
    double norm = 0.0;
    int provided;

    /* Initialization of runtime and initial state of data */
    sycl::queue q(sycl::gpu_selector_v);
    /* MPI_THREAD_MULTIPLE is required for device-initiated communications */
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    GetMySubarray(&my_subarray);
    InitDeviceArrays(&A_device[0], &A_device[1], q, &my_subarray);

#ifdef GROUP_SIZE_DEFAULT
    int work_group_size = GROUP_SIZE_DEFAULT;
#else
    int work_group_size =
        q.get_device().get_info<sycl::info::device::max_work_group_size>();
#endif
    /* The kernel splits every row evenly across the single work-group, so
     * the group size must divide Nx. */
    if ((Nx % work_group_size) != 0) {
        if (my_subarray.rank == 0) {
            printf("For simplification, sycl::info::device::max_work_group_size should be divider of X dimention of array\n");
            printf("Please adjust matrix size, or define GROUP_SIZE_DEFAULT\n");
            /* BUGFIX: report the remainder the guard actually tests
             * (Nx % work_group_size), not work_group_size % Nx. */
            printf("sycl::info::device::max_work_group_size=%d Nx=%d (%d)\n", work_group_size, Nx, Nx % work_group_size);
            MPI_Abort(MPI_COMM_WORLD, -1);
        }
    }

    /* Create RMA window using device memory */
    MPI_Win_create(A_device[0],
                   sizeof(double) * (my_subarray.x_size + 2) * (my_subarray.y_size + 2),
                   sizeof(double), MPI_INFO_NULL, MPI_COMM_WORLD, &win[0]);
    MPI_Win_create(A_device[1],
                   sizeof(double) * (my_subarray.x_size + 2) * (my_subarray.y_size + 2),
                   sizeof(double), MPI_INFO_NULL, MPI_COMM_WORLD, &win[1]);

    /* Start RMA exposure epoch */
    MPI_Win_fence(0, win[0]);
    MPI_Win_fence(0, win[1]);

    if (PrintTime) {
        t_start = MPI_Wtime();
    }

    for (int i = 0; i < Niter; ++i)
    {
        /* Ping-pong arrays: read from a, write a_out; cwin is the window
         * exposing a_out, so halo rows land in the neighbours' next input. */
        MPI_Win cwin = win[(i + 1) % 2];
        double *a = A_device[i % 2];
        double *a_out = A_device[(i + 1) % 2];

        /* Single work-group kernel: border rows first (to start the halo
         * exchange early), then device-initiated MPI_Put from work-item 0,
         * then the interior rows overlap with the communication. */
        q.submit([&](auto & h) {
            h.parallel_for(sycl::nd_range<1>(work_group_size, work_group_size),
                           [=](sycl::nd_item<1> item) {
                int local_id = item.get_local_id();
                int col_per_wg = my_subarray.x_size / work_group_size;
                int my_x_lb = col_per_wg * local_id;
                int my_x_ub = my_x_lb + col_per_wg;
                /* Calculate values on borders to initiate communications early */
                for (int column = my_x_lb; column < my_x_ub; column ++) {
                    int idx = XY_2_IDX(column, 0, my_subarray);
                    a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1]
                                         + a[idx - ROW_SIZE(my_subarray)]
                                         + a[idx + ROW_SIZE(my_subarray)]);
                    idx = XY_2_IDX(column, my_subarray.y_size - 1, my_subarray);
                    a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1]
                                         + a[idx - ROW_SIZE(my_subarray)]
                                         + a[idx + ROW_SIZE(my_subarray)]);
                }
                /* All border values must be visible before work-item 0 sends them */
                item.barrier(sycl::access::fence_space::global_space);
                if (local_id == 0) {
                    /* Perform 1D halo-exchange with neighbours */
                    if (my_subarray.rank != 0) {
                        int idx = XY_2_IDX(0, 0, my_subarray);
                        MPI_Put(&a_out[idx], my_subarray.x_size, MPI_DOUBLE,
                                my_subarray.rank - 1, my_subarray.l_nbh_offt,
                                my_subarray.x_size, MPI_DOUBLE, cwin);
                    }
                    if (my_subarray.rank != (my_subarray.comm_size - 1)) {
                        int idx = XY_2_IDX(0, my_subarray.y_size - 1, my_subarray);
                        MPI_Put(&a_out[idx], my_subarray.x_size, MPI_DOUBLE,
                                my_subarray.rank + 1, 1,
                                my_subarray.x_size, MPI_DOUBLE, cwin);
                    }
                }
                /* Recalculate internal points in parallel with communications */
                for (int row = 1; row < my_subarray.y_size - 1; ++row) {
                    for (int column = my_x_lb; column < my_x_ub; column ++) {
                        int idx = XY_2_IDX(column, row, my_subarray);
                        a_out[idx] = 0.25 * (a[idx - 1] + a[idx + 1]
                                             + a[idx - ROW_SIZE(my_subarray)]
                                             + a[idx + ROW_SIZE(my_subarray)]);
                    }
                }
                item.barrier(sycl::access::fence_space::global_space);
            });
        }).wait();

        /* Calculate and report norm value after given number of iterations */
        if ((NormIteration > 0) && ((NormIteration - 1) == i % NormIteration)) {
            double rank_norm = 0.0;
            {
                /* Buffer destruction at scope exit writes the reduced value
                 * back into rank_norm. */
                sycl::buffer<double> norm_buf(&rank_norm, 1);
                q.submit([&](auto & h) {
                    auto sumr = sycl::reduction(norm_buf, h, sycl::plus<>());
                    h.parallel_for(sycl::range(my_subarray.x_size, my_subarray.y_size), sumr, [=] (auto index, auto &v) {
                        int idx = XY_2_IDX(index[0], index[1], my_subarray);
                        double diff = a_out[idx] - a[idx];
                        v += (diff * diff);
                    });
                }).wait();
            }
            /* Get global norm value */
            MPI_Reduce(&rank_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
            if (my_subarray.rank == 0) {
                /* BUGFIX: report the actual iteration number (i + 1), as the
                 * host-initiated variant does.  The previous code printed
                 * "passed_iters + batch_iters + 1" with two counters that
                 * were never incremented, so every report said iteration 1. */
                printf("NORM value on iteration %d: %f\n", i + 1, sqrt(norm));
            }
        }

        /* Ensure all communications complete before next iteration */
        MPI_Win_fence(0, cwin);
    }

    if (PrintTime) {
        double avg_time;
        double rank_time;
        rank_time = MPI_Wtime() - t_start;
        MPI_Reduce(&rank_time, &avg_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
        if (my_subarray.rank == 0) {
            avg_time = avg_time / my_subarray.comm_size;
            printf("Average solver time: %f(sec)\n", avg_time);
        }
    }

    if (my_subarray.rank == 0) {
        printf("[%d] SUCCESS\n", my_subarray.rank);
    }

    MPI_Win_free(&win[1]);
    MPI_Win_free(&win[0]);
    MPI_Finalize();
    return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneTBB/tbb-task-sycl/src/tbb-task-sycl.cpp
|
//==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <array>
#include <iostream>
#include <numeric>
#include <sycl/sycl.hpp>
#include <tbb/blocked_range.h>
#include <tbb/global_control.h>
#include <tbb/parallel_for.h>
#include "tbb/task_group.h"
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities//include/dpc_common.hpp
#include "dpc_common.hpp"
#define VERBOSE
const float alpha = 0.5;  // coeff for triad calculation (c = a + b * alpha)
const size_t array_size = 16;
std::array<float, array_size> a_array;      // input
std::array<float, array_size> b_array;      // input
std::array<float, array_size> c_array;      // output, written by the GPU task
std::array<float, array_size> c_array_tbb;  // output, written by the CPU task
// Functor run as a TBB task: computes the triad
// c_array[i] = a_array[i] + b_array[i] * alpha on a SYCL device.
class ExecuteOnGpu {
  const char* message;  // label printed when the task starts

 public:
  ExecuteOnGpu(const char* str) : message(str) {}
  void operator()() const {
    std::cout << message << "\n";
    // By including all the SYCL work in a {} block, we ensure
    // all SYCL tasks must complete before exiting the block
    { // starting SYCL code
      const float coeff = alpha; // coeff is a local variable
      sycl::range<1> n_items{array_size};
      sycl::buffer a_buffer(a_array);
      sycl::buffer b_buffer(b_array);
      sycl::buffer c_buffer(c_array);
      sycl::queue q(sycl::default_selector_v, dpc_common::exception_handler);
      q.submit([&](sycl::handler& h) {
         sycl::accessor a_accessor(a_buffer, h, sycl::read_only);
         sycl::accessor b_accessor(b_buffer, h, sycl::read_only);
         sycl::accessor c_accessor(c_buffer, h, sycl::write_only);
         h.parallel_for(n_items, [=](sycl::id<1> index) {
           c_accessor[index] = a_accessor[index] + b_accessor[index] * coeff;
         }); // end of the kernel -- parallel for
       })
          .wait_and_throw(); // end of the commands for the SYCL queue
    } // end of the scope for SYCL code; wait until queued work completes
  } // operator
};
class ExecuteOnCpu {
const char* message;
public:
ExecuteOnCpu(const char* str) : message(str) {}
void operator()() const {
std::cout << message << "\n";
tbb::parallel_for(tbb::blocked_range<int>(0, a_array.size()),
[&](tbb::blocked_range<int> r) {
for (int index = r.begin(); index < r.end(); ++index) {
c_array_tbb[index] =
a_array[index] + b_array[index] * alpha;
}
});
} // operator()
};
void PrintArr(const char* text, const std::array<float, array_size>& array) {
std::cout << text;
for (const auto& s : array) std::cout << s << ' ';
std::cout << "\n";
}
int main() {
// init input arrays
std::iota(a_array.begin(), a_array.end(), 0);
std::iota(b_array.begin(), b_array.end(), 0);
// start tbb task group
tbb::task_group tg;
// tbb::task_scheduler_init init(2);
int nth = 4; // number of threads
auto mp = tbb::global_control::max_allowed_parallelism;
tbb::global_control gc(mp, nth);
tg.run(ExecuteOnGpu("executing on GPU")); // spawn task and return
tg.run(ExecuteOnCpu("executing on CPU")); // spawn task and return
tg.wait(); // wait for tasks to complete
// Serial execution
std::array<float, array_size> c_gold;
for (size_t i = 0; i < array_size; ++i)
c_gold[i] = a_array[i] + alpha * b_array[i];
// Compare golden triad with heterogeneous triad
if (!std::equal(std::begin(c_array), std::end(c_array), std::begin(c_gold)))
std::cout << "Heterogenous triad error.\n";
else
std::cout << "Heterogenous triad correct.\n";
// Compare golden triad with TBB triad
if (!std::equal(std::begin(c_array_tbb), end(c_array_tbb),
std::begin(c_gold)))
std::cout << "TBB triad error.\n";
else
std::cout << "TBB triad correct.\n";
#ifdef VERBOSE
PrintArr("input array a_array: ", a_array);
PrintArr("input array b_array: ", b_array);
PrintArr("output array c_array on GPU: ", c_array);
PrintArr("output array c_array_tbb on CPU: ", c_array_tbb);
#endif
} // main
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneTBB/tbb-resumable-tasks-sycl/src/tbb-resumable-tasks-sycl.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <array>
#include <atomic>
#include <cmath>
#include <iostream>
#include <sstream>
#include <thread>
#include <sycl/sycl.hpp>
#include <tbb/blocked_range.h>
#include <tbb/global_control.h>
#include <tbb/task.h>
#include <tbb/task_group.h>
#include <tbb/parallel_for.h>
constexpr float ratio = 0.5; // CPU or GPU offload ratio: the GPU handles the first ceil(array_size * ratio) elements, the CPU the rest
constexpr float alpha = 0.5; // coeff for triad calculation (c = a + alpha * b)
constexpr std::size_t array_size = 16;
std::array<float, array_size> a_array; // input array
std::array<float, array_size> b_array; // input array
std::array<float, array_size> c_array; // output array, filled partly by GPU and partly by CPU
void print_array( const char* text, const std::array<float, array_size>& array ) {
std::cout << text;
for (const auto& s: array) std::cout << s << ' ';
std::cout << std::endl;
}
// Runs the SYCL portion of the triad on a dedicated service thread and
// resumes a suspended TBB task when the device work has finished.
//
// Lifecycle: the service thread is started in the constructor and spins
// (yielding) until submit() publishes the offload ratio and the TBB
// suspend point via the atomic submit_flag; the destructor joins the
// thread.
class AsyncActivity {
    float offload_ratio;
    std::atomic<bool> submit_flag;          // set once by submit(); releases the service thread
    tbb::task::suspend_point suspend_point; // handle of the suspended TBB task to resume
    std::thread service_thread;
public:
    AsyncActivity() : offload_ratio(0), submit_flag(false),
        service_thread([this] {
            // Wait until the job will be submitted into the async activity
            while(!submit_flag)
                std::this_thread::yield();
            // The GPU processes the first ceil(array_size * offload_ratio) elements.
            std::size_t array_size_sycl = std::ceil(array_size * offload_ratio);
            // Note that this lambda will be executed concurrently with the task
            // passed into tbb::task_group
            std::stringstream sstream;
            sstream << "start index for GPU = 0; end index for GPU = "
                    << array_size_sycl << std::endl;
            std::cout << sstream.str();
            const float coeff = alpha; // coeff is a local variable
            { // starting SYCL code
                sycl::range<1> n_items{array_size_sycl};
                sycl::buffer<cl_float, 1> a_buffer(a_array.data(), n_items);
                sycl::buffer<cl_float, 1> b_buffer(b_array.data(), n_items);
                sycl::buffer<cl_float, 1> c_buffer(c_array.data(), n_items);
                sycl::queue q;
                q.submit([&](sycl::handler& h) {
                    auto a_accessor = a_buffer.get_access<sycl::access::mode::read>(h);
                    auto b_accessor = b_buffer.get_access<sycl::access::mode::read>(h);
                    auto c_accessor = c_buffer.get_access<sycl::access::mode::write>(h);
                    h.parallel_for(n_items, [=](sycl::id<1> index) {
                        c_accessor[index] = a_accessor[index] + coeff * b_accessor[index];
                    }); // end of the kernel
                }).wait();
            }
            // Pass a signal into the main thread that the GPU work is completed
            tbb::task::resume(suspend_point);
        }) {}
    ~AsyncActivity() {
        service_thread.join();
    }
    // Publish the work parameters, then release the service thread.  The
    // fields are written before the (seq_cst) store to submit_flag, so the
    // service thread observes them once the flag reads true.
    void submit( float ratio, tbb::task::suspend_point sus_point ) {
        offload_ratio = ratio;
        suspend_point = sus_point;
        submit_flag = true;
    }
}; // class AsyncActivity
// Splits the triad between the GPU (first ceil(array_size * ratio)
// elements, done by AsyncActivity) and the CPU (the remaining elements,
// done by a tbb::task_group task), then validates the combined result
// against a serial reference.
int main() {
    // init input arrays
    for (std::size_t i = 0; i < array_size; ++i) {
        a_array[i] = i;
        b_array[i] = i;
    }
    std::size_t n_threads = 4;
    tbb::global_control gc(tbb::global_control::max_allowed_parallelism, n_threads + 1); // One more thread, but sleeping
    tbb::task_group tg;
    AsyncActivity activity;  // starts the service thread immediately
    // Run CPU part
    // tg.run is a non- blocking call
    tg.run([&]{
        std::size_t i_start = static_cast<std::size_t>(std::ceil(array_size * ratio));
        std::size_t i_end = array_size;
        std::stringstream sstream;
        sstream << "start index for CPU = " << i_start
                << "; end index for CPU = " << i_end << std::endl;
        std::cout << sstream.str();
        tbb::parallel_for(i_start, i_end, []( std::size_t index ) {
            c_array[index] = a_array[index] + alpha * b_array[index];
        });
    });
    // Run GPU part: suspend the current task and hand its suspend point to
    // the async activity; tbb::task::resume() is later called from the
    // activity's service thread once the device work completes.
    tbb::task::suspend([&]( tbb::task::suspend_point suspend_point ) {
        activity.submit(ratio, suspend_point);
    });
    tg.wait();
    // Serial execution
    std::array<float, array_size> c_gold;
    for (std::size_t i = 0; i < array_size; ++i) {
        c_gold[i] = a_array[i] + alpha * b_array[i];
    }
    // Compare golden triad with heterogeneous triad
    if (c_array != c_gold) {
        std::cout << "Heterogeneous triad error." << std::endl;
    } else {
        std::cout << "Heterogeneous triad correct." << std::endl;
    }
    print_array("c_array: ", c_array);
    print_array("c_gold: ", c_gold);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneTBB/tbb-async-sycl/src/tbb-async-sycl.cpp
|
//==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <cmath> //for std::ceil
#include <array>
#include <atomic>
#include <iostream>
#include <thread>
#include <numeric>
#include <sycl/sycl.hpp>
#include <tbb/blocked_range.h>
#include <tbb/flow_graph.h>
#include <tbb/global_control.h>
#include <tbb/parallel_for.h>
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities//include/dpc_common.hpp
#include "dpc_common.hpp"
struct done_tag{}; // empty message type signalling stage completion through the flow graph
const float ratio = 0.5; // CPU to GPU offload ratio: GPU takes the first ceil(array_size * ratio) elements
const float alpha = 0.5; // coeff for triad calculation (c = a + alpha * b)
const size_t array_size = 16;
std::array<float, array_size> a_array; // input
std::array<float, array_size> b_array; // input
std::array<float, array_size> c_array; // output, filled partly by GPU and partly by CPU
void PrintArr(const char* text, const std::array<float, array_size>& array) {
std::cout << text;
for (const auto& s : array) std::cout << s << ' ';
std::cout << "\n";
}
using async_node_type = tbb::flow::async_node<float, done_tag>;
using gateway_type = async_node_type::gateway_type;
// Runs the SYCL part of the triad on a dedicated service thread and signals
// the flow graph through the async_node gateway when the device work is
// done.  The thread is started in the constructor, spins (yielding) until
// submit() publishes the gateway pointer and offload ratio via the atomic
// submit_flag, and is joined in the destructor.
class AsyncActivity {
  gateway_type* gateway_ptr;      // set by submit(); used to signal completion
  float offload_ratio;
  std::atomic<bool> submit_flag;  // set last in submit(); releases the service thread
  std::thread service_thread;
 public:
  AsyncActivity() : gateway_ptr(nullptr), offload_ratio(0), submit_flag(false),
    service_thread( [this] {
      while( !submit_flag ) {
        std::this_thread::yield();
      }
      // Execute the kernel over a portion of the array range
      size_t array_size_sycl = std::ceil(array_size * offload_ratio);
      std::cout << "start index for GPU = 0; end index for GPU = "
                << array_size_sycl << "\n";
      const float coeff = alpha; // coeff is a local variable
      // By including all the SYCL work in a {} block, we ensure
      // all SYCL tasks must complete before exiting the block
      { // starting SYCL code
        sycl::range<1> n_items{array_size_sycl};
        sycl::buffer a_buffer(a_array);
        sycl::buffer b_buffer(b_array);
        sycl::buffer c_buffer(c_array);
        sycl::queue q(sycl::default_selector_v, dpc_common::exception_handler);
        q.submit([&](sycl::handler& h) {
          sycl::accessor a_accessor(a_buffer, h, sycl::read_only);
          sycl::accessor b_accessor(b_buffer, h, sycl::read_only);
          sycl::accessor c_accessor(c_buffer, h, sycl::write_only);
          h.parallel_for( n_items, [=](sycl::id<1> index) {
            c_accessor[index] = a_accessor[index] + b_accessor[index] * coeff;
          }); // end of the kernel -- parallel for
        }).wait();
      } // end of the scope for SYCL code; wait until queued work completes;
      // Emit the completion message into the graph and release the wait
      // that submit() reserved.
      gateway_ptr->try_put(done_tag{});
      gateway_ptr->release_wait();
    } ) {}
  ~AsyncActivity() {
    service_thread.join();
  }
  // Publish the gateway and offload ratio, reserve the graph wait, then
  // release the service thread by setting the atomic flag last.
  void submit(float ratio, gateway_type& gateway) {
    gateway.reserve_wait();
    offload_ratio = ratio;
    gateway_ptr = &gateway;
    submit_flag = true;
  }
};
// Builds a TBB flow graph that splits the triad across GPU (async_node +
// AsyncActivity) and CPU (function_node), joins both completion signals,
// and validates the combined result against a serial reference.
int main() {
  // init input arrays
  std::iota(a_array.begin(), a_array.end(), 0);
  std::iota(b_array.begin(), b_array.end(), 0);
  int nth = 4; // number of threads
  auto mp = tbb::global_control::max_allowed_parallelism;
  tbb::global_control gc(mp, nth + 1); // One more thread, but sleeping
  tbb::flow::graph g;
  // Input node: emits the offload ratio exactly once, then stops the graph input.
  tbb::flow::input_node<float> in_node{g,
    [&](tbb::flow_control& fc) -> float {
      static bool has_run = false;
      if (has_run) fc.stop();
      has_run = true;
      return ratio;
    }
  };
  // CPU node: processes the tail [ceil(array_size*ratio), array_size) of the array.
  tbb::flow::function_node<float, done_tag> cpu_node{
      g, tbb::flow::unlimited, [&](float offload_ratio) {
        size_t i_start = static_cast<size_t>(std::ceil(array_size * offload_ratio));
        size_t i_end = static_cast<size_t>(array_size);
        std::cout << "start index for CPU = " << i_start
                  << "; end index for CPU = " << i_end << "\n";
        tbb::parallel_for(tbb::blocked_range<size_t>{i_start, i_end},
                          [&](const tbb::blocked_range<size_t>& r) {
                            for (size_t i = r.begin(); i < r.end(); ++i)
                              c_array[i] = a_array[i] + alpha * b_array[i];
                          });
        return done_tag{};
      }};
  // async node -- GPU: hands the work to AsyncActivity's service thread;
  // the done_tag is injected later through the gateway.
  AsyncActivity async_act;
  async_node_type a_node{
      g, tbb::flow::unlimited,
      [&async_act](const float& offload_ratio, gateway_type& gateway) {
        async_act.submit(offload_ratio, gateway);
      }};
  // join node: waits for both the GPU and the CPU completion messages.
  using join_t =
      tbb::flow::join_node<std::tuple<done_tag, done_tag>, tbb::flow::queueing>;
  join_t node_join{g};
  // out node: runs only after both halves finished; verifies the result.
  tbb::flow::function_node<join_t::output_type> out_node{
      g, tbb::flow::unlimited, [&](const join_t::output_type&) {
        // Serial execution
        std::array<float, array_size> c_gold;
        for (size_t i = 0; i < array_size; ++i)
          c_gold[i] = a_array[i] + alpha * b_array[i];
        // Compare golden triad with heterogeneous triad
        if (!std::equal(std::begin(c_array), std::end(c_array),
                        std::begin(c_gold)))
          std::cout << "Heterogenous triad error.\n";
        else
          std::cout << "Heterogenous triad correct.\n";
        PrintArr("c_array: ", c_array);
        PrintArr("c_gold : ", c_gold);
      }}; // end of out node
  // construct graph
  tbb::flow::make_edge(in_node, a_node);
  tbb::flow::make_edge(in_node, cpu_node);
  tbb::flow::make_edge(a_node, tbb::flow::input_port<0>(node_join));
  tbb::flow::make_edge(cpu_node, tbb::flow::input_port<1>(node_join));
  tbb::flow::make_edge(node_join, out_node);
  in_node.activate();
  g.wait_for_all();
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/sparse_conjugate_gradient/utils.hpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Returns a pseudo-random scalar uniformly distributed in [-0.5, 0.5],
// driven by std::rand() (seed with std::srand for reproducibility).
template <typename fp> fp rand_scalar() {
    const fp normalized = fp(std::rand()) / fp(RAND_MAX);
    return normalized - fp(0.5);
}
// Create the 3-array CSR representation (ia, ja, values),
// initialized by a stencil-based matrix with size nx=ny=nz
// Fill the 3-array CSR representation (ia, ja, a) of the 27-point stencil
// matrix on an nx x nx x nx grid.  The caller must pre-size ia to nrows+1
// and ja/a to 27*nrows.  Diagonal entries are 26 and every in-bounds
// off-diagonal stencil entry is -1; columns within each row are emitted in
// ascending order.
template <typename fp, typename intType>
void generate_sparse_matrix(const intType nx,
                            std::vector<intType> &ia,
                            std::vector<intType> &ja,
                            std::vector<fp> &a)
{
    const intType ny = nx, nz = nx;
    intType nnz = 0;
    ia[0] = 0;
    for (intType iz = 0; iz < nz; iz++) {
        for (intType iy = 0; iy < ny; iy++) {
            for (intType ix = 0; ix < nx; ix++) {
                const intType row = iz * nx * ny + iy * nx + ix;
                // Visit the 3x3x3 neighbourhood in lexicographic (z, y, x)
                // offset order, skipping points outside the grid.
                for (intType dz = -1; dz <= 1; dz++) {
                    if (iz + dz < 0 || iz + dz >= nz)
                        continue;
                    for (intType dy = -1; dy <= 1; dy++) {
                        if (iy + dy < 0 || iy + dy >= ny)
                            continue;
                        for (intType dx = -1; dx <= 1; dx++) {
                            if (ix + dx < 0 || ix + dx >= nx)
                                continue;
                            const intType col = row + (dz * ny + dy) * nx + dx;
                            ja[nnz] = col;
                            a[nnz++] = (col == row) ? fp(26.) : fp(-1.);
                        }
                    }
                }
                ia[row + 1] = nnz;  // cumulative count closes this row
            }
        }
    }
}
|
hpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/sparse_conjugate_gradient/sparse_cg.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
/*
*
* Content:
* This sample demonstrates use of oneAPI Math Kernel Library (oneMKL)
* sparse BLAS API to solve a system of linear equations (Ax=b).
*
* It uses the preconditioned conjugate gradient method with a symmetric
* Gauss-Seidel preconditioner:
*
* Compute r_0 = b - Ax_0
* w_0 = B^{-1}*r_0 and p_0 = w_0
* while not converged
* {
* alpha_k = (r_k , w_k )/(Ap_k , p_k )
* x_{k+1} = x_k + alpha_k*p_k
* r_{k+1} = r_k - alpha_k*A*p_k
* w_{k+1} = B^{-1}*r_{k+1}
* beta_k = (r_{k+1}, w_{k+1})/(r_k , w_k )
* p_{k+1} = w_{k+1} + beta_k*p_k
* }
*
* where A = -L+D-L^t; B = (D-L)*D^{-1}*(D-L^t).
*
* The supported floating point data types for gemm matrix data are:
* float
* double
*
*/
// stl includes
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <iterator>
#include <limits>
#include <list>
#include <vector>
#include <sycl/sycl.hpp>
#include "oneapi/mkl.hpp"
#include "utils.hpp"
using namespace oneapi;
/**
 * Elementwise scale by the matrix diagonal: t[row] *= d[row] for every
 * row in [0, nrows).  Used between the two triangular solves to apply
 * the D factor of the symmetric Gauss-Seidel preconditioner
 * B = (D-L) * D^{-1} * (D-L^t).
 */
template <typename fp, typename intType>
static void diagonal_mv(sycl::queue main_queue,
                        const intType nrows,
                        sycl::buffer<fp, 1> &d_buffer,
                        sycl::buffer<fp, 1> &t_buffer)
{
    main_queue.submit([&](sycl::handler &cgh) {
        // BUGFIX: the kernel only READS d[row], so the accessor must
        // request access::mode::read.  The previous access::mode::write
        // declared the opposite data dependency and write-only access for
        // a value the kernel reads.
        auto d = (d_buffer).template get_access<sycl::access::mode::read>(cgh);
        auto t = (t_buffer).template get_access<sycl::access::mode::read_write>(cgh);
        auto diagonalMVKernel = [=](sycl::item<1> item) {
            const int row = item.get_id(0);
            t[row] *= d[row];
        };
        cgh.parallel_for(sycl::range<1>(nrows), diagonalMVKernel);
    });
}
/**
 * Builds the 27-point stencil system A*x = b (b = 1, x_0 = 0) and solves it
 * with the preconditioned CG iteration described at the top of this file,
 * using oneMKL sparse BLAS (gemv/trsv) and BLAS (dot/axpy/nrm2/copy) on the
 * given SYCL device.  Iterates until the preconditioned-residual norm drops
 * below 1e-3 relative to its initial value, or 100 iterations.
 */
template <typename fp, typename intType>
void run_sparse_cg_example(const sycl::device &dev)
{
    // Matrix data size
    intType size = 4;
    intType nrows = size * size * size;
    // Input matrix in CSR format
    std::vector<intType> ia;
    std::vector<intType> ja;
    std::vector<fp> a;
    ia.resize(nrows + 1);
    ja.resize(27 * nrows);
    a.resize(27 * nrows);
    generate_sparse_matrix<fp, intType>(size, ia, ja, a);
    // Vectors x and y
    std::vector<fp> x;
    std::vector<fp> b;
    x.resize(nrows);
    b.resize(nrows);
    // Init right hand side and vector x
    for (int i = 0; i < nrows; i++) {
        b[i] = 1;
        x[i] = 0;
    }
    // Catch asynchronous exceptions
    auto exception_handler = [](sycl::exception_list exceptions) {
        for (std::exception_ptr const &e : exceptions) {
            try {
                std::rethrow_exception(e);
            }
            catch (sycl::exception const &e) {
                std::cout << "Caught asynchronous SYCL "
                             "exception during sparse CG:\n"
                          << e.what() << std::endl;
            }
        }
    };
    //
    // Execute CG
    //
    // create execution queue and buffers of matrix data
    sycl::queue main_queue(dev, exception_handler);
    sycl::buffer<intType, 1> ia_buffer(ia.data(), nrows + 1);
    sycl::buffer<intType, 1> ja_buffer(ja.data(), ia[nrows]);
    sycl::buffer<fp, 1> a_buffer(a.data(), ia[nrows]);
    sycl::buffer<fp, 1> x_buffer(x);
    sycl::buffer<fp, 1> b_buffer(b);
    // Work vectors of the CG iteration: residual r, preconditioned residual w,
    // search direction p, mat-vec product t, plus helpers.
    sycl::buffer<fp, 1> r_buffer(nrows);
    sycl::buffer<fp, 1> w_buffer(nrows);
    sycl::buffer<fp, 1> p_buffer(nrows);
    sycl::buffer<fp, 1> t_buffer(nrows);
    sycl::buffer<fp, 1> y_buffer(nrows);
    sycl::buffer<fp, 1> d_buffer(nrows);
    sycl::buffer<fp, 1> temp_buffer(1);  // single-element buffer for scalar results
    // create and initialize handle for a Sparse Matrix in CSR format
    mkl::sparse::matrix_handle_t handle;
    try {
        mkl::sparse::init_matrix_handle(&handle);
        mkl::sparse::set_csr_data(handle, nrows, nrows, mkl::index_base::zero,
                                  ia_buffer, ja_buffer, a_buffer);
        mkl::sparse::set_matrix_property(handle, mkl::sparse::property::symmetric);
        mkl::sparse::set_matrix_property(handle, mkl::sparse::property::sorted);
        // Pre-optimize the two triangular solves (preconditioner) and the mat-vec.
        mkl::sparse::optimize_trsv(main_queue, mkl::uplo::lower,
                                   mkl::transpose::nontrans,
                                   mkl::diag::nonunit, handle);
        mkl::sparse::optimize_trsv(main_queue, mkl::uplo::upper,
                                   mkl::transpose::nontrans,
                                   mkl::diag::nonunit, handle);
        mkl::sparse::optimize_gemv(main_queue, mkl::transpose::nontrans, handle);
        // Extract diag(A) into d_buffer; needed to apply the D factor of the
        // symmetric Gauss-Seidel preconditioner B = (D-L) D^{-1} (D-L^t).
        main_queue.submit([&](sycl::handler &cgh) {
            auto ia = (ia_buffer).template get_access<sycl::access::mode::read>(cgh);
            auto ja = (ja_buffer).template get_access<sycl::access::mode::read>(cgh);
            auto a = (a_buffer).template get_access<sycl::access::mode::read>(cgh);
            auto d = (d_buffer).template get_access<sycl::access::mode::write>(cgh);
            auto extractDiagonalKernel = [=](sycl::item<1> item) {
                const int row = item.get_id(0);
                for (intType i = ia[row]; i < ia[row + 1]; i++) {
                    if (ja[i] == row) {
                        d[row] = a[i];
                        break;
                    }
                }
            };
            cgh.parallel_for(sycl::range<1>(nrows), extractDiagonalKernel);
        });
        // initial residual equal to RHS cause of zero initial vector
        mkl::blas::copy(main_queue, nrows, b_buffer, 1, r_buffer, 1);
        // Calculation B^{-1}r_0
        {
            mkl::sparse::trsv(main_queue, mkl::uplo::lower,
                              mkl::transpose::nontrans, mkl::diag::nonunit,
                              handle, r_buffer, t_buffer);
            diagonal_mv<fp, intType>(main_queue, nrows, d_buffer, t_buffer);
            mkl::sparse::trsv(main_queue, mkl::uplo::upper,
                              mkl::transpose::nontrans, mkl::diag::nonunit,
                              handle, t_buffer, w_buffer);
        }
        mkl::blas::copy(main_queue, nrows, w_buffer, 1, p_buffer, 1);
        // Calculate initial norm of correction
        fp initial_norm_of_correction = 0;
        mkl::blas::nrm2(main_queue, nrows, w_buffer, 1, temp_buffer);
        {
            // Host accessor: blocks until nrm2 finishes, then reads the scalar.
            auto temp_accessor = temp_buffer.template get_access<sycl::access::mode::read>();
            initial_norm_of_correction = temp_accessor[0];
        }
        fp norm_of_correction = initial_norm_of_correction;
        // Start of main PCG algorithm
        std::int32_t k = 0;
        fp alpha, beta, temp;
        mkl::blas::dot(main_queue, nrows, r_buffer, 1, w_buffer, 1, temp_buffer);
        {
            auto temp_accessor = temp_buffer.template get_access<sycl::access::mode::read>();
            temp = temp_accessor[0];  // temp = (r_0, w_0)
        }
        while (norm_of_correction / initial_norm_of_correction > 1.e-3 && k < 100) {
            // Calculate A*p
            mkl::sparse::gemv(main_queue, mkl::transpose::nontrans, 1.0, handle,
                              p_buffer, 0.0, t_buffer);
            // Calculate alpha_k
            mkl::blas::dot(main_queue, nrows, p_buffer, 1, t_buffer, 1, temp_buffer);
            {
                auto temp_accessor =
                    temp_buffer.template get_access<sycl::access::mode::read>();
                alpha = temp / temp_accessor[0];  // alpha = (r,w)/(Ap,p)
            }
            // Calculate x_k = x_k + alpha*p_k
            mkl::blas::axpy(main_queue, nrows, alpha, p_buffer, 1, x_buffer, 1);
            // Calculate r_k = r_k - alpha*A*p_k
            mkl::sparse::gemv(main_queue, mkl::transpose::nontrans, -alpha, handle,
                              p_buffer, 1.0, r_buffer);
            // Calculate w_k = B^{-1}r_k
            {
                mkl::sparse::trsv(main_queue, mkl::uplo::lower,
                                  mkl::transpose::nontrans,
                                  mkl::diag::nonunit, handle, r_buffer, t_buffer);
                diagonal_mv<fp, intType>(main_queue, nrows, d_buffer, t_buffer);
                mkl::sparse::trsv(main_queue, mkl::uplo::upper,
                                  mkl::transpose::nontrans,
                                  mkl::diag::nonunit, handle, t_buffer, w_buffer);
            }
            // Calculate current norm of correction
            mkl::blas::nrm2(main_queue, nrows, w_buffer, 1, temp_buffer);
            {
                auto temp_accessor = temp_buffer.template get_access<sycl::access::mode::read>();
                norm_of_correction = temp_accessor[0];
            }
            std::cout << "\t\trelative norm of residual on " << ++k
                      << " iteration: " << norm_of_correction / initial_norm_of_correction
                      << std::endl;
            if (norm_of_correction <= 1.e-3)
                break;
            // Calculate beta_k
            mkl::blas::dot(main_queue, nrows, r_buffer, 1, w_buffer, 1, temp_buffer);
            {
                auto temp_accessor = temp_buffer.template get_access<sycl::access::mode::read>();
                beta = temp_accessor[0] / temp;  // beta = (r_{k+1},w_{k+1})/(r_k,w_k)
                temp = temp_accessor[0];
            }
            // Calculate p_k = w_k+beta*p_k
            mkl::blas::axpy(main_queue, nrows, beta, p_buffer, 1, w_buffer, 1);
            mkl::blas::copy(main_queue, nrows, w_buffer, 1, p_buffer, 1);
        }
        std::cout << "\n\t\tPreconditioned CG process has successfully converged, and\n"
                  << "\t\tthe following solution has been obtained:\n\n";
        // Print the first few solution components only.
        auto result = x_buffer.template get_access<sycl::access::mode::read>();
        for (std::int32_t i = 0; i < 4; i++) {
            std::cout << "\t\tx[" << i << "] = " << result[i] << std::endl;
        }
        std::cout << "\t\t..." << std::endl;
    }
    catch (std::exception const &e) {
        std::cout << "\t\tCaught exception:\n" << e.what() << std::endl;
    }
    // NOTE(review): if init_matrix_handle itself threw, the handle would be
    // released uninitialized here -- assumed benign in this sample; verify
    // against the oneMKL documentation.
    mkl::sparse::release_matrix_handle(&handle);
}
//
// Description of example setup, apis used and supported floating point type
// precisions
//
void print_banner()
{
    // Banner is assembled from adjacent string literals at compile time;
    // the byte content is identical to the historical output.
    static const char banner_text[] =
        "###############################################################"
        "#########\n"
        "# Sparse Conjugate Gradient Solver\n"
        "# \n"
        "# Uses the preconditioned conjugate gradient algorithm to\n"
        "# iteratively solve the symmetric linear system\n"
        "# \n"
        "# A * x = b\n"
        "# \n"
        "# where A is a symmetric sparse matrix in CSR format, and\n"
        "# x and b are dense vectors.\n"
        "# \n"
        "# Uses the symmetric Gauss-Seidel preconditioner.\n"
        "# \n"
        "###############################################################"
        "#########\n\n";
    std::cout << banner_text;
}
// Entry point: runs the preconditioned CG example in single precision and,
// when the device reports FP64 support, again in double precision.
int main(int argc, char **argv)
{
    print_banner();
    // NOTE(review): sycl::default_selector{} is deprecated in SYCL 2020;
    // sycl::default_selector_v is the modern replacement — confirm toolchain.
    sycl::device my_dev{sycl::default_selector{}};
    std::cout << "Running tests on " << my_dev.get_info<sycl::info::device::name>() << ".\n";
    std::cout << "\tRunning with single precision real data type:" << std::endl;
    run_sparse_cg_example<float, std::int32_t>(my_dev);
    // A non-empty double_fp_config list indicates double precision support.
    if (my_dev.get_info<sycl::info::device::double_fp_config>().size() != 0) {
        std::cout << "\tRunning with double precision real data type:" << std::endl;
        run_sparse_cg_example<double, std::int32_t>(my_dev);
    }
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/black_scholes/src/black_scholes.hpp
|
//==============================================================
// Copyright © 2023 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef __BLACK_SCHOLES_HPP__
#define __BLACK_SCHOLES_HPP__
#include <vector>
#include <chrono>
/******* VERSION *******/
#define MAJOR 1
#define MINOR 6
/******* VERSION *******/
// Scalar type used for all prices; override on the compile line
// (e.g. -DDATA_TYPE=float) for single precision.
#ifndef DATA_TYPE
#define DATA_TYPE double
#endif
// Non-zero enables the host-side verification printout in check().
#ifndef VERBOSE
#define VERBOSE 1
#endif
// Fixed market parameters applied to every option.
constexpr float volatility = 0.30f;
constexpr float risk_free = 0.02f;
// Number of options to price; SMALL_OPT_N selects a tiny debug-sized set.
constexpr size_t opt_n =
#if SMALL_OPT_N
480;
#else
8 * 1024 * 1024;
#endif
// Number of timed benchmark iterations.
#ifndef ITER_N
#define ITER_N 512
#endif
// Fallback definitions so the version banner prints on non-clang compilers.
#ifndef __clang_major__
#define __clang_major__ 0
#endif
#ifndef __clang_minor__
#define __clang_minor__ 0
#endif
#ifndef __VERSION__
#define __VERSION__ __clang_major__
#endif
// Drives the Black-Scholes option pricing benchmark: owns the input/output
// arrays, prices all options on the device (run) and validates the device
// results against a double-precision host reference (check).
class BlackScholes {
public:
    BlackScholes();
    ~BlackScholes();
    void run();
    void check();
private:
    // Arrays of length opt_n, allocated in the constructor (USM shared —
    // see black_scholes_sycl.cpp).
    DATA_TYPE* h_call_result;   // device-computed call prices (output)
    DATA_TYPE* h_put_result;    // device-computed put prices (output)
    DATA_TYPE* h_stock_price;   // randomly generated stock prices (input)
    DATA_TYPE* h_option_strike; // randomly generated strikes (input)
    DATA_TYPE* h_option_years;  // randomly generated maturities (input)
    // Enqueues one pricing pass over all options.
    void body();
};
// Black-Scholes Reference Implementation
// Host-side double-precision reference for the European call price,
// used to validate the device results.
// BSM Formula: https://www.nobelprize.org/prizes/economic-sciences/1997/press-release/
void BlackScholesRefImpl(
    double& call_result,
    double Sf, //Stock price
    double Xf, //Option strike
    double Tf, //Option years
    double Rf, //Riskless rate
    double Vf //Volatility rate
)
{
    // Shared sub-expressions of the Black-Scholes-Merton d1/d2 terms.
    const double sig_sqrt_t = Vf * std::sqrt(Tf);
    const double log_moneyness = std::log(Sf / Xf);
    const double inv_sqrt2 = 1.0 / std::sqrt(2.0);
    const double d1 = (log_moneyness + (Rf + 0.5 * Vf * Vf) * Tf) / sig_sqrt_t;
    const double d2 = (log_moneyness + (Rf - 0.5 * Vf * Vf) * Tf) / sig_sqrt_t;
    // Standard normal CDF expressed through erf: N(d) = (1 + erf(d/sqrt(2)))/2.
    const double N_d1 = 0.5 * (1.0 + std::erf(d1 * inv_sqrt2));
    const double N_d2 = 0.5 * (1.0 + std::erf(d2 * inv_sqrt2));
    // Call price: S*N(d1) - X*exp(-r*t)*N(d2).
    call_result = Sf * N_d1 - Xf * std::exp(-Rf * Tf) * N_d2;
}
void BlackScholes::check()
{
if (VERBOSE) {
std::printf("Creating the reference result...\n");
std::vector<double> h_CallResultCPU(opt_n);
for (size_t opt = 0; opt < opt_n; opt++)
BlackScholesRefImpl(h_CallResultCPU[opt], h_stock_price[opt], h_option_strike[opt], h_option_years[opt], risk_free, volatility);
double sum_delta = 0.0,
sum_ref = 0.0,
max_delta = 0.0,
errorVal = 0.0;
for (size_t i = 0; i < opt_n; i++) {
auto ref = h_CallResultCPU[i];
auto delta = std::fabs(h_CallResultCPU[i] - h_call_result[i]);
if (delta > max_delta) {
max_delta = delta;
}
sum_delta += delta;
sum_ref += std::fabs(ref);
}
if (sum_ref > 1E-5)
std::printf("L1 norm: %E\n", errorVal = sum_delta / sum_ref);
else
std::printf("Avg. diff: %E\n", errorVal = sum_delta / opt_n);
std::printf((errorVal < 5e-4) ? "TEST PASSED\n" : "TEST FAILED\n");
}
}
// Minimal stopwatch over std::chrono::steady_clock.
// start()/stop() bracket an interval; duration() reports it in seconds.
class timer {
    std::chrono::steady_clock::time_point begin_, end_;

public:
    // Begins timing immediately on construction.
    timer() { start(); }
    // (Re)records the starting instant of the interval.
    void start() { begin_ = std::chrono::steady_clock::now(); }
    // Records the ending instant of the interval.
    void stop() { end_ = std::chrono::steady_clock::now(); }
    // Elapsed seconds between the last start()/stop() pair.
    auto duration() { return std::chrono::duration<double>(end_ - begin_).count(); }
};
#endif // __BLACK_SCHOLES_HPP__
|
hpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/black_scholes/src/black_scholes_sycl.cpp
|
//==============================================================
// Copyright © 2023 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include<cstdio>
#if !SYCL_LANGUAGE_VERSION
#error "SYCL is not enabled""
#endif
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include "black_scholes.hpp"
constexpr int sg_size = 32;
#if NON_DEFAULT_SIZE
constexpr int wg_size = 128;
constexpr int block_size = 1;
#else
constexpr int wg_size = 256;
constexpr int block_size = 4;
#endif
sycl::queue* black_scholes_queue;
template<typename Type, int>
class k_BlackScholes;
#if USE_CNDF_C
// Polynomial approximation of the standard normal CDF N(x) using the
// classic Abramowitz & Stegun 26.2.17 coefficients. The Gaussian density
// exp(-x^2/2) is evaluated as 2^(-x^2/(2*ln2)) via sycl::exp2.
template <typename T>
__attribute__((always_inline))
static inline T CNDF_C(T input)
{
    constexpr T inv_sqrt_2xPI = 0.39894228040143270286; // 1/sqrt(2*pi)
    constexpr T CNDF_C1 = 0.2316419;
    constexpr T CNDF_C2 = 0.319381530;
    constexpr T CNDF_C3 = -0.356563782;
    constexpr T CNDF_C4 = 1.781477937;
    constexpr T CNDF_C5 = -1.821255978;
    constexpr T CNDF_C6 = 1.330274429;
    constexpr T CNDF_LN2 = 0.693147180559945309417; // ln(2)
    constexpr T INV_LN2X2 = 1.0 / (CNDF_LN2 * 2.0); // 1/(2*ln2)
    // Evaluate on |input|; the symmetry N(-x) = 1 - N(x) is applied at the end.
    T x = (input < 0.0) ? -input : input;
    T k2 = 1.0 / (1.0 + CNDF_C1 * x);
    // Powers k^2..k^5 of the rational variable k = 1/(1 + C1*|x|).
    T k2_2 = k2 * k2;
    T k2_3 = k2_2 * k2;
    T k2_4 = k2_3 * k2;
    T k2_5 = k2_4 * k2;
    // N(x) = 1 - phi(x)*(C2*k + C3*k^2 + C4*k^3 + C5*k^4 + C6*k^5)
    T output = 1.0 - (inv_sqrt_2xPI * sycl::exp2(-x * x * INV_LN2X2) * ((CNDF_C2 * k2) +
        ((CNDF_C3 * (k2_2)) + (CNDF_C4 * (k2_3)) + (CNDF_C5 * (k2_4)) + (CNDF_C6 * (k2_5)))));
    if (input < 0.0)
        output = (1.0 - output);
    return output;
}
#endif // USE_CNDF_C
// Enqueues one pricing pass: each work-item prices block_size options,
// stepping through the option array with stride wg_size within its group's
// chunk. Does not wait for completion; callers synchronize on the queue.
void BlackScholes::body() {
    // this can not be captured to the kernel. So, we need to copy internals of the class to local variables
    DATA_TYPE* h_stock_price_local = this->h_stock_price;
    DATA_TYPE* h_option_years_local = this->h_option_years;
    DATA_TYPE* h_option_strike_local = this->h_option_strike;
    DATA_TYPE* h_call_result_local = this->h_call_result;
    DATA_TYPE* h_put_result_local = this->h_put_result;
    black_scholes_queue->parallel_for<k_BlackScholes<DATA_TYPE, block_size>>(sycl::nd_range(sycl::range<1>(opt_n / block_size), sycl::range<1>(wg_size)),
        [=](sycl::nd_item<1> item) [[intel::kernel_args_restrict]] [[intel::reqd_sub_group_size(sg_size)]] {
        auto local_id = item.get_local_linear_id();
        auto group_id = item.get_group_linear_id();
#pragma unroll
        // block_size options per work-item, strided by the work-group size.
        for (size_t opt = group_id * block_size * wg_size + local_id, i = 0; i < block_size; opt += wg_size, i++) {
            constexpr DATA_TYPE sigma = volatility;
            const DATA_TYPE s = h_stock_price_local[opt];   // stock price
            const DATA_TYPE t = h_option_years_local[opt];  // years to expiry
            const DATA_TYPE x = h_option_strike_local[opt]; // strike
            // Discounted strike X*exp(-r*t), shared by call and put.
            const DATA_TYPE XexpRT = x * sycl::exp(-risk_free * t);
#if USE_CNDF_C
            // Polynomial-approximation path for N(d1)/N(d2).
            const DATA_TYPE v_sqrt = sigma * sycl::sqrt(t);
            const DATA_TYPE d1 = (sycl::log(s / x) + (risk_free + DATA_TYPE(0.5) * sigma * sigma) * t) / v_sqrt;
            const DATA_TYPE d2 = d1 - v_sqrt;
            const DATA_TYPE n_d1 = CNDF_C(d1);
            const DATA_TYPE n_d2 = CNDF_C(d2);
#else
            // erf-based path: N(d) = 1/2 + 1/2*erf(d/sqrt(2)).
            constexpr DATA_TYPE sqrt1_2 = 0.707106781186547524401;
            DATA_TYPE n_d1 = DATA_TYPE(1. / 2.) + DATA_TYPE(1. / 2.) * sycl::erf(((sycl::log(s / x) + (risk_free + DATA_TYPE(0.5) * sigma * sigma) * t) / (sigma * sycl::sqrt(t))) * sqrt1_2);
            DATA_TYPE n_d2 = DATA_TYPE(1. / 2.) + DATA_TYPE(1. / 2.) * sycl::erf(((sycl::log(s / x) + (risk_free - DATA_TYPE(0.5) * sigma * sigma) * t) / (sigma * sycl::sqrt(t))) * sqrt1_2);
#endif // USE_CNDF_C
            // Call from the BSM formula; put via put-call parity.
            const DATA_TYPE call_val = s * n_d1 - XexpRT * n_d2;
            const DATA_TYPE put_val = call_val + XexpRT - s;
            h_call_result_local[opt] = call_val;
            h_put_result_local[opt] = put_val;
        }
    });
}
// Allocates the USM-shared input/output arrays, zero-fills the result
// buffers, and fills the inputs with uniformly distributed random market
// data using the oneMKL Philox RNG. Blocks until initialization completes.
BlackScholes::BlackScholes()
{
    black_scholes_queue = new sycl::queue;
    h_call_result = sycl::malloc_shared<DATA_TYPE>(opt_n, *black_scholes_queue);
    h_put_result = sycl::malloc_shared<DATA_TYPE>(opt_n, *black_scholes_queue);
    h_stock_price = sycl::malloc_shared<DATA_TYPE>(opt_n, *black_scholes_queue);
    h_option_strike = sycl::malloc_shared<DATA_TYPE>(opt_n, *black_scholes_queue);
    h_option_years = sycl::malloc_shared<DATA_TYPE>(opt_n, *black_scholes_queue);
    // Zero-fill with a DATA_TYPE pattern. Filling with the double literal 0.0
    // deduces the fill pattern type as double and writes opt_n doubles,
    // overrunning the buffers whenever DATA_TYPE is float.
    sycl::event fill_1 = black_scholes_queue->fill(h_call_result, DATA_TYPE{0}, opt_n);
    sycl::event fill_2 = black_scholes_queue->fill(h_put_result, DATA_TYPE{0}, opt_n);
    constexpr int rand_seed = 777;
    namespace mkl_rng = oneapi::mkl::rng;
    // create random number generator object
    mkl_rng::philox4x32x10 engine(
#if !INIT_ON_HOST
        *black_scholes_queue,
#else
        sycl::queue{sycl::cpu_selector_v},
#endif // !INIT_ON_HOST
        rand_seed);
    sycl::event event_1 = mkl_rng::generate(mkl_rng::uniform<DATA_TYPE>(5.0, 50.0), engine, opt_n, h_stock_price);
    sycl::event event_2 = mkl_rng::generate(mkl_rng::uniform<DATA_TYPE>(10.0, 25.0), engine, opt_n, h_option_strike);
    sycl::event event_3 = mkl_rng::generate(mkl_rng::uniform<DATA_TYPE>(1.0, 5.0), engine, opt_n, h_option_years);
    // Also wait on the fills: the queue is not necessarily in-order, so the
    // result buffers must be initialized before the first kernel launch.
    sycl::event::wait({fill_1, fill_2, event_1, event_2, event_3});
}
// Releases every USM allocation made in the constructor, then destroys the
// queue (freed last, since sycl::free needs the owning queue's context).
BlackScholes::~BlackScholes()
{
    sycl::free(h_call_result, *black_scholes_queue);
    sycl::free(h_put_result, *black_scholes_queue);
    sycl::free(h_stock_price, *black_scholes_queue);
    sycl::free(h_option_strike, *black_scholes_queue);
    sycl::free(h_option_years, *black_scholes_queue);
    delete black_scholes_queue;
}
// Prints build/device information, launches one untimed pricing pass, then
// times ITER_N passes and reports throughput in GOptions per second.
void BlackScholes::run()
{
    std::printf("%s Precision Black&Scholes Option Pricing version %d.%d running on %s using DPC++, workgroup size %d, sub-group size %d.\n",
        sizeof(DATA_TYPE) > 4 ? "Double" : "Single", MAJOR, MINOR, black_scholes_queue->get_device().get_info<sycl::info::device::name>().c_str(), wg_size, sg_size);
    std::printf("Compiler Version: %s, LLVM %d.%d based.\n", __VERSION__, __clang_major__, __clang_minor__);
    std::printf("Driver Version : %s\n", black_scholes_queue->get_device().get_info<sycl::info::device::driver_version>().c_str());
    std::printf("Build Time : %s %s\n", __DATE__, __TIME__);
    std::printf("Input Dataset : %zu\n", opt_n);
    // Each iteration prices a call AND a put for every option.
    size_t total_options = 2 * opt_n /*Pricing Call and Put options at the same time, so 2*num_options*/ * ITER_N;
    // One untimed pass before the measurement loop (kept outside the timer).
    body();
    black_scholes_queue->wait();
    std::printf("Pricing %zu Options in %d iterations, %zu Options in total.\n", 2 * opt_n, ITER_N, total_options); fflush(stdout);
    // timer's constructor already calls start(); the explicit start() below
    // restarts the clock right before the timed loop.
    timer t{};
    t.start();
    for (int i = 0; i < ITER_N; i++) {
        body();
    }
    // Single wait after all submissions: measures the full batch.
    black_scholes_queue->wait();
    t.stop();
    std::printf("Completed in %10.5f seconds. GOptions per second: %10.5f\n", t.duration(), static_cast<double>(total_options) / t.duration() / 1e9);
    std::printf("Time Elapsed = %10.5f seconds\n", t.duration()); fflush(stdout);
}
// Entry point: constructs the benchmark (allocating and initializing data),
// runs the timed pricing loop, then validates against the host reference.
int main(int const argc, char const* argv[])
{
    BlackScholes test{};
    test.run();
    test.check();
    return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/block_cholesky_decomposition/auxi.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
/*
* Content:
* Auxiliary subroutines for:
* - Testing accuracy of Cholesky factorization by computing ratio
* ||A-L*L^t||_F/||A||_F of Frobenius norms of the residual to the
* Frobenius norm of the initial matrix and comparing it to 5*EPS.
* - Calculating max_(i=1,...,NRHS){||AX(i)-F(i)||/||F(i)||} of
* ratios of residuals to norms of RHS vectors for a system of
* linear equations with tridiagonal coefficient matrix and
* multiple RHS
*
***********************************************************************/
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>
#include "mkl.h"
/************************************************************************
* Definition:
* ===========
* double test_res(int64_t n, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb, double* d1, int64_t ldd1, double* b1, int64_t ldb1, double* d2, int64_t ldd2, double* b2, int64_t ldb2)
*
* Purpose:
* ========
* Given L*L^t factorization of block tridiagonal matrix A TEST_RES
* computes ratio ||A-L*L^t||_F/||A||_F of Frobenius norm of the residual
* to the Frobenius norm of the initial matrix. The test is considered as
* passed if the ratio does not exceed 5*EPS. The result is returned via
* value of INFO.
*
* Arguments:
* ==========
* N (input) int64_t
* The number of block rows of the matrix A. N > 0.
*
* NB (input) int64_t
* The size of blocks. NB > 0.
*
* D (input) double array, dimension (LDD) * (N*NB)
* The array stores N diagonal blocks (each of size NB by NB)
* of the triangular factor L if factorized matrix. The blocks
* are stored sequentially block by block.
* Caution: upper triangles of diagonal blocks are not zeroed*
* =======
*
* LDD (input) int64_t.
* The leading dimension of the array D, LDD >= NB
*
* B (input) double array, dimension (LDB,(N-1)*NB)
* The array stores sub-diagonal blocks of triangular factor L.
* The blocks are stored sequentially block by block.
*
* LDB (input) int64_t.
* The leading dimension of the array B, LDB >= NB
*
* D1 (work array) double array, dimension (LDD1,N*NB)
* The array is destined for internal computations.
*
* LDD1 (input) int64_t.
* The leading dimension of the array D1, LDD1 >= NB
*
* B1 (work array) double array, dimension (LDB1,(N-1)*NB)
* The array is destined for internal computations.
*
* LDB1 (input) int64_t.
* The leading dimension of the array B1, LDB1 >= NB
*
* D2 (input) double array, dimension (LDD2,N*NB)
* The array stores N diagonal blocks (each of size NB by NB)
* of the initial symmetric positive definite matrix A.
* The blocks are stored sequentially block by block. The
* array is used for comparison.
*
* LDD2 (input) int64_t.
* The leading dimension of the array D2, LDD2 >= NB
*
* B2 (input) double array, dimension (LDB2,(N-1)*NB)
* The array stores sub-diagonal blocks of the initial symmetric
* positive definite matrix A. The blocks are stored
* sequentially block by block. The array is used for comparison.
*
* LDB2 (input) int64_t.
* The leading dimension of the array B2, LDB2 >= NB
*
* INFO (output) int64_t
* = 0: successful exit
* < 0: if INFO = -i, the i-th argument had an illegal value
* = 1: the ratio ||A-L*L^t||_F/||A||_F exceeds 5*EPS
***********************************************************************/
// Computes ||A - L*L^t||_F / ||A||_F for a block tridiagonal Cholesky
// factorization. D/B hold the factor blocks, D1/B1 are workspace, D2/B2
// hold the original matrix A. See the header comment above for details.
double test_res(int64_t n, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb, double* d1, int64_t ldd1, double* b1, int64_t ldb1, double* d2, int64_t ldd2, double* b2, int64_t ldb2) {
    // Matrix accessors (column-major, with the given leading dimensions)
    auto D = [=](int64_t i, int64_t j) -> double& { return d[i + j*ldd]; };
    auto D1 = [=](int64_t i, int64_t j) -> double& { return d1[i + j*ldd1]; };
    auto D2 = [=](int64_t i, int64_t j) -> double& { return d2[i + j*ldd2]; };
    auto B = [=](int64_t i, int64_t j) -> double& { return b[i + j*ldb]; };
    auto B1 = [=](int64_t i, int64_t j) -> double& { return b1[i + j*ldb1]; };
    auto B2 = [=](int64_t i, int64_t j) -> double& { return b2[i + j*ldb2]; };
    // Compute S2 = ||A||_F
    // (diagonal blocks counted once, sub-diagonal blocks twice by symmetry)
    double s = LAPACKE_dlange(MKL_COL_MAJOR, 'F', nb, nb*n, d2, ldd2);
    double s1 = LAPACKE_dlange(MKL_COL_MAJOR, 'F', nb, nb*(n-1), b2, ldb2);
    double s2 = sqrt(s*s+2.0*s1*s1);
    // Copy D -> D1, B -> B1 and nullify the upper triangle of blocks in D1
    // NOTE(review): the dcopy below reads down a column of D (stride 1) and
    // writes across a row of D1 (stride ldd1), so D1 appears to receive the
    // transpose L_k^t of each diagonal block; the left-side trmm calls below
    // then form L_k * (L_k^t). Verify against the original comment wording.
    for (int64_t k = 0; k < n; k++) {
        for(int64_t j = 0; j < nb; j++) {
            cblas_dcopy(nb-j, &D(j, k*nb+j), 1, &D1(j, k*nb+j), ldd1);
            for (int64_t i = 0; i < j; i++) {
                D1(j, k*nb+i) = 0.0;
            }
        }
    }
    for (int64_t k = 0; k < n-1; k++) {
        for (int64_t j = 0; j < nb; j++) {
            cblas_dcopy(nb, &B(0, k*nb+j), 1, &B1(0, k*nb+j), 1);
        }
    }
    // Compute product of lower block bidiagonal matrix by its transpose
    // | L_1             |   | L_1^t C_1^t                  |
    // | B_1 L_2         |   |       L_2^t B_2^t            |
    // |  .    .         | * |             .     .          |
    // |    .    .       |   |               .     .        |
    // |  B_N-2 L_N-1    |   |              L_N-1^t B_N-1^t |
    // |        B_N-1 L_N|   |                      L_N^t   |
    //
    // Result matrix has the following structure
    //   D_1 B_1^t
    //   B_1 D_2  B_2^t
    //       B_2  D_3  B_3^t
    //         .    .     .
    //           .    .     .
    //         B_N-2 D_N-1 B_N-1^t
    //               B_N-1 D_N
    //
    // D_1 := L_1*L_1^t
    cblas_dtrmm(CblasColMajor, CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit, nb, nb, 1.0, d, ldd, d1, ldd1);
    for (int64_t k = 0; k < n-1; k++) {
        // B_k := B_k*L_k^t
        cblas_dtrmm(CblasColMajor, CblasRight, CblasLower, CblasTrans, CblasNonUnit,
            nb, nb, 1.0, &D(0, k*nb), ldd, &B1(0, k*nb), ldb1);
        // D_k := L_k*L_k^t
        cblas_dtrmm(CblasColMajor, CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit,
            nb, nb, 1.0, &D(0, (k+1)*nb), ldd, &D1(0, (k+1)*nb), ldd1);
        // D_k := D_k + B_k*B_k^t
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, nb, nb, nb, 1.0,
            &B(0, k*nb), ldb, &B(0, k*nb), ldb, 1.0,
            &D1(0, (k+1)*nb), ldd1);
    }
    // Compute the difference between the calculated product L*L^t and initial
    // matrix that was factored
    for (int64_t j = 0; j < nb*n; j++) {
        for (int64_t i = 0; i < nb; i++) {
            D1(i,j) = D1(i,j) - D2(i,j);
        }
    }
    for (int64_t j = 0; j < nb*(n-1); j++) {
        for (int64_t i = 0; i < nb; i++) {
            B1(i,j) = B1(i,j) - B2(i,j);
        }
    }
    // Final ratio ||A - L*L^t||_F / ||A||_F
    s = LAPACKE_dlange(MKL_COL_MAJOR, 'F', nb, nb*n, d1, ldd1);
    s1 = LAPACKE_dlange(MKL_COL_MAJOR, 'F', nb, nb*(n-1), b1, ldb1);
    s = sqrt(s*s+2.0*s1*s1)/s2;
    return s;
}
/************************************************************************
* Definition:
* ===========
* double test_res1(int64_t n, int64_t nrhs, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb, double* f, int64_t ldf, double* x, int64_t ldx )
*
* Purpose:
* ========
* Given approximate solution X of system of linear equations A*X=F
* with symmetric positive definite block tridiagonal coefficient matrix
* A =
* D_1 B_1^t
* B_1 D_2 B_2^t
* B_2 D_3 B_3^t
* . . .
* . . .
* B_N-2 D_N-1 B_N-1^t
* B_N-1 D_N
* the routine computes max_(i=1,...,NRHS){||AX(i)-F(i)||/F(i)} of ratios
* of residuals to norms of RHS vectors. he test is considered as passed
* if the value does not exceed 10*EPS where EPS is the machine
* precision.
*
* Arguments:
* ==========
* N (input) int64_t
* The number of block rows of the matrix A. N > 0.
*
* NRHS (input) int64_t
* The number of right hand sides (number of columns in matrix F.
*
* NB (input) int64_t
* The block size of blocks D_j, B_j
*
* D (input) double array, dimension (LDD) * (N*NB)
* The array stores N diagonal blocks (each of size NB by NB)
* of matrix A. The blocks are stored sequentially block by
* block.
* Caution: The diagonal blocks are symmetric matrices - this
* =======
* feature is assumed.
*
* LDD (input) int64_t.
* The leading dimension of the array D, LDD >= NB
*
* B (input) double array, dimension (LDB) * ((N-1)*NB)
* The array stores sub-diagonal blocks of matrix A.
* The blocks are stored sequentially block by block.
*
* LDB (input) int64_t.
* The leading dimension of the array B, LDB >= NB
*
* F (input) double array, dimension (LDF) * (NRHS)
* The right hand sides of the system of linear equations.
*
* LDF (input) int64_t.
* The leading dimension of the array F, LDF >= NB*N
*
* X (input) double array, dimension (LDX) * (NRHS)
* The solutions of the system of linear equations.
*
* LDX (input) int64_t.
* The leading dimension of the array X, LDX >= NB*N
*
* INFO (output) int64_t
* = 0: successful exit
* < 0: if INFO = -i, the i-th argument had an illegal value
* = 1: max_(i=1,...,NRHS){||AX(i)-F(i)||/F(i)} exceeds 10*EPS
* = 10: note enough memory for internal array
***********************************************************************/
// Computes max over RHS columns of ||A*X(i) - F(i)|| / ||F(i)|| for the
// block tridiagonal system A*X = F. F is overwritten with the residuals.
double test_res1(int64_t n, int64_t nrhs, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb, double* f, int64_t ldf, double* x, int64_t ldx ) {
    // Matrix accessors (column-major, with the given leading dimensions)
    auto D = [=](int64_t i, int64_t j) -> double& { return d[i + j*ldd]; };
    auto B = [=](int64_t i, int64_t j) -> double& { return b[i + j*ldb]; };
    auto F = [=](int64_t i, int64_t j) -> double& { return f[i + j*ldf]; };
    auto X = [=](int64_t i, int64_t j) -> double& { return x[i + j*ldx]; };
    std::vector<double> norms(nrhs);
    // Compute norms of RHS vectors
    // NOTE(review): a zero RHS column would make norms[i] == 0 and the final
    // division below produce inf/NaN — assumes non-trivial right hand sides.
    for (int64_t i = 0; i < nrhs; i++) {
        norms[i] = cblas_dnrm2(nb*n, &F(0,i), 1);
    }
    // Out-of-loop compute F(1):=F(1)-D(1)*X(1)-B(1)^t*X(2)
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nb, nrhs, nb, -1.0, d, ldd, x, ldx, 1.0, f, ldf);
    cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, nb, nrhs, nb, -1.0, b,ldb, &X(nb, 0), ldx, 1.0, f, ldf);
    // Interior block rows touch three blocks: B_{k-1}, D_k and B_k^t.
    for (int64_t k = 1; k < n-1; k++) {
        // Compute F(K):=F(K)-B(K-1)*X(K-1)-D(K)*X(K)-B(K)^t*X(K+1)
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nb, nrhs, nb, -1.0, &B(0, (k-1)*nb), ldb, &X((k-1)*nb, 0), ldx, 1.0, &F(k*nb, 0), ldf);
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nb, nrhs, nb, -1.0, &D(0, k*nb), ldd, &X( k*nb, 0), ldx, 1.0, &F(k*nb, 0), ldf);
        cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, nb, nrhs, nb, -1.0, &B(0, k*nb), ldb, &X((k+1)*nb, 0), ldx, 1.0, &F(k*nb, 0), ldf);
    }
    // Out-of-loop compute F(N):=F(N)-B(N-1)*X(N-1)-D(N)*X(N)
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nb, nrhs, nb, -1.0, &B(0, (n-2)*nb), ldb, &X((n-2)*nb, 0), ldx, 1.0, &F((n-1)*nb, 0), ldf);
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nb, nrhs, nb, -1.0, &D(0, (n-1)*nb), ldd, &X((n-1)*nb, 0), ldx, 1.0, &F((n-1)*nb, 0), ldf);
    // Compute norms of residual vectors divided by norms of RHS vectors
    double res = 0.0;
    for (int64_t i = 0; i < nrhs; i++) {
        double s = cblas_dnrm2(n*nb, &F(0,i), 1);
        res = std::max<double>(res, s/norms[i]);
    }
    return res;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/block_cholesky_decomposition/dpbltrf.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
/*
*
* Content:
* Function DPBLTRF for Cholesky factorization of symmetric
* positive definite block tridiagonal matrix.
************************************************************************/
#include <cstdint>
#include <sycl/sycl.hpp>
#include "oneapi/mkl.hpp"
using namespace oneapi;
/************************************************************************
* Definition:
* ===========
* int64_t dpbltrf(sycl::queue queue, int64_t n, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb) {
*
* Purpose:
* ========
* DPBLTRF computes Cholesky L*L^t-factorization of symmetric positive
* definite block tridiagonal matrix A
* D_1 B_1^t
* B_1 D_2 B_2^t
* B_2 D_3 B_3^t
* . . .
* . . .
* B_N-2 D_N-1 B_N-1^t
* B_N-1 D_N
* The factorization has the form A = L*L**t, where L is a lower
* bidiagonal block matrix
* L_1
* C_1 L_2
* C_2 L_3
* . . .
* . . .
* C_N-2 L_N-1
* C_N-1 L_N
* This is a block version of LAPACK DPTTRF subroutine.
*
* Arguments:
* ==========
* QUEUE (input) sycl queue
* The device queue
*
* N (input) int64_t
* The number of block rows of the matrix A. N >= 0.
*
* NB (input) int64_t
* The size of blocks. NB >= 0.
*
* D (input/output) double array, dimension (LDD)*(N*NB)
* On entry, the array stores N diagonal blocks (each of size NB by
* NB) of the matrix to be factored. The blocks are stored
* sequentially: first NB columns of D store block D_1, second NB
* columns store block D_2,...,last NB columns store block D_N.
* Note: As the diagonal blocks are symmetric only lower or upper
* ====
* triangle is needed to store blocks' elements. In this code
* lower storage is used***
* On exit, the array stores diagonal blocks of triangular factor L.
* Diagonal blocks of lower triangular factor L replace
* respective lower triangles of blocks D_j (1 <= j <= N).
* Caution: upper triangles of diagonal blocks are not zeroed on exit
*
* LDD (input) int64_t
* The leading dimension of array D. LDD >= NB.
*
* B (input/output) double array, dimension (LDB)*((N-1)*NB)
* On entry, the array stores sub-diagonal blocks (each of size NB
* by NB) of the matrix to be factored. The blocks are stored
* sequentially: first NB columns of B store block B_1, second
* NB columns store block B_2,...,last NB columns store block
* B_N-1.
* On exit, the array stores sub-diagonal blocks of triangular factor
* L.
*
* LDB (input) int64_t
* The leading dimension of array B. LDB >= NB.
*
* INFO (return) int64_t
* = 0: successful exit
* < 0: if INFO = -i, the i-th argument had an illegal value
* > 0: if INFO = i, the leading minor of order i (and
* therefore the matrix A itself) is not
* positive-definite, and the factorization could not be
* completed. This may indicate an error in forming the
* matrix A.
***********************************************************************/
int64_t dpbltrf(sycl::queue queue, int64_t n, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb) {
    // Matrix accessors (column-major, leading dimensions ldd/ldb)
    auto D = [=](int64_t i, int64_t j) -> double& { return d[(i) + (j)*ldd]; };
    auto B = [=](int64_t i, int64_t j) -> double& { return b[(i) + (j)*ldb]; };
    // LAPACK-style argument validation: -i flags the i-th argument.
    int64_t info = 0;
    if (n < 0)
        info = -1;
    else if (nb < 0)
        info = -2;
    else if (ldd < nb)
        info = -4;
    else if (ldb < nb)
        info = -6;
    if (info)
        return info;
    // Empty matrix: nothing to factor (the original code would still have
    // attempted to factor a non-existent first diagonal block).
    if (n == 0)
        return 0;
    sycl::context context = queue.get_context();
    sycl::device device = queue.get_device();
    // Every potrf call factors an NB-by-NB block with identical parameters,
    // so the scratchpad is sized and allocated ONCE and reused. The original
    // code reallocated it on each loop iteration and leaked it whenever
    // potrf threw (the catch handlers returned without sycl::free).
    std::int64_t scratchpad_size = 0;
    double* scratchpad = nullptr;
    // Compute Cholesky factorization of the first diagonal block: D_1 = L_1*L_1^t
    try {
        scratchpad_size = mkl::lapack::potrf_scratchpad_size<double>(queue, mkl::uplo::lower, nb, ldd);
        scratchpad = static_cast<double*>(sycl::malloc_shared(scratchpad_size * sizeof(double), device, context));
        if (scratchpad_size != 0 && !scratchpad)
            return -1000; // allocation-failure sentinel (kept from original)
        mkl::lapack::potrf(queue, mkl::uplo::lower, nb, d, ldd, scratchpad, scratchpad_size).wait_and_throw();
    } catch(mkl::lapack::exception const& e) {
        // Handle LAPACK related exceptions happened during synchronous call
        std::cout << "Unexpected exception caught during synchronous call to LAPACK API:\ninfo: " << e.info() << std::endl;
        if (e.info() > 0) {
            // INFO is equal to the 'global' index of the element u_ii of the factor
            // U which is equal to zero
            info = e.info();
        }
        sycl::free(scratchpad, context); // sycl::free(nullptr) is a no-op
        return info;
    }
    // Main loop over the remaining block rows
    for (int64_t k = 0; k < n-1; k++) {
        // B_k := B_k * L_k^{-t}  (triangular solve from the right)
        auto event1 = mkl::blas::trsm(queue, mkl::side::right, mkl::uplo::lower, mkl::transpose::trans,
                                      mkl::diag::nonunit, nb, nb, 1.0, &D(0,k*nb), ldd, &B(0,k*nb), ldb);
        // D_{k+1} := D_{k+1} - B_k*B_k^t  (symmetric rank-nb downdate)
        auto event2 = mkl::blas::syrk(queue, mkl::uplo::lower, mkl::transpose::nontrans, nb, nb,
                                      -1.0, &B(0,k*nb), ldb, 1.0, &D(0,(k+1)*nb), ldd, {event1});
        event2.wait_and_throw();
        // Factor the updated diagonal block: D_{k+1} = L_{k+1}*L_{k+1}^t
        try {
            mkl::lapack::potrf(queue, mkl::uplo::lower, nb, &D(0,(k+1)*nb), ldd, scratchpad, scratchpad_size).wait_and_throw();
        } catch(mkl::lapack::exception const& e) {
            // Handle LAPACK related exceptions happened during synchronous call
            std::cout << "Unexpected exception caught during synchronous call to LAPACK API:\ninfo: " << e.info() << std::endl;
            if (e.info() > 0) {
                // Map the block-local index to the 'global' matrix index.
                info = e.info() + (k+1)*nb;
            }
            sycl::free(scratchpad, context);
            return info;
        }
    }
    sycl::free(scratchpad, context);
    return info;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/block_cholesky_decomposition/factor.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
/*
*
* Content:
* Example of Cholesky factorization of a symmetric positive
* definite block tridiagonal matrix
************************************************************************
* Purpose:
* ========
* Testing accuracy of Cholesky factorization A=
* | L_1 | | L_1^t C_1^t |
* | C_1 L_2 | | L_2^t C_2^t |
* A = | . . |*| . . |
* | . . | | . C_N-1^t |
* | C_N-1 L_N | | L_N^t |
*
* of a symmetric positive definite block tridiagonal matrix A
* | D_1 B_1^t |
* | B_1 D_2 B_2^t |
* | B_2 D_3 B_3^t |
* | . . . |
* | . . . |
* | B_N-2 D_N-1 B_N-1^t |
* | B_N-1 D_N |
* by calling TEST_RES which calculates ratio of Frobenius norms
* ||A-L*L^t||_F/||A||_F.
*/
#include <cstdint>
#include <iostream>
#include <vector>
#include <sycl/sycl.hpp>
#include "oneapi/mkl.hpp"
using namespace oneapi;
int64_t dpbltrf(sycl::queue queue, int64_t n, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb);
double test_res(int64_t, int64_t, double*, int64_t, double*, int64_t, double*, int64_t, double*, int64_t, double*, int64_t, double*, int64_t);
template<typename T>
using allocator_t = sycl::usm_allocator<T, sycl::usm::alloc::shared>;
// Generates a random symmetric positive definite block tridiagonal matrix,
// factors it with dpbltrf, and verifies ||A - L*L^t||_F/||A||_F <= 5*EPS.
// Returns 0 on success, non-zero on factorization or accuracy failure.
int main() {
    // This example drives ILP64 oneMKL entry points through int64_t indices.
    if (sizeof(MKL_INT) != sizeof(int64_t)) {
        std::cerr << "MKL_INT not 64bit" << std::endl;
        return -1;
    }
    int64_t info = 0;
    // Asynchronous error handler
    auto error_handler = [&] (sycl::exception_list exceptions) {
        for (auto const& e : exceptions) {
            try {
                std::rethrow_exception(e);
            } catch(mkl::lapack::exception const& e) {
                // Handle LAPACK related exceptions happened during asynchronous call
                info = e.info();
                std::cout << "Unexpected exception caught during asynchronous LAPACK operation:\ninfo: " << e.info() << std::endl;
            } catch(sycl::exception const& e) {
                // Handle not LAPACK related exceptions happened during asynchronous call
                std::cout << "Unexpected exception caught during asynchronous operation:\n" << e.what() << std::endl;
                info = -1;
            }
        }
    };
    // SYCL 2020 selector; sycl::default_selector{} is deprecated.
    sycl::device device{sycl::default_selector_v};
    sycl::queue queue(device, error_handler);
    sycl::context context = queue.get_context();
    if (device.get_info<sycl::info::device::double_fp_config>().empty()) {
        std::cerr << "The sample uses double precision, which is not supported" << std::endl;
        std::cerr << "by the selected device. Quitting." << std::endl;
        return 0;
    }
    allocator_t<double> allocator_d(context, device);
    MKL_INT n = 200;  // number of block rows
    MKL_INT nb = 20;  // block size
    // d/b (USM shared) are factored in place; d2/b2 keep A for the residual
    // check; d1/b1 are workspace for test_res.
    std::vector<double, allocator_t<double>> d(nb * n*nb, allocator_d);
    std::vector<double, allocator_t<double>> b(nb * (n-1)*nb, allocator_d);
    std::vector<double> d1(nb * n*nb);
    std::vector<double> b1(nb * (n-1)*nb);
    std::vector<double> d2(nb * n*nb);
    std::vector<double> b2(nb * (n-1)*nb);
    std::vector<MKL_INT> iseed = {1, 2, 33, 15};
    // Column-major accessor into the diagonal-block array (leading dim nb).
    auto D = [=,&d](int64_t i, int64_t j) -> double& { return d[i + j*nb]; };
    std::cout << "Testing accuracy of Cholesky factorization\n";
    std::cout << "of randomly generated positive definite symmetric\n";
    std::cout << "block tridiagonal matrix by calculating residual.\n\n";
    std::cout << "Matrix size = " << n << "\n";
    std::cout << "Block size = " << nb << "\n";
    std::cout << "...\n";
    std::cout << "Matrices are being generated.\n";
    std::cout << "...\n";
    // Initializing arrays randomly
    LAPACKE_dlarnv(2, iseed.data(), (n-1)*nb*nb, b.data());
    cblas_dcopy((n-1)*nb*nb, b.data(), 1, b2.data(), 1);
    for (int64_t k = 0; k < n; k++) {
        for (int64_t j = 0; j < nb; j++) {
            // Fill the lower triangle of diagonal block k, column by column.
            LAPACKE_dlarnv(2, iseed.data(), nb-j, &D(j,k*nb+j));
            // Mirror the strictly-lower elements into the upper triangle to
            // symmetrize the block. There are nb-j-1 such elements; the
            // original count of nb-j read one element past the column and,
            // for the last block's last column, wrote past the end of d.
            cblas_dcopy(nb-j-1, &D(j+1, k*nb+j), 1, &D(j, k*nb+j+1), nb);
        }
        // Diagonal dominance to make the matrix positive definite
        for (int64_t j = 0; j < nb; j++) {
            D(j, k*nb+j) += nb*3.0;
        }
    }
    // Keep a pristine copy of the diagonal blocks for the residual test.
    cblas_dcopy(n*nb*nb, d.data(), 1, d2.data(), 1);
    std::cout << "Call Cholesky factorization\n";
    std::cout << "...\n";
    try {
        info = dpbltrf(queue, n, nb, d.data(), nb, b.data(), nb);
    } catch(sycl::exception const& e) {
        // Handle not LAPACK related exceptions happened during synchronous call
        std::cout << "Unexpected exception caught during synchronous call to SYCL API:\n" << e.what() << std::endl;
        info = -1;
    }
    if(info) {
        std::cout << "Factorization failed. info = " << info << std::endl;
        return 1;
    } else {
        std::cout << "Cholesky factorization succeeded." << std::endl;
        std::cout << "Testing the residual" << std::endl;
        std::cout << "..." << std::endl;
        double res = test_res(n, nb, d.data(), nb, b.data(), nb, d1.data(), nb, b1.data(), nb, d2.data(), nb, b2.data(), nb);
        double eps = LAPACKE_dlamch('E');
        std::cout << "Residual test" << std::endl;
        std::cout << "||A-L*L^t||_F/||A||_F <= 5*EPS..." << std::endl;
        if (res/eps > 5.0) {
            std::cout << "failed: ||A-L*L^t||_F/||A||_F = " << res << std::endl;
            return 1;
        } else {
            std::cout << "passed" << std::endl;
        }
    }
    return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/block_cholesky_decomposition/dpbltrs.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
/*
*
* Content:
* Function DPBLTRS for solving a system of linear equations with
* Cholesky factored symmetric positive definite block tridiagonal
* coefficient matrix.
************************************************************************/
#include <cstdint>
#include <sycl/sycl.hpp>
#include "oneapi/mkl.hpp"
using namespace oneapi;
/************************************************************************
* Definition:
* ===========
* int64_t dpbltrs(sycl::queue queue, int64_t n, int64_t nrhs, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb, double* f, int64_t ldf)
*
* Purpose:
* ========
* DPBLTRS computes a solution to system of linear equations A*X=F with
* symmetric positive definite block tridiagonal coefficient matrix A
* D_1 B_1^t
* B_1 D_2 B_2^t
* B_2 D_3 B_3^t
* . . .
* . . .
* B_N-2 D_N-1 B_N-1^t
* B_N-1 D_N
 * and multiple right hand sides F. Before calling this routine the
 * coefficient matrix should be factored as A=L*L^T by calling DPBLTRF, where
* L is a lower block bidiagonal matrix
* L_1
* C_1 L_2
* C_2 L_3
* . . .
* . . .
* C_N-2 L_N-1
* C_N-1 L_N
* This is a block version of LAPACK DPTTRS subroutine.
*
* Arguments:
* ==========
* QUEUE (input) sycl queue
* The device queue
*
* N (input) int64_t
* The number of block rows of the matrix A. N >= 0.
*
* NRHS (input) int64_t
* The number of right hand sides (the number of columns in matrix F).
*
* NB (input) int64_t
* The size of blocks. NB >= 0.
*
* D (input) double array, dimension (LDD) * (N*NB)
* On entry, the array stores diagonal blocks of triangular factor L.
* Diagonal blocks L_j of lower triangular factor L are stored as
* respective lower triangles of blocks D_j (1 <= j <= N).
* Caution: upper triangles of D_j are not assumed to be zeroed.
* =======
*
* LDD (input) int64_t
* The leading dimension of array D. LDD >= NB.
*
* B (input) double array, dimension (LDB) * ((N-1)*NB)
* On entry, the array stores sub-diagonal blocks L_j of triangular
* factor L.
*
* LDB (input) int64_t
* The leading dimension of array B. LDB >= NB.
*
* F (input/output) double array, dimension (LDF) * (NRHS)
* On entry, the columns of the array store vectors F(i) of right
* hand sides of system of linear equations A*X=F.
*
* LDF (input) int64_t
* The leading dimension of array F. LDF >= NB*N.
*
* INFO (return) int64_t
* = 0: successful exit
* < 0: if INFO = -i, the i-th argument had an illegal value
* =====================================================================
*/
/*
 * Solves A*X = F in place given the Cholesky factorization A = L*L^t
 * produced by DPBLTRF: a forward sweep with L followed by a backward
 * sweep with L^t. Returns 0 on success, or -i when the i-th argument
 * (the queue is not counted) has an illegal value.
 */
int64_t dpbltrs(sycl::queue queue, int64_t n, int64_t nrhs, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb, double* f, int64_t ldf) {
  // Column-major accessors: diagonal blocks of L, sub-diagonal blocks of L,
  // and the right-hand-side/solution matrix F.
  auto diag = [=](int64_t i, int64_t j) -> double& { return d[i + j * ldd]; };
  auto offd = [=](int64_t i, int64_t j) -> double& { return b[i + j * ldb]; };
  auto rhs = [=](int64_t i, int64_t j) -> double& { return f[i + j * ldf]; };

  // Argument validation, first offending argument wins.
  if (n < 0) return -1;
  if (nrhs < 0) return -2;
  if (nb < 0) return -3;
  if (ldd < nb) return -5;
  if (ldb < nb) return -7;
  if (ldf < nb * n) return -9;

  // Forward substitution: L*Y = F. The first block row needs only L_1;
  // every following row subtracts C_k * Y_k and then solves with L_{k+1}.
  mkl::blas::trsm(queue, mkl::side::left, mkl::uplo::lower,
                  mkl::transpose::nontrans, mkl::diag::nonunit, nb, nrhs, 1.0,
                  d, ldd, f, ldf)
      .wait_and_throw();
  for (int64_t k = 0; k + 1 < n; ++k) {
    auto update = mkl::blas::gemm(
        queue, mkl::transpose::nontrans, mkl::transpose::nontrans, nb, nrhs,
        nb, -1.0, &offd(0, k * nb), ldb, &rhs(k * nb, 0), ldf, 1.0,
        &rhs((k + 1) * nb, 0), ldf);
    // The triangular solve depends on the preceding update of this block row.
    mkl::blas::trsm(queue, mkl::side::left, mkl::uplo::lower,
                    mkl::transpose::nontrans, mkl::diag::nonunit, nb, nrhs,
                    1.0, &diag(0, (k + 1) * nb), ldd, &rhs((k + 1) * nb, 0),
                    ldf, {update})
        .wait_and_throw();
  }

  // Backward substitution: L^t*X = Y, starting from the last block row and
  // moving up; each row subtracts C_k^t * X_{k+1} before solving.
  mkl::blas::trsm(queue, mkl::side::left, mkl::uplo::lower,
                  mkl::transpose::trans, mkl::diag::nonunit, nb, nrhs, 1.0,
                  &diag(0, (n - 1) * nb), ldd, &rhs((n - 1) * nb, 0), ldf)
      .wait_and_throw();
  for (int64_t k = n - 2; k >= 0; --k) {
    auto update = mkl::blas::gemm(
        queue, mkl::transpose::trans, mkl::transpose::nontrans, nb, nrhs, nb,
        -1.0, &offd(0, k * nb), ldb, &rhs((k + 1) * nb, 0), ldf, 1.0,
        &rhs(k * nb, 0), ldf);
    mkl::blas::trsm(queue, mkl::side::left, mkl::uplo::lower,
                    mkl::transpose::trans, mkl::diag::nonunit, nb, nrhs, 1.0,
                    &diag(0, k * nb), ldd, &rhs(k * nb, 0), ldf, {update})
        .wait_and_throw();
  }
  return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/block_cholesky_decomposition/solve.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
/*
* Content:
* Example of solving a system of linear equations with symmetric
* positive definite block tridiagonal coefficient matrix Cholesky
* factored
************************************************************************
* Purpose:
* ========
* Testing accuracy of solution of a system of linear equations A*X=F
* with a symmetric positive definite block tridiagonal coefficient
* matrix A
* | D_1 B_1^t |
* | B_1 D_2 B_2^t |
* | B_2 D_3 B_3^t |
* | . . . |
* | . . . |
* | B_N-2 D_N-1 B_N-1^t |
* | B_N-1 D_N |
* preliminarily Cholesky factored as follows:
* | L_1 | | L_1^t C_1^t |
* | C_1 L_2 | | L_2^t C_2^t |
* A = | . . |*| . . |
* | . . | | . C_N-1^t |
* | C_N-1 L_N | | L_N^t |
*
 * To test the solution, the function TEST_RES1 is called.
*/
#include <cstdint>
#include <iostream>
#include <vector>
#include <sycl/sycl.hpp>
#include "oneapi/mkl.hpp"
using namespace oneapi;
template<typename T>
using allocator_t = sycl::usm_allocator<T, sycl::usm::alloc::shared>;
int64_t dpbltrf(sycl::queue queue, int64_t n, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb);
int64_t dpbltrs(sycl::queue queue, int64_t n, int64_t nrhs, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb, double* f, int64_t ldf);
double test_res1(int64_t n, int64_t nrhs, int64_t nb, double* d, int64_t ldd, double* b, int64_t ldb, double* f, int64_t ldf, double* x, int64_t ldx );
int main() {
// ILP64 interface check: the sample passes int64_t sizes/arrays straight
// to LAPACKE/CBLAS, which take MKL_INT.
if (sizeof(MKL_INT) != sizeof(int64_t)) {
std::cerr << "MKL_INT not 64bit" << std::endl;
return -1;
}
int64_t info = 0;
// Asynchronous error handler
auto error_handler = [&] (sycl::exception_list exceptions) {
for (auto const& e : exceptions) {
try {
std::rethrow_exception(e);
} catch(mkl::lapack::exception const& e) {
// Handle LAPACK related exceptions happened during asynchronous call
info = e.info();
std::cout << "Unexpected exception caught during asynchronous LAPACK operation:\ninfo: " << e.info() << std::endl;
} catch(sycl::exception const& e) {
// Handle not LAPACK related exceptions happened during asynchronous call
std::cout << "Unexpected exception caught during asynchronous operation:\n" << e.what() << std::endl;
info = -1;
}
}
};
// NOTE(review): sycl::default_selector{} is the pre-SYCL-2020 spelling;
// newer toolchains prefer sycl::default_selector_v.
sycl::device device{sycl::default_selector{}};
sycl::queue queue(device, error_handler);
sycl::context context = queue.get_context();
// The sample computes in double precision; exit successfully (skip) on
// devices without fp64 support.
if (device.get_info<sycl::info::device::double_fp_config>().empty()) {
std::cerr << "The sample uses double precision, which is not supported" << std::endl;
std::cerr << "by the selected device. Quitting." << std::endl;
return 0;
}
allocator_t<double> allocator_d(context, device);
// Problem sizes: n diagonal blocks of order nb, nrhs right-hand sides.
int64_t n = 200;
int64_t nb = 20;
int64_t nrhs = 10;
int64_t ldf = nb*n;
// USM shared storage consumed (and overwritten) by the factorization and
// solve routines ...
std::vector<double, allocator_t<double>> d(nb * n*nb, allocator_d);
std::vector<double, allocator_t<double>> b(nb * (n-1)*nb, allocator_d);
std::vector<double, allocator_t<double>> f(ldf * nrhs, allocator_d);
// ... and plain host copies of the originals kept for the residual check.
std::vector<double> d2(nb * n*nb);
std::vector<double> b2(nb * (n-1)*nb);
std::vector<double> f2(ldf * nrhs);
// Element accessor into the diagonal-block array (column-major, ld = nb).
auto D = [=,&d](int64_t i, int64_t j) -> double& { return d[i + j*nb]; };
std::vector<MKL_INT> iseed = {1, 2, 3, 19};
std::cout << "Testing accuracy of solution of linear equations system" << std::endl;
std::cout << "with randomly generated positive definite symmetric" << std::endl;
std::cout << "block tridiagonal coefficient matrix by calculating" << std::endl;
std::cout << "ratios of residuals to RHS vectors' norms." << std::endl;
std::cout << "..." << std::endl;
std::cout << "Matrices are being generated." << std::endl;
std::cout << "..." << std::endl;
// Initializing arrays randomly
LAPACKE_dlarnv(2, iseed.data(), (n-1)*nb*nb, b.data());
cblas_dcopy((n-1)*nb*nb, b.data(), 1, b2.data(), 1);
LAPACKE_dlarnv(2, iseed.data(), nrhs*ldf, f.data());
cblas_dcopy(nrhs*ldf, f.data(), 1, f2.data(), 1);
for (int64_t k = 0; k < n; k++) {
for (int64_t j = 0; j < nb; j++) {
// Fill the lower triangle of diagonal block k column by column, then
// mirror the nb-j-1 sub-diagonal entries into row j to keep the block
// symmetric.
LAPACKE_dlarnv(2, iseed.data(), nb-j, &D(j, k*nb+j));
cblas_dcopy(nb-j-1, &D(j+1, k*nb+j), 1, &D(j, k*nb+j+1), nb);
}
// Diagonal dominance to make the matrix positive definite
for (int64_t j = 0; j < nb; j++) {
D(j, k*nb+j) += nb*3.0;
}
}
cblas_dcopy(nb*nb*n, d.data(), 1, d2.data(), 1);
// Factor the coefficient matrix
std::cout << "Call Cholesky factorization" << std::endl;
std::cout << "..." << std::endl;
try {
info = dpbltrf(queue, n, nb, d.data(), nb, b.data(), nb);
} catch(sycl::exception const& e) {
// Handle not LAPACK related exceptions happened during synchronous call
std::cout << "Unexpected exception caught during synchronous call to SYCL API:\n" << e.what() << std::endl;
info = -1;
}
if(info) {
std::cout << "Cholesky factorization failed. INFO = " << info << std::endl;
return 1;
} else {
std::cout << "Cholesky factorization succeeded." << std::endl;
}
// Solve the system of equations with factored coefficient matrix
std::cout << "Call solving the system of linear equations" << std::endl;
std::cout << "..." << std::endl;
// On return f holds the solution X (the original F was saved in f2).
info = dpbltrs(queue, n, nrhs, nb, d.data(), nb, b.data(), nb, f.data(), ldf);
if(info) {
std::cout << "Solution failed. INFO= " << info << std::endl;
return 1;
} else {
std::cout << "Solution succeeded." << std::endl;
}
// Test the accuracy of the solution
std::cout << "The system is solved. Testing the residual" << std::endl;
std::cout << "..." << std::endl;
// Residual uses the saved original matrix (d2, b2) and RHS (f2) against
// the computed solution in f.
double res = test_res1(n, nrhs, nb, d2.data(), nb, b2.data(), nb, f2.data(), ldf, f.data(), ldf);
double eps = LAPACKE_dlamch('E');
if(res/eps > 10.0) {
std::cout << "Residual test" << std::endl;
std::cout << "max_(i=1,...,NRHS){||A*X(i)-F(i)||/||F(i)||} <= 10*EPS " << std::endl;
std::cout << "failed" << std::endl;
return 1;
} else {
std::cout << "Residual test" << std::endl;
std::cout << "max_(i=1,...,NRHS){||A*X(i)-F(i)||/||F(i)||} <= 10*EPS " << std::endl;
std::cout << "passed" << std::endl;
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/binomial/src/binomial_sycl.cpp
|
//==============================================================
// Copyright © 2023 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <cstdio>
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
#include "binomial.hpp"
constexpr int sg_size = 32;
constexpr int wg_size = 128;
sycl::queue* binomial_queue;
// Allocates the four USM shared arrays (one element per option) and fills
// the three input arrays with uniform random market data via oneMKL RNG;
// the result array is zero-filled.
Binomial::Binomial() {
binomial_queue = new sycl::queue;
h_call_result = sycl::malloc_shared<DATA_TYPE>(opt_n, *binomial_queue);
h_stock_price = sycl::malloc_shared<DATA_TYPE>(opt_n, *binomial_queue);
h_option_strike = sycl::malloc_shared<DATA_TYPE>(opt_n, *binomial_queue);
h_option_years = sycl::malloc_shared<DATA_TYPE>(opt_n, *binomial_queue);
binomial_queue->fill(h_call_result, DATA_TYPE(0), opt_n);
constexpr int rand_seed = 777;
namespace mkl_rng = oneapi::mkl::rng;
// create random number generator object
// (when INIT_ON_HOST is set, the generator runs on a CPU queue instead of
// the sample's main queue)
mkl_rng::philox4x32x10 engine(
#if !INIT_ON_HOST
*binomial_queue,
#else
sycl::queue{sycl::cpu_selector_v},
#endif
rand_seed);
// Three uniform streams drawn from the same engine: stock price, strike
// and maturity, each sized opt_n.
sycl::event event_1 = mkl_rng::generate(
mkl_rng::uniform<DATA_TYPE>(5.0, 50.0), engine, opt_n, h_stock_price);
sycl::event event_2 = mkl_rng::generate(
mkl_rng::uniform<DATA_TYPE>(10.0, 25.0), engine, opt_n, h_option_strike);
sycl::event event_3 = mkl_rng::generate(
mkl_rng::uniform<DATA_TYPE>(1.0, 5.0), engine, opt_n, h_option_years);
// NOTE(review): the fill of h_call_result is not in this wait list — it is
// presumably done before body() later overwrites the array; confirm.
sycl::event::wait({event_1, event_2, event_3});
}
// Releases the USM allocations made in the constructor, then the queue
// itself (the queue must outlive the buffers it allocated).
Binomial::~Binomial() {
  for (DATA_TYPE* buffer :
       {h_call_result, h_stock_price, h_option_strike, h_option_years}) {
    sycl::free(buffer, *binomial_queue);
  }
  delete binomial_queue;
}
// Prices every option with one backward pass over a binomial tree of
// num_steps levels. Launch shape: one work-group (wg_size items) per
// option; each work-item keeps block_size consecutive tree nodes in
// private memory, and the SLM array passes each block's boundary value
// to its left neighbour between iterations.
void Binomial::body() {
constexpr int block_size = num_steps / wg_size;
static_assert(block_size * wg_size == num_steps);
// "this" can not be captured to the kernel. So, we need to copy internals of
// the class to local variables
DATA_TYPE* h_stock_price_local = this->h_stock_price;
DATA_TYPE* h_option_years_local = this->h_option_years;
DATA_TYPE* h_option_strike_local = this->h_option_strike;
DATA_TYPE* h_call_result_local = this->h_call_result;
binomial_queue->submit([&](sycl::handler& h) {
// One SLM slot per work-item plus one extra slot for the boundary value
// produced by the last work-item.
sycl::local_accessor<DATA_TYPE> slm_call{wg_size + 1, h};
h.template parallel_for(
sycl::nd_range(sycl::range<1>(opt_n * wg_size),
sycl::range<1>(wg_size)),
[=](sycl::nd_item<1> item)
[[intel::kernel_args_restrict]] [[intel::reqd_sub_group_size(
sg_size)]] {
const size_t opt = item.get_global_id(0) / wg_size;
const DATA_TYPE sx = h_stock_price_local[opt];
const DATA_TYPE xx = h_option_strike_local[opt];
const DATA_TYPE tx = h_option_years_local[opt];
// Per-step tree parameters: up/down factors u = e^{v*dt},
// d = e^{-v*dt} and discounted pseudo-probabilities pu_df, pd_df.
const DATA_TYPE dt = tx / static_cast<DATA_TYPE>(num_steps);
const DATA_TYPE v_dt = volatility * sycl::sqrt(dt);
const DATA_TYPE r_dt = risk_free * dt;
const DATA_TYPE i_f = sycl::exp(r_dt);
const DATA_TYPE df = sycl::exp(-r_dt);
const DATA_TYPE u = sycl::exp(v_dt);
const DATA_TYPE d = sycl::exp(-v_dt);
const DATA_TYPE pu = (i_f - d) / (u - d);
const DATA_TYPE pd = static_cast<DATA_TYPE>(1.0) - pu;
const DATA_TYPE pu_df = pu * df;
const DATA_TYPE pd_df = pd * df;
const DATA_TYPE mul_c = v_dt * static_cast<DATA_TYPE>(2.0);
// id walks the log-price grid from the lowest leaf upwards.
DATA_TYPE id = v_dt * static_cast<DATA_TYPE>(-num_steps);
DATA_TYPE local_call[block_size + 1];
auto wg = item.get_group();
int local_id = wg.get_local_id(0);
int block_start = block_size * local_id;
id += block_start * mul_c;
// Leaf payoffs max(S*e^{id} - X, 0) for this work-item's block.
for (int i = 0; i < block_size; i++) {
auto d = sx * sycl::exp(id) - xx;
local_call[i] = (d > 0) ? d : 0;
id += mul_c;
}
// Handling num_steps step by last item and putting it direclty to
// SLM last element
if (local_id == wg_size - 1) {
auto d = sx * sycl::exp(id) - xx;
slm_call[wg_size] = (d > 0) ? d : 0;
}
// Start at the final tree time step nodes(leaves) and walk
// backwards to calculate the call option price.
for (int i = num_steps; i > 0; i--) {
// Give and get "next block's local_call[j+1]" (local_call[0] in
// next block) elements across work items
// NOTE(review): when wg_size == sg_size both barriers are skipped;
// this appears to rely on sub-group-uniform execution — confirm.
slm_call[local_id] = local_call[0];
if (wg_size > sg_size) {
item.barrier(sycl::access::fence_space::local_space);
}
local_call[block_size] = slm_call[local_id + 1];
if (wg_size > sg_size) {
item.barrier(sycl::access::fence_space::local_space);
}
// Only blocks that still hold live nodes at step i do the
// discounted-expectation update.
if (block_start <= i) {
for (int j = 0; j < block_size; j++) {
local_call[j] =
pu_df * local_call[j + 1] + pd_df * local_call[j];
}
}
}
// After num_steps contractions the root value lives in the first
// work-item's first slot.
if (local_id == 0) {
h_call_result_local[opt] = local_call[0];
}
});
});
binomial_queue->wait();
}
// Prints a configuration banner, then times body(): one "cold" run
// (typically dominated by first-run/JIT costs) and, when REPORT_WARM is
// set, a second "warm" run whose throughput is more representative.
void Binomial::run() {
std::printf(
"%s Precision Binomial Option Pricing version %d.%d running on %s using "
"DPC++, workgroup size %d, sub-group size %d.\n",
sizeof(DATA_TYPE) > 4 ? "Double" : "Single", MAJOR, MINOR,
binomial_queue->get_device().get_info<sycl::info::device::name>().c_str(),
wg_size, sg_size);
std::printf("Compiler Version: %s, LLVM %d.%d based.\n", __VERSION__,
__clang_major__, __clang_minor__);
std::printf("Driver Version : %s\n",
binomial_queue->get_device()
.get_info<sycl::info::device::driver_version>()
.c_str());
std::printf("Build Time : %s %s\n", __DATE__, __TIME__);
std::printf("Input Dataset : %d\n", opt_n);
std::printf("Pricing %d Options with time step of %d.\n", opt_n, num_steps);
fflush(stdout);
std::printf("Cold iteration.\n");
fflush(stdout);
timer t{};
t.start();
body();
t.stop();
#if REPORT_COLD
std::printf("Completed in %10.5f seconds. Options per second: %10.5f\n",
t.duration(), static_cast<double>(opt_n) / (t.duration()));
#endif
#if REPORT_WARM
// The warm run reuses the same timer; the final "Time Elapsed" line below
// therefore reports the last run that executed.
std::printf("Warm iteration.\n");
fflush(stdout);
t.start();
body();
t.stop();
std::printf("Completed in %10.5f seconds. Options per second: %10.5f\n",
t.duration(), static_cast<double>(opt_n) / (t.duration()));
#endif
std::printf("Time Elapsed = %10.5f seconds\n", t.duration());
fflush(stdout);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/binomial/src/binomial.hpp
|
//==============================================================
// Copyright © 2023 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef __Binomial_HPP__
#define __Binomial_HPP__
#include <chrono>
// Floating-point type used throughout; override at compile time
// (e.g. -DDATA_TYPE=float) for single precision.
#ifndef DATA_TYPE
#define DATA_TYPE double
#endif
// Non-zero enables the Black-Scholes validation in Binomial::check().
#ifndef VERBOSE
#define VERBOSE 0
#endif
/******* VERSION *******/
#define MAJOR 1
#define MINOR 8
/******* VERSION *******/
// Market-model constants and problem size.
constexpr float volatility = 0.10f;
constexpr float risk_free = 0.06f;
// Depth of the binomial tree (time steps per option).
constexpr int num_steps = 2048;
// Number of options to price; SMALL_OPT_N selects a tiny smoke-test size.
constexpr int opt_n =
#if SMALL_OPT_N
480;
#else
8 * 1024 * 1024;
#endif
// Fallbacks so the banner in Binomial::run() compiles on toolchains that
// do not define the clang version macros.
#ifndef __clang_major__
#define __clang_major__ 0
#endif
#ifndef __clang_minor__
#define __clang_minor__ 0
#endif
#ifndef __VERSION__
#define __VERSION__ __clang_major__
#endif
// Binomial-tree option pricer; method definitions live in
// binomial_sycl.cpp (kernels) and binomial_main.cpp (validation).
class Binomial {
public:
Binomial();   // allocates USM buffers, generates random market data
~Binomial();  // releases USM buffers and the queue
void run();   // banner + timed pricing runs (cold, and warm if enabled)
void check(); // optional validation against the Black-Scholes reference
private:
// USM shared arrays, one element per option (size opt_n), allocated in
// the constructor.
DATA_TYPE* h_call_result;
DATA_TYPE* h_stock_price;
DATA_TYPE* h_option_strike;
DATA_TYPE* h_option_years;
void body();  // one pricing pass over all options
};
// Steady-clock stopwatch: start()/stop() bracket a region and duration()
// reports the elapsed time in seconds. Construction starts the timer.
class timer {
public:
using clock = std::chrono::steady_clock;
timer() { start(); }
void start() { begin_ = clock::now(); }
void stop() { end_ = clock::now(); }
auto duration() { return std::chrono::duration<double>(end_ - begin_).count(); }
private:
clock::time_point begin_;
clock::time_point end_;
};
#endif // __Binomial_HPP__
|
hpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/binomial/src/binomial_main.cpp
|
//==============================================================
// Copyright © 2023 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <cmath>
#include <cstdio>
#include <vector>
#include "binomial.hpp"
// Black-Scholes Reference Implementation
// Black-Scholes reference price of a European call option.
// BSM Formula:
// https://www.nobelprize.org/prizes/economic-sciences/1997/press-release/
//
// callResult [out] computed call price
// Sf stock price, Xf option strike, Tf option years,
// Rf riskless rate, Vf volatility rate
//
// Improvement over the original: the shared log/drift term and the
// normal CDF are computed once (d2 = d1 - sigma*sqrt(t)) instead of
// duplicating the transcendental expressions, and the math calls are
// std-qualified.
void BlackScholesRefImpl(double& callResult,
                         double Sf, // Stock price
                         double Xf, // Option strike
                         double Tf, // Option years
                         double Rf, // Riskless rate
                         double Vf // Volatility rate
) {
  const double S = Sf, L = Xf, t = Tf, r = Rf, sigma = Vf;
  // Standard normal CDF via erf: N(x) = 1/2 + 1/2 * erf(x / sqrt(2)).
  const auto norm_cdf = [](double x) {
    return 0.5 + 0.5 * std::erf(x / std::sqrt(2.));
  };
  const double sig_sqrt_t = sigma * std::sqrt(t);
  const double d1 = (std::log(S / L) + (r + 0.5 * sigma * sigma) * t) / sig_sqrt_t;
  const double d2 = d1 - sig_sqrt_t; // equals (log(S/L) + (r - sigma^2/2)t)/(sigma*sqrt(t))
  callResult = S * norm_cdf(d1) - L * std::exp(-r * t) * norm_cdf(d2);
}
void Binomial::check() {
if (VERBOSE) {
std::printf("Creating the reference result...\n");
std::vector<double> h_call_result_host(opt_n);
for (int opt = 0; opt < opt_n; opt++)
BlackScholesRefImpl(h_call_result_host[opt], h_stock_price[opt],
h_option_strike[opt], h_option_years[opt], risk_free,
volatility);
double sum_delta = 0.0, sum_ref = 0.0, max_delta = 0.0, errorVal = 0.0;
for (int i = 0; i < opt_n; i++) {
double ref = h_call_result_host[i];
auto delta = std::fabs(ref - h_call_result[i]);
if (delta > max_delta) {
max_delta = delta;
}
sum_delta += delta;
sum_ref += std::fabs(ref);
}
if (sum_ref > 1E-5)
std::printf("L1 norm: %E\n", errorVal = sum_delta / sum_ref);
else
std::printf("Avg. diff: %E\n", errorVal = sum_delta / opt_n);
std::printf((errorVal < 5e-4) ? "TEST PASSED\n" : "TEST FAILED\n");
}
}
// Driver: price the options on the device, then run the (VERBOSE-gated)
// validation against the Black-Scholes reference. argc/argv are unused.
int main(int argc, char** argv) {
Binomial test;
test.run();
test.check();
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/Libraries/oneMKL/monte_carlo_pi/mc_pi.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
/*
*
* Content:
* This file contains Monte Carlo Pi number evaluation benchmark for DPC++
* interface of random number generators.
*
*******************************************************************************/
#include <iostream>
#include <numeric>
#include <vector>
#include <numeric>
#include <sycl/sycl.hpp>
#include "oneapi/mkl.hpp"
using namespace oneapi;
// Value of Pi with many exact digits to compare with estimated value of Pi
static const auto pi = 3.1415926535897932384626433832795;
// Initialization value for random number generator
static const auto seed = 7777;
// Default Number of 2D points
static const auto n_samples = 120000000;
// Estimates Pi by Monte Carlo sampling.
//
// Draws n_points uniform 2D points in the unit square (oneMKL Philox RNG),
// counts on the device how many fall inside the quarter disc
// x^2 + y^2 <= 1, and returns 4 * (fraction under the curve).
//
// q        queue the RNG and the counting kernel are submitted to
// n_points number of 2D samples to draw
//
// NOTE(review): the trailing n_points % (wg_size * wg_num) samples are
// never examined because count_per_thread truncates — acceptable for an
// estimate; confirm this is intended.
double estimate_pi(sycl::queue& q, size_t n_points) {
double estimated_pi; // Estimated value of Pi
size_t n_under_curve = 0; // Number of points fallen under the curve
// Step 1. Generate n_points * 2 random numbers
// 1.1. Generator initialization
// Create an object of basic random numer generator (engine)
mkl::rng::philox4x32x10 engine(q, seed);
// Create an object of distribution (by default float, a = 0.0f, b = 1.0f)
mkl::rng::uniform distr;
sycl::buffer<float, 1> rng_buf(n_points * 2);
// 1.2. Random number generation
mkl::rng::generate(distr, engine, n_points * 2, rng_buf);
// Step 2. Count points under curve (x ^ 2 + y ^ 2 < 1.0f)
size_t wg_size = std::min(q.get_device().get_info<sycl::info::device::max_work_group_size>(), n_points);
size_t max_compute_units = q.get_device().get_info<sycl::info::device::max_compute_units>();
size_t wg_num = (n_points > wg_size * max_compute_units) ? max_compute_units : 1;
size_t count_per_thread = n_points / (wg_size * wg_num);
std::vector<size_t> count(wg_num);
{
// The buffer destructor at the end of this scope copies the per-group
// counts back into the host vector.
sycl::buffer<size_t, 1> count_buf(count);
q.submit([&] (sycl::handler& h) {
auto rng_acc = rng_buf.template get_access<sycl::access::mode::read>(h);
auto count_acc = count_buf.template get_access<sycl::access::mode::write>(h);
h.parallel_for(sycl::nd_range<1>(wg_size * wg_num, wg_size),
[=](sycl::nd_item<1> item) {
sycl::vec<float, 2> r;
size_t count = 0;
for(int i = 0; i < count_per_thread; i++) {
// Load the i-th (x, y) pair of this work-item's contiguous stripe.
r.load(i + item.get_global_linear_id() * count_per_thread, rng_acc.get_pointer());
if(sycl::length(r) <= 1.0f) {
count += 1;
}
}
// One result per work-group: the sum of all its items' counts.
count_acc[item.get_group_linear_id()] = sycl::reduce_over_group(item.get_group(), count, std::plus<size_t>());
});
});
}
// Fix: accumulate with a size_t initial value. The original literal 0
// made std::accumulate sum in int, which overflows once more than ~2^31
// points fall under the curve (large n_points).
n_under_curve = std::accumulate(count.begin(), count.end(), size_t{0});
// Step 3. Calculate approximated value of Pi
estimated_pi = n_under_curve / ((double)n_points) * 4.0;
return estimated_pi;
}
// Driver: parse the optional sample-count argument, run the Monte Carlo
// estimate on the default device, and print the result next to the exact
// value of Pi.
int main(int argc, char ** argv) {
std::cout << std::endl;
std::cout << "Monte Carlo pi Calculation Simulation" << std::endl;
std::cout << "Buffer Api" << std::endl;
std::cout << "-------------------------------------" << std::endl;
double estimated_pi;
// Optional first CLI argument overrides the sample count; missing or
// unparsable (atol returns 0) input falls back to the default.
size_t n_points = n_samples;
if(argc >= 2) {
n_points = atol(argv[1]);
if(n_points == 0) {
n_points = n_samples;
}
}
std::cout << "Number of points = " << n_points << std::endl;
// This exception handler with catch async exceptions
auto exception_handler = [&](sycl::exception_list exceptions) {
for(std::exception_ptr const& e : exceptions) {
try {
std::rethrow_exception(e);
} catch (sycl::exception const& e) {
std::cout << "Caught asynchronous SYCL exception:\n" << e.what() << std::endl;
std::terminate();
}
}
};
try {
// Queue constructor passed exception handler
sycl::queue q(sycl::default_selector{}, exception_handler);
// Launch Pi number calculation
estimated_pi = estimate_pi(q, n_points);
} catch (...) {
// Some other exception detected
std::cout << "Failure" << std::endl;
std::terminate();
}
// Printing results
// (estimated_pi is always assigned here: every failure path above
// terminates the process before reaching this point.)
std::cout << "Estimated value of Pi = " << estimated_pi << std::endl;
std::cout << "Exact value of Pi = " << pi << std::endl;
std::cout << "Absolute error = " << fabs(pi-estimated_pi) << std::endl;
std::cout << std::endl;
return 0;
}
|
cpp
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.