Skip to content

Example

For the most part, cuinterval can be used just like cmath, except that its functions live in the cu:: namespace. In addition, various other useful interval operations are built in; see All Operations for the complete list.

#include <cuinterval/cuinterval.h>

#include <cuda_runtime.h>

#include <concepts>
#include <cstdio>
#include <cstdlib>
// Abort-on-error wrapper for CUDA runtime calls. On failure, prints the
// enclosing function, file:line, and both the human-readable error string
// and the error name, then aborts so the failure cannot be silently ignored.
// The argument is evaluated exactly once and is parenthesized so compound
// expressions expand safely; the local is suffixed `err_` so it cannot
// shadow or collide with an identifier used inside `x`.
#define CUDA_CHECK(x) \
    do { \
        cudaError_t err_ = (x); \
        if (err_ != cudaSuccess) { \
            fprintf(stderr, "CUDA error in %s at %s:%d: %s (%s=%d)\n", __FUNCTION__, \
                    __FILE__, __LINE__, cudaGetErrorString(err_), \
                    cudaGetErrorName(err_), (int)err_); \
            abort(); \
        } \
    } while (0)
// Device helper evaluated per element: computes (x - 1)^3 - x^2 + 4.
// NOTE(review): `y` is accepted to match the kernel's call signature but is
// never used in the body — presumably a leftover from a two-variable
// example; confirm against the library docs.
// `pow` and `sqr` called unqualified here presumably resolve to cuinterval's
// interval overloads via ADL when given cu::interval arguments — verify.
__device__ auto f(auto x, auto y)
{
return pow(x - 1.0, 3) - sqr(x) + 4.0;
}
// One-thread-per-element map: res[i] = f(xs[i], ys[i]) for i in [0, n).
// Launch with at least n total threads; threads past the tail of the
// data fall through the guard and do nothing.
__global__ void kernel(auto *xs, auto *ys, auto *res, std::integral auto n)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    res[tid] = f(xs[tid], ys[tid]);
}
// Host driver: builds n interval inputs, evaluates f element-wise on the
// GPU, and prints one representative result.
int main()
{
    constexpr int n = 256;
    using T = cu::interval<double>;
    T xs[n], ys[n], res[n];

    // Generate dummy data: element i holds the interval [0, i].
    for (int i = 0; i < n; i++) {
        double v = i;
        xs[i] = { { .lb = 0.0, .ub = v } };
        ys[i] = { 0.0, v };
    }

    T *d_xs, *d_ys, *d_res;
    CUDA_CHECK(cudaMalloc(&d_xs, n * sizeof(*xs)));
    CUDA_CHECK(cudaMalloc(&d_ys, n * sizeof(*ys)));
    CUDA_CHECK(cudaMalloc(&d_res, n * sizeof(*res)));
    CUDA_CHECK(cudaMemcpy(d_xs, xs, n * sizeof(*xs), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_ys, ys, n * sizeof(*ys), cudaMemcpyHostToDevice));

    // One thread per element in full blocks with a ceil-div grid.
    // (The previous <<<n, 1>>> launched n blocks of a single thread each,
    // leaving 31 of every 32 warp lanes idle.)
    constexpr int threads = 256;
    constexpr int blocks = (n + threads - 1) / threads;
    kernel<<<blocks, threads>>>(d_xs, d_ys, d_res, n);
    CUDA_CHECK(cudaGetLastError()); // catch launch-configuration errors

    // This blocking device-to-host copy also synchronizes with the kernel,
    // so res is fully written before we read it below.
    CUDA_CHECK(cudaMemcpy(res, d_res, n * sizeof(*res), cudaMemcpyDeviceToHost));

    auto r = res[3];
    printf("f([0,3], [0,3]) = [%g, %g]\n", r.lb, r.ub);

    CUDA_CHECK(cudaFree(d_xs));
    CUDA_CHECK(cudaFree(d_ys));
    CUDA_CHECK(cudaFree(d_res));
    return 0;
}