-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
3 changed files
with
211 additions
and
2 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,104 @@ | ||
|
||
// Copyright John Maddock 2016. | ||
// Copyright Matt Borland 2024. | ||
// Use, modification and distribution are subject to the | ||
// Boost Software License, Version 1.0. (See accompanying file | ||
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) | ||
|
||
#include <iostream> | ||
#include <iomanip> | ||
#include <vector> | ||
#include <boost/math/special_functions.hpp> | ||
#include "cuda_managed_ptr.hpp" | ||
#include "stopwatch.hpp" | ||
|
||
// For the CUDA runtime routines (prefixed with "cuda_") | ||
#include <cuda_runtime.h> | ||
|
||
typedef double float_type; | ||
|
||
/** | ||
* CUDA Kernel Device code | ||
* | ||
*/ | ||
/**
 * CUDA kernel device code.
 *
 * Evaluates boost::math::hypergeometric_pFq with a single numerator
 * parameter in1[i], a single denominator parameter in2[i], and argument
 * z = 1, writing the result to out[i].  One thread per element; expects
 * a 1D grid of 1D blocks.
 *
 * @param in1          numerator parameters (device-accessible, length numElements)
 * @param in2          denominator parameters (device-accessible, length numElements)
 * @param out          results (device-accessible, length numElements)
 * @param numElements  number of elements to process
 */
__global__ void cuda_test(const float_type *in1, const float_type *in2, float_type *out, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    // Tail guard: the grid is rounded up, so it may exceed numElements.
    if (i < numElements)
    {
        out[i] = boost::math::hypergeometric_pFq(std::initializer_list<float_type>({in1[i]}),
                                                 std::initializer_list<float_type>({in2[i]}),
                                                 static_cast<float_type>(1));
    }
}
|
||
/** | ||
* Host main routine | ||
*/ | ||
/**
 * Host main routine.
 *
 * Fills two vectors with uniform random values in [0, 1], evaluates
 * hypergeometric_pFq for each pair on the GPU, repeats the computation
 * on the host, and verifies the two agree to within 10 epsilon.
 * Prints timings for both paths.  Returns EXIT_FAILURE on any CUDA
 * error or verification mismatch.
 */
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // Print the vector length to be used, and compute its size
    int numElements = 50000;
    std::cout << "[Vector operation on " << numElements << " elements]" << std::endl;

    // Allocate the managed input vector A
    cuda_managed_ptr<float_type> input_vector1(numElements);

    // Allocate the managed input vector B
    cuda_managed_ptr<float_type> input_vector2(numElements);

    // Allocate the managed output vector C
    cuda_managed_ptr<float_type> output_vector(numElements);

    // Initialize the input vectors with values in [0, 1].
    for (int i = 0; i < numElements; ++i)
    {
        input_vector1[i] = rand()/(float_type)RAND_MAX;
        input_vector2[i] = rand()/(float_type)RAND_MAX;
    }

    // Launch the CUDA kernel: one thread per element, ceil-div grid.
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl;

    watch w;

    cuda_test<<<blocksPerGrid, threadsPerBlock>>>(input_vector1.get(), input_vector2.get(), output_vector.get(), numElements);

    // Launch-configuration errors surface via cudaGetLastError()
    // immediately after the launch; check before synchronizing.
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        std::cerr << "Failed to launch CUDA kernel (error code " << cudaGetErrorString(err) << ")!" << std::endl;
        return EXIT_FAILURE;
    }

    // In-kernel execution faults surface at the next synchronizing call;
    // the return value must be checked, not discarded.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        std::cerr << "CUDA kernel execution failed (error code " << cudaGetErrorString(err) << ")!" << std::endl;
        return EXIT_FAILURE;
    }

    std::cout << "CUDA kernel done in: " << w.elapsed() << "s" << std::endl;

    // Compute the same values on the host for verification and timing.
    std::vector<float_type> results;
    results.reserve(numElements);
    w.reset();
    for (int i = 0; i < numElements; ++i)
        results.push_back(boost::math::hypergeometric_pFq(std::initializer_list<float_type>({input_vector1[i]}),
                                                          std::initializer_list<float_type>({input_vector2[i]}),
                                                          static_cast<float_type>(1)));
    double t = w.elapsed();

    // Check the results: device and host must agree to within 10 epsilon.
    for (int i = 0; i < numElements; ++i)
    {
        if (boost::math::epsilon_difference(output_vector[i], results[i]) > 10)
        {
            std::cerr << "Result verification failed at element " << i << "!" << std::endl;
            return EXIT_FAILURE;
        }
    }

    std::cout << "Test PASSED, normal calculation time: " << t << "s" << std::endl;
    std::cout << "Done\n";

    return 0;
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,104 @@ | ||
|
||
// Copyright John Maddock 2016. | ||
// Copyright Matt Borland 2024. | ||
// Use, modification and distribution are subject to the | ||
// Boost Software License, Version 1.0. (See accompanying file | ||
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) | ||
|
||
#include <iostream> | ||
#include <iomanip> | ||
#include <vector> | ||
#include <boost/math/special_functions.hpp> | ||
#include "cuda_managed_ptr.hpp" | ||
#include "stopwatch.hpp" | ||
|
||
// For the CUDA runtime routines (prefixed with "cuda_") | ||
#include <cuda_runtime.h> | ||
|
||
typedef float float_type; | ||
|
||
/** | ||
* CUDA Kernel Device code | ||
* | ||
*/ | ||
/**
 * CUDA kernel device code.
 *
 * Evaluates boost::math::hypergeometric_pFq with a single numerator
 * parameter in1[i], a single denominator parameter in2[i], and argument
 * z = 1, writing the result to out[i].  One thread per element; expects
 * a 1D grid of 1D blocks.
 *
 * @param in1          numerator parameters (device-accessible, length numElements)
 * @param in2          denominator parameters (device-accessible, length numElements)
 * @param out          results (device-accessible, length numElements)
 * @param numElements  number of elements to process
 */
__global__ void cuda_test(const float_type *in1, const float_type *in2, float_type *out, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    // Tail guard: the grid is rounded up, so it may exceed numElements.
    if (i < numElements)
    {
        out[i] = boost::math::hypergeometric_pFq(std::initializer_list<float_type>({in1[i]}),
                                                 std::initializer_list<float_type>({in2[i]}),
                                                 static_cast<float_type>(1));
    }
}
|
||
/** | ||
* Host main routine | ||
*/ | ||
/**
 * Host main routine.
 *
 * Fills two vectors with uniform random values in [0, 1], evaluates
 * hypergeometric_pFq for each pair on the GPU, repeats the computation
 * on the host, and verifies the two agree to within 10 epsilon.
 * Prints timings for both paths.  Returns EXIT_FAILURE on any CUDA
 * error or verification mismatch.
 */
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // Print the vector length to be used, and compute its size
    int numElements = 50000;
    std::cout << "[Vector operation on " << numElements << " elements]" << std::endl;

    // Allocate the managed input vector A
    cuda_managed_ptr<float_type> input_vector1(numElements);

    // Allocate the managed input vector B
    cuda_managed_ptr<float_type> input_vector2(numElements);

    // Allocate the managed output vector C
    cuda_managed_ptr<float_type> output_vector(numElements);

    // Initialize the input vectors with values in [0, 1].
    for (int i = 0; i < numElements; ++i)
    {
        input_vector1[i] = rand()/(float_type)RAND_MAX;
        input_vector2[i] = rand()/(float_type)RAND_MAX;
    }

    // Launch the CUDA kernel: one thread per element, ceil-div grid.
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl;

    watch w;

    cuda_test<<<blocksPerGrid, threadsPerBlock>>>(input_vector1.get(), input_vector2.get(), output_vector.get(), numElements);

    // Launch-configuration errors surface via cudaGetLastError()
    // immediately after the launch; check before synchronizing.
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        std::cerr << "Failed to launch CUDA kernel (error code " << cudaGetErrorString(err) << ")!" << std::endl;
        return EXIT_FAILURE;
    }

    // In-kernel execution faults surface at the next synchronizing call;
    // the return value must be checked, not discarded.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        std::cerr << "CUDA kernel execution failed (error code " << cudaGetErrorString(err) << ")!" << std::endl;
        return EXIT_FAILURE;
    }

    std::cout << "CUDA kernel done in: " << w.elapsed() << "s" << std::endl;

    // Compute the same values on the host for verification and timing.
    std::vector<float_type> results;
    results.reserve(numElements);
    w.reset();
    for (int i = 0; i < numElements; ++i)
        results.push_back(boost::math::hypergeometric_pFq(std::initializer_list<float_type>({input_vector1[i]}),
                                                          std::initializer_list<float_type>({input_vector2[i]}),
                                                          static_cast<float_type>(1)));
    double t = w.elapsed();

    // Check the results: device and host must agree to within 10 epsilon.
    for (int i = 0; i < numElements; ++i)
    {
        if (boost::math::epsilon_difference(output_vector[i], results[i]) > 10)
        {
            std::cerr << "Result verification failed at element " << i << "!" << std::endl;
            return EXIT_FAILURE;
        }
    }

    std::cout << "Test PASSED, normal calculation time: " << t << "s" << std::endl;
    std::cout << "Done\n";

    return 0;
}