diff --git a/README.md b/README.md index 0e38ddb..b058605 100644 --- a/README.md +++ b/README.md @@ -3,12 +3,108 @@ CUDA Stream Compaction **University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2** -* (TODO) YOUR NAME HERE - * (TODO) [LinkedIn](), [personal website](), [twitter](), etc. -* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab) +* Yilin Liu + * [LinkedIn](https://www.linkedin.com/in/yilin-liu-9538ba1a5/) + * [Personal website](https://www.yilin.games) +* Tested on personal laptop: + - Windows 10, Intel(R) Core(TM), i7-10750H CPU @ 2.60GHz 2.59 GHz, RTX 2070 Max-Q 8GB -### (TODO: Your README) +## Features +This project uses CUDA to implement and improve a number of parallelized scan and stream compaction algorithms. The following features have been implemented: -Include analysis, etc. (Remember, this is public, so don't put -anything here that you don't want to share with the world.) +- Scan: calculate the prefix sum (using arbitrary operator) of an array + - CPU scan with/without simulating parallelized scan + + - GPU naive scan + + - GPU work-efficient scan + + - GPU scan using `thrust::exclusive_scan` + +- Stream compaction: remove elements that do not meet specific conditions from an array, and keep the rest compact in memory + + - CPU stream compaction with/without CPU scan + + - GPU stream compaction using the work-efficient scan + + + + +## Reflection + +* Based on an array size of 33554432 (2^25), I optimized each method by selecting the block size that provides the best performance. + + | Methods | Optimized Block Size | + | ------------- | ------------- | + | Naive Scan | 128 | + | Work Efficient | 512 | + | Thrust | 512 | + + +* Based on the optimized block size, I compared all of these GPU Scan implementations (Naive, Work-Efficient, and Thrust) to the serial CPU version of scan.
+ + |![image](./img/table1.png)| + |:--:| + | *Comparison of Scan Methods vs Time* | +* From the figure above, we can see that there are no significant differences between the CPU, Naive and Work Efficient methods while the array size is less than 15M, although the Work Efficient method is slightly faster than the other two. However, as the array size increases, the work efficient method is much faster while the other two continue to grow linearly. On the other hand, the Thrust method has always been the fastest method and takes almost constant time as the array size increases. +* The naive and work efficient methods are slow since the GPU has to read from global memory, which is very costly. The reason why the Thrust library is so fast could probably be that it saves memory copy costs among kernels. + + |![image](./img/table2.png)| + |:--:| + | *Comparison of Compaction Methods vs Time* | +* For the stream compaction algorithm, the GPU method surpassed all CPU methods. The stream compaction method using scan is the slowest one since it has to do more operations from global memory. + + + + +## Example Output for Array Size of 2^25 +``` + +**************** +** SCAN TESTS ** +**************** + [ 24 27 7 15 47 21 25 25 4 30 41 18 28 ...
11 0 ] +==== cpu scan, power-of-two ==== + elapsed time: 49.3806ms (std::chrono Measured) +==== cpu scan, non-power-of-two ==== + elapsed time: 48.9979ms (std::chrono Measured) + passed +==== naive scan, power-of-two ==== + elapsed time: 52.6029ms (CUDA Measured) + passed +==== naive scan, non-power-of-two ==== + elapsed time: 50.8384ms (CUDA Measured) + passed +==== work-efficient scan, power-of-two ==== + elapsed time: 21.571ms (CUDA Measured) + passed +==== work-efficient scan, non-power-of-two ==== + elapsed time: 22.2183ms (CUDA Measured) + passed +==== thrust scan, power-of-two ==== + elapsed time: 1.02707ms (CUDA Measured) + passed +==== thrust scan, non-power-of-two ==== + elapsed time: 1.06106ms (CUDA Measured) + passed + +***************************** +** STREAM COMPACTION TESTS ** +***************************** + [ 2 2 3 3 2 2 0 1 3 0 2 2 0 ... 2 0 ] +==== cpu compact without scan, power-of-two ==== + elapsed time: 74.2421ms (std::chrono Measured) + passed +==== cpu compact without scan, non-power-of-two ==== + elapsed time: 74.3851ms (std::chrono Measured) + passed +==== cpu compact with scan ==== + elapsed time: 166.162ms (std::chrono Measured) + passed +==== work-efficient compact, power-of-two ==== + elapsed time: 26.1186ms (CUDA Measured) + passed +==== work-efficient compact, non-power-of-two ==== + elapsed time: 26.5175ms (CUDA Measured) +``` diff --git a/img/table1.png b/img/table1.png new file mode 100644 index 0000000..a7f9318 Binary files /dev/null and b/img/table1.png differ diff --git a/img/table2.png b/img/table2.png new file mode 100644 index 0000000..642a0e8 Binary files /dev/null and b/img/table2.png differ diff --git a/src/main.cpp b/src/main.cpp index 896ac2b..8ae4de4 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -5,6 +5,7 @@ * @date 2015 * @copyright University of Pennsylvania */ +#include #include #include @@ -13,15 +14,14 @@ #include #include "testing_helpers.hpp" -const int SIZE = 1 << 8; // feel free to change the size of array 
+const int SIZE = 1 << 26; // feel free to change the size of array const int NPOT = SIZE - 3; // Non-Power-Of-Two int *a = new int[SIZE]; int *b = new int[SIZE]; int *c = new int[SIZE]; - -int main(int argc, char* argv[]) { +void scanTest() { // Scan tests - + std::vector timeVector; printf("\n"); printf("****************\n"); printf("** SCAN TESTS **\n"); @@ -37,20 +37,20 @@ int main(int argc, char* argv[]) { zeroArray(SIZE, b); printDesc("cpu scan, power-of-two"); StreamCompaction::CPU::scan(SIZE, b, a); - printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)"); - printArray(SIZE, b, true); + printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), timeVector, "(std::chrono Measured)"); + //printArray(SIZE, b, true); zeroArray(SIZE, c); printDesc("cpu scan, non-power-of-two"); StreamCompaction::CPU::scan(NPOT, c, a); - printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)"); - printArray(NPOT, b, true); + printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), timeVector, "(std::chrono Measured)"); + //printArray(NPOT, b, true); printCmpResult(NPOT, b, c); zeroArray(SIZE, c); printDesc("naive scan, power-of-two"); StreamCompaction::Naive::scan(SIZE, c, a); - printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), timeVector, "(CUDA Measured)"); //printArray(SIZE, c, true); printCmpResult(SIZE, b, c); @@ -63,35 +63,35 @@ int main(int argc, char* argv[]) { zeroArray(SIZE, c); printDesc("naive scan, non-power-of-two"); StreamCompaction::Naive::scan(NPOT, c, a); - printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + 
printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), timeVector, "(CUDA Measured)"); //printArray(SIZE, c, true); printCmpResult(NPOT, b, c); zeroArray(SIZE, c); printDesc("work-efficient scan, power-of-two"); StreamCompaction::Efficient::scan(SIZE, c, a); - printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), timeVector, "(CUDA Measured)"); //printArray(SIZE, c, true); printCmpResult(SIZE, b, c); zeroArray(SIZE, c); printDesc("work-efficient scan, non-power-of-two"); StreamCompaction::Efficient::scan(NPOT, c, a); - printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), timeVector, "(CUDA Measured)"); //printArray(NPOT, c, true); printCmpResult(NPOT, b, c); zeroArray(SIZE, c); printDesc("thrust scan, power-of-two"); StreamCompaction::Thrust::scan(SIZE, c, a); - printElapsedTime(StreamCompaction::Thrust::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + printElapsedTime(StreamCompaction::Thrust::timer().getGpuElapsedTimeForPreviousOperation(), timeVector, "(CUDA Measured)"); //printArray(SIZE, c, true); printCmpResult(SIZE, b, c); zeroArray(SIZE, c); printDesc("thrust scan, non-power-of-two"); StreamCompaction::Thrust::scan(NPOT, c, a); - printElapsedTime(StreamCompaction::Thrust::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + printElapsedTime(StreamCompaction::Thrust::timer().getGpuElapsedTimeForPreviousOperation(), timeVector, "(CUDA Measured)"); //printArray(NPOT, c, true); printCmpResult(NPOT, b, c); @@ -113,42 +113,51 @@ int main(int argc, char* argv[]) { zeroArray(SIZE, b); printDesc("cpu compact without scan, power-of-two"); count = 
StreamCompaction::CPU::compactWithoutScan(SIZE, b, a); - printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)"); + printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), timeVector, "(std::chrono Measured)"); expectedCount = count; - printArray(count, b, true); + //printArray(count, b, true); printCmpLenResult(count, expectedCount, b, b); zeroArray(SIZE, c); printDesc("cpu compact without scan, non-power-of-two"); count = StreamCompaction::CPU::compactWithoutScan(NPOT, c, a); - printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)"); + printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), timeVector, "(std::chrono Measured)"); expectedNPOT = count; - printArray(count, c, true); + //printArray(count, c, true); printCmpLenResult(count, expectedNPOT, b, c); zeroArray(SIZE, c); printDesc("cpu compact with scan"); count = StreamCompaction::CPU::compactWithScan(SIZE, c, a); - printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)"); - printArray(count, c, true); + printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), timeVector, "(std::chrono Measured)"); + //printArray(count, c, true); printCmpLenResult(count, expectedCount, b, c); zeroArray(SIZE, c); printDesc("work-efficient compact, power-of-two"); count = StreamCompaction::Efficient::compact(SIZE, c, a); - printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), timeVector, "(CUDA Measured)"); //printArray(count, c, true); printCmpLenResult(count, expectedCount, b, c); zeroArray(SIZE, c); printDesc("work-efficient compact, non-power-of-two"); count = 
StreamCompaction::Efficient::compact(NPOT, c, a); - printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)"); + printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), timeVector, "(CUDA Measured)"); //printArray(count, c, true); printCmpLenResult(count, expectedNPOT, b, c); + + for (int i = 0; i < timeVector.size(); i++) + { + std::cout << timeVector[i] << std::endl; + } system("pause"); // stop Win32 console from closing on exit delete[] a; delete[] b; delete[] c; } + +int main(int argc, char* argv[]) { + scanTest(); +} diff --git a/src/testing_helpers.hpp b/src/testing_helpers.hpp index 025e94a..e5d63c2 100644 --- a/src/testing_helpers.hpp +++ b/src/testing_helpers.hpp @@ -5,7 +5,7 @@ #include #include #include - +#include template int cmpArrays(int n, T *a, T *b) { for (int i = 0; i < n; i++) { @@ -68,9 +68,10 @@ void printArray(int n, int *a, bool abridged = false) { } printf("]\n"); } - -template -void printElapsedTime(T time, std::string note = "") +// +//template +void printElapsedTime(float time, std::vector& timeArray, std::string note = "") { std::cout << " elapsed time: " << time << "ms " << note << std::endl; + timeArray.push_back(time); } diff --git a/stream_compaction/common.cu b/stream_compaction/common.cu index 2ed6d63..ba9a3a8 100644 --- a/stream_compaction/common.cu +++ b/stream_compaction/common.cu @@ -24,6 +24,11 @@ namespace StreamCompaction { */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index >= n) { + return; + } + bools[index] = idata[index] ? 
1 : 0; } /** @@ -33,6 +38,13 @@ namespace StreamCompaction { __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index >= n) { + return; + } + if (bools[index]) { + odata[indices[index]] = idata[index]; + } } } diff --git a/stream_compaction/common.h b/stream_compaction/common.h index d2c1fed..119b5ac 100644 --- a/stream_compaction/common.h +++ b/stream_compaction/common.h @@ -13,6 +13,8 @@ #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) +#define BLOCK_SIZE 128 + /** * Check for CUDA errors; print and exit if there was a problem. */ diff --git a/stream_compaction/cpu.cu b/stream_compaction/cpu.cu index 719fa11..56daada 100644 --- a/stream_compaction/cpu.cu +++ b/stream_compaction/cpu.cu @@ -20,9 +20,20 @@ namespace StreamCompaction { void scan(int n, int *odata, const int *idata) { timer().startCpuTimer(); // TODO + odata[0] = 0; + odata[1] = idata[0]; + for (size_t i = 2; i < n; i++) { + odata[i] = odata[i - 1] + idata[i - 1]; + } timer().endCpuTimer(); } - + void scanInclusive(int n, int* odata, const int* idata) { + // TODO + odata[0] = idata[0]; + for (size_t i = 1; i < n; i++) { + odata[i] = odata[i - 1] + idata[i]; + } + } /** * CPU stream compaction without using the scan function. * @@ -31,8 +42,15 @@ namespace StreamCompaction { int compactWithoutScan(int n, int *odata, const int *idata) { timer().startCpuTimer(); // TODO + int j = 0; + for (size_t i = 0; i < n; i++) { + if (idata[i] != 0) { + odata[j] = idata[i]; + j++; + } + } timer().endCpuTimer(); - return -1; + return j; } /** @@ -41,10 +59,25 @@ namespace StreamCompaction { * @returns the number of elements remaining after compaction. 
*/ int compactWithScan(int n, int *odata, const int *idata) { + int* boolFlag = new int[n]; // temporary array + int* scanRes = new int[n]; timer().startCpuTimer(); // TODO + for (size_t i = 0; i < n; i++) + { + boolFlag[i] = idata[i] == 0 ? 0 : 1; + } + scanInclusive(n, scanRes, boolFlag); // odata: scan result + + + for (size_t i = 0; i < n; i++) { + if (boolFlag[i] == 0) continue; + odata[scanRes[i] - 1] = idata[i]; + } + timer().endCpuTimer(); - return -1; + return scanRes[n - 1]; + } } } diff --git a/stream_compaction/efficient.cu b/stream_compaction/efficient.cu index 2db346e..71a5528 100644 --- a/stream_compaction/efficient.cu +++ b/stream_compaction/efficient.cu @@ -2,6 +2,8 @@ #include #include "common.h" #include "efficient.h" +#include "common.cu" +using namespace StreamCompaction::Common; namespace StreamCompaction { namespace Efficient { @@ -11,16 +13,140 @@ namespace StreamCompaction { static PerformanceTimer timer; return timer; } + //Down sweep + __global__ void kernUpSweep(int* g_idata, int n, int d) { + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index >= n) { + return; + } + int pow2dplus1 = 1 << d + 1; + int pow2d = 1 << d; // d0 p1, d1 p2 + if (index % pow2dplus1 == 0) { + g_idata[index + pow2dplus1 - 1] += g_idata[index + pow2d - 1]; // + } + //if (index % (1 << (d + 1)) == 0) { + // g_idata[index + (1 << (d + 1)) - 1] += g_idata[index + (1 << d) - 1]; + //} + } + __global__ void kernDownSweep(int* g_idata, int n, int d) { + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index >= n) { + return; + } + + //if (index % (1 << (d + 1)) == 0) { + // int t = g_idata[index + (1 << d) - 1]; + // g_idata[index + (1 << d) - 1] = g_idata[index + (1 << (d + 1)) - 1]; + // g_idata[index + (1 << (d + 1)) - 1] += t; + //} + int pow2dplus1 = 1 << d + 1; + int pow2d = 1 << d; // d0 p1, d1 p2 + + if (index % pow2dplus1 == 0) { + int temp = g_idata[index + pow2d - 1]; + g_idata[index + pow2d - 1] = g_idata[index + pow2dplus1 - 1]; + 
g_idata[index + pow2dplus1 - 1] += temp; + } + } + __global__ void kernScan(const int* idata, int* odata, int n, int d) { + + } + + __global__ void kernSetZero(int n, int* idata) { + idata[n - 1] = 0; + } + + /*__global__ void kernUpSweep(int d, int n, int* idata) { + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index >= n) { + return; + } + if (index % (1 << (d + 1)) == 0) { + idata[index + (1 << (d + 1)) - 1] += idata[index + (1 << d) - 1]; + } + } + + + __global__ void kernDownSweep(int d, int n, int* idata) { + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index >= n) { + return; + } + if (index % (1 << (d + 1)) == 0) { + int t = idata[index + (1 << d) - 1]; + idata[index + (1 << d) - 1] = idata[index + (1 << (d + 1)) - 1]; + idata[index + (1 << (d + 1)) - 1] += t; + } + }*/ + + /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { + + // allocate memory + int paddedSize = 1 << ilog2ceil(n); + dim3 blocksPerGrid((paddedSize + BLOCK_SIZE - 1) / BLOCK_SIZE); + + int* dev_in; + cudaMalloc((void**)&dev_in, paddedSize * sizeof(int)); + cudaMemcpy(dev_in, idata, sizeof(int) * n, cudaMemcpyHostToDevice); + timer().startGpuTimer(); - // TODO + + // Up Sweep Phase + for (int d = 0; d <= ilog2ceil(paddedSize) - 1; d++) { + kernUpSweep <<< blocksPerGrid, BLOCK_SIZE >>> ( dev_in, paddedSize, d); + checkCUDAError("kernUpSweep failed"); + + } + kernSetZero << < 1, 1 >> > (paddedSize, dev_in); + + // Down Sweep Phase + for (int d = ilog2ceil(paddedSize) - 1; d >= 0; d--) { + kernDownSweep <<< blocksPerGrid, BLOCK_SIZE >>> (dev_in, paddedSize, d); + checkCUDAError("kernDownSweep failed"); + + } timer().endGpuTimer(); + + // send the data to host + cudaMemcpy(odata, dev_in, sizeof(int) * (n), cudaMemcpyDeviceToHost); + cudaFree(dev_in); + } + void scanNoTimer(int n, int* odata, const int* idata) { + + // allocate memory + int paddedSize = 1 << ilog2ceil(n); + dim3 
blocksPerGrid((paddedSize + BLOCK_SIZE - 1) / BLOCK_SIZE); + + int* dev_in; + cudaMalloc((void**)&dev_in, paddedSize * sizeof(int)); + cudaMemcpy(dev_in, idata, sizeof(int) * n, cudaMemcpyHostToDevice); + + // Up Sweep Phase + for (int d = 0; d <= ilog2ceil(paddedSize) - 1; d++) { + kernUpSweep << < blocksPerGrid, BLOCK_SIZE >> > (dev_in, paddedSize, d); + checkCUDAError("kernUpSweep failed"); + + } + kernSetZero << < 1, 1 >> > (paddedSize, dev_in); + + // Down Sweep Phase + for (int d = ilog2ceil(paddedSize) - 1; d >= 0; d--) { + kernDownSweep << < blocksPerGrid, BLOCK_SIZE >> > (dev_in, paddedSize, d); + checkCUDAError("kernDownSweep failed"); + + } + + // send the data to host + cudaMemcpy(odata, dev_in, sizeof(int) * (n), cudaMemcpyDeviceToHost); + cudaFree(dev_in); + } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. @@ -31,10 +157,43 @@ namespace StreamCompaction { * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { + dim3 blocksPerGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE); + + int* count = new int[2]; + + int* dev_in; + int* dev_bool; + int* dev_ScanRes; + int* dev_out; + cudaMalloc((void**)&dev_in, n * sizeof(int)); + cudaMalloc((void**)&dev_bool, n * sizeof(int)); + cudaMalloc((void**)&dev_ScanRes, n * sizeof(int)); + cudaMalloc((void**)&dev_out, n * sizeof(int)); + + cudaMemcpy(dev_in, idata, sizeof(int) * n, cudaMemcpyHostToDevice); + timer().startGpuTimer(); // TODO + kernMapToBoolean << > > (n, dev_bool, dev_in); + scanNoTimer(n, dev_ScanRes, dev_bool); + kernScatter << > > (n, dev_out, dev_in, dev_bool, dev_ScanRes); timer().endGpuTimer(); - return -1; + + cudaMemcpy(count, &dev_bool[n - 1], sizeof(int), cudaMemcpyDeviceToHost); + //cudaMemcpy(count + 1, dev_ScanRes + n - 1, sizeof(int), cudaMemcpyDeviceToHost); + + //size equals to last of boolean array and last of boolean prefix sum array + int size; + cudaMemcpy(&size, &dev_ScanRes[n - 
1], sizeof(int), cudaMemcpyDeviceToHost); // copy the last element of scan result + size += count[0]; + + cudaMemcpy(odata, dev_out, sizeof(int) * size, cudaMemcpyDeviceToHost); + + cudaFree(dev_in); + cudaFree(dev_bool); + cudaFree(dev_ScanRes); + cudaFree(dev_out); + return size; } } } diff --git a/stream_compaction/naive.cu b/stream_compaction/naive.cu index 4308876..33bf0ec 100644 --- a/stream_compaction/naive.cu +++ b/stream_compaction/naive.cu @@ -12,14 +12,69 @@ namespace StreamCompaction { return timer; } // TODO: __global__ + __global__ void kernScan(const int* idata, int* odata, int n, int d) { + int index = threadIdx.x + (blockIdx.x * blockDim.x); + if (index >= n) { + return; + } + + int pow2dminus1 = 1 << d - 1; + if (index >= pow2dminus1) { + odata[index] = idata[index] + idata[index - pow2dminus1]; + } + else { + odata[index] = idata[index]; + } + } + // Part 5 + //__global__ void scan(float* g_odata, float* g_idata, int n) { + // extern __shared__ float temp[]; // allocated on invocation + // int thid = threadIdx.x; + // int pout = 0, pin = 1; + // temp[pout * n + thid] = (thid > 0) ? g_idata[thid - 1] : 0; + // __syncthreads(); + // for (int offset = 1; offset < n; offset *= 2) { + // pout = 1 - pout; + // // swap double buffer indices + // pin = 1 - pout; + // if (thid >= offset) + // temp[pout * n + thid] += temp[pin * n + thid - offset]; + // else + // temp[pout * n + thid] = temp[pin * n + thid]; + // __syncthreads(); + // } + // g_odata[thid] = temp[pout * n + thid]; // write output + //} + /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ - void scan(int n, int *odata, const int *idata) { + void scan(int n, int* odata, const int* idata) { + dim3 blocksPerGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE); + + int* dev_in; + int* dev_out; + cudaMalloc((void**)&dev_in, n * sizeof(int)); + cudaMalloc((void**)&dev_out, n * sizeof(int)); + cudaMemcpy(dev_in, idata, sizeof(int) * n, cudaMemcpyHostToDevice); + timer().startGpuTimer(); - // TODO + + for (int d = 1; d <= ilog2ceil(n); d++) { + kernScan <<< blocksPerGrid, BLOCK_SIZE >>> (dev_in, dev_out, n, d); + //std::swap(dev_in, dev_out); + cudaMemcpy(dev_in, dev_out, sizeof(int) * n, cudaMemcpyDeviceToDevice); + } + // works fine without doing the exclusive shift here as mentioned in GOU Gem Book timer().endGpuTimer(); + + // send the data to host + // shift the output by 1 for exclusive scanc + odata[0] = 0; + cudaMemcpy(odata + 1, dev_in, sizeof(int) * (n - 1), cudaMemcpyDeviceToHost); + cudaFree(dev_in); + cudaFree(dev_out); } } -} +} \ No newline at end of file diff --git a/stream_compaction/thrust.cu b/stream_compaction/thrust.cu index 1def45e..cec21c5 100644 --- a/stream_compaction/thrust.cu +++ b/stream_compaction/thrust.cu @@ -5,6 +5,7 @@ #include #include "common.h" #include "thrust.h" +#include namespace StreamCompaction { namespace Thrust { @@ -18,11 +19,20 @@ namespace StreamCompaction { * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { - timer().startGpuTimer(); // TODO use `thrust::exclusive_scan` // example: for device_vectors dv_in and dv_out: // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); + + thrust::host_vector host_in(idata, idata + n); + thrust::device_vector dev_in = host_in; + thrust::device_vector dev_out(n); + + + timer().startGpuTimer(); + thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin()); timer().endGpuTimer(); + thrust::copy(dev_out.begin(), dev_out.end(), odata); + } } }