// Ensure printing of CUDA runtime errors to the console
#define CUB_STDERR

#include <stdio.h>
#include <iostream>
#include <cub/cub.cuh>
#include "../../test/test_util.h"
using namespace cub;
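// Globals: verbose output flag, timing iterations per test, and grid size (number of thread blocks)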
bool g_verbose = false;
int g_timing_iterations = 100;
int g_grid_size = 1;
/**
 * Simple kernel for performing a block-wide exclusive prefix sum over integers
 */
template <
    int                 BLOCK_THREADS,
    int                 ITEMS_PER_THREAD,
    BlockScanAlgorithm  ALGORITHM>
__global__ void BlockPrefixSumKernel(
    int     *d_in,        // Tile of input items
    int     *d_out,       // Tile of output items (plus one slot for the block aggregate)
    clock_t *d_elapsed)   // Elapsed cycle count of the block-wide scan
{
    // Specialize the CUB block-level collectives for this thread block
    // (warp-transpose load/store chosen here; any BlockLoad/BlockStore algorithm would work)
    typedef BlockLoad<int, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE>   BlockLoadT;
    typedef BlockStore<int, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_STORE_WARP_TRANSPOSE> BlockStoreT;
    typedef BlockScan<int, BLOCK_THREADS, ALGORITHM>                                     BlockScanT;

    // Shared memory for the collectives (a union lets them reuse the same storage)
    __shared__ union TempStorage
    {
        typename BlockLoadT::TempStorage  load;
        typename BlockStoreT::TempStorage store;
        typename BlockScanT::TempStorage  scan;
    } temp_storage;
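    // Per-thread tile of items (blocked arrangement)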
int data[ITEMS_PER_THREAD];
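    // Load a tile of data into a blocked arrangement across threads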
BlockLoadT(temp_storage.load).Load(d_in, data);
__syncthreads();
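    // Time the block-wide exclusive prefix sum (also returns the block aggregate)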
clock_t start = clock();
int aggregate;
BlockScanT(temp_storage.scan).ExclusiveSum(data, data, aggregate);
clock_t stop = clock();
__syncthreads();
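    // Store the scanned tile back to global memory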
BlockStoreT(temp_storage.store).Store(d_out, data);
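    // Thread 0 records the elapsed clocks (guarding against counter wrap) and the block-wide aggregate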
if (threadIdx.x == 0)
{
*d_elapsed = (start > stop) ? start - stop : stop - start;
d_out[BLOCK_THREADS * ITEMS_PER_THREAD] = aggregate;
}
}
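/**
 * Initialize the exclusive prefix sum problem (and its reference solution) on the host.
 * Returns the aggregate (total sum) of the inputs.
 */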
int Initialize(
int *h_in,
int *h_reference,
int num_items)
{
int inclusive = 0;
for (int i = 0; i < num_items; ++i)
{
h_in[i] = i % 17;
h_reference[i] = inclusive;
inclusive += h_in[i];
}
return inclusive;
}
// Test a single thread block's exclusive prefix sum for the given scan algorithm and tile shape
template <
    int                 BLOCK_THREADS,
    int                 ITEMS_PER_THREAD,
    BlockScanAlgorithm  ALGORITHM>
void Test()
{
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
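    // Allocate host arrays; the output has one extra slot for the block aggregate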
int *h_in = new int[TILE_SIZE];
int *h_reference = new int[TILE_SIZE];
int *h_gpu = new int[TILE_SIZE + 1];
int h_aggregate = Initialize(h_in, h_reference, TILE_SIZE);
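    // Allocate device arrays for the input tile, the output tile (+ aggregate), and the kernel clock counter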
int *d_in = NULL;
int *d_out = NULL;
clock_t *d_elapsed = NULL;
cudaMalloc((void**)&d_in, sizeof(int) * TILE_SIZE);
cudaMalloc((void**)&d_out, sizeof(int) * (TILE_SIZE + 1));
cudaMalloc((void**)&d_elapsed, sizeof(clock_t));
if (g_verbose)
{
printf("Input data: ");
for (int i = 0; i < TILE_SIZE; i++)
printf("%d, ", h_in[i]);
printf("\n\n");
}
    // Query SM occupancy for this kernel specialization
    int max_sm_occupancy;
    CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS));
    cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice);
    printf("BlockScan algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n",
        (ALGORITHM == BLOCK_SCAN_RAKING) ? "BLOCK_SCAN_RAKING" : (ALGORITHM == BLOCK_SCAN_RAKING_MEMOIZE) ? "BLOCK_SCAN_RAKING_MEMOIZE" : "BLOCK_SCAN_WARP_SCANS",
        TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy);
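    // Run the kernel once to check correctness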
BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<g_grid_size, BLOCK_THREADS>>>(
d_in,
d_out,
d_elapsed);
printf("\tOutput items: ");
int compare = CompareDeviceResults(h_reference, d_out, TILE_SIZE, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
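    // Verify the block-wide aggregate stored just past the scanned tile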
printf("\tAggregate: ");
compare = CompareDeviceResults(&h_aggregate, d_out + TILE_SIZE, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
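    // Timing runs: accumulate kernel wall-clock time and in-kernel cycle counts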
GpuTimer timer;
float elapsed_millis = 0.0;
clock_t elapsed_clocks = 0;
for (int i = 0; i < g_timing_iterations; ++i)
{
cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice);
timer.Start();
BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<g_grid_size, BLOCK_THREADS>>>(
d_in,
d_out,
d_elapsed);
timer.Stop();
elapsed_millis += timer.ElapsedMillis();
clock_t clocks;
CubDebugExit(cudaMemcpy(&clocks, d_elapsed,
sizeof(clock_t), cudaMemcpyDeviceToHost));
elapsed_clocks += clocks;
}
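    // Report averages over the timing iterations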
float avg_millis = elapsed_millis / g_timing_iterations;
float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0f;
float avg_clocks = float(elapsed_clocks) / g_timing_iterations;
float avg_clocks_per_item = avg_clocks / TILE_SIZE;
printf("\tAverage BlockScan::Sum clocks: %.3f\n", avg_clocks);
printf("\tAverage BlockScan::Sum clocks per item: %.3f\n", avg_clocks_per_item);
printf("\tAverage kernel millis: %.4f\n", avg_millis);
printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec);
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (h_gpu) delete[] h_gpu;
if (d_in) cudaFree(d_in);
if (d_out) cudaFree(d_out);
if (d_elapsed) cudaFree(d_elapsed);
}
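/**
 * Main: parse command-line arguments, then sweep tile shapes for each BlockScan algorithm
 */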
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("grid-size", g_grid_size);
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--i=<timing iterations (default:%d)>]"
"[--grid-size=<grid size (default:%d)>]"
"[--v] "
"\n", argv[0], g_timing_iterations, g_grid_size);
exit(0);
    }

    // Initialize the device specified by --device (defaults to device 0)
    CubDebugExit(args.DeviceInit());
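    // Exclusive prefix sum tests: BLOCK_SCAN_RAKING (raking reduce-then-scan through shared memory)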
Test<1024, 1, BLOCK_SCAN_RAKING>();
Test<512, 2, BLOCK_SCAN_RAKING>();
Test<256, 4, BLOCK_SCAN_RAKING>();
Test<128, 8, BLOCK_SCAN_RAKING>();
Test<64, 16, BLOCK_SCAN_RAKING>();
Test<32, 32, BLOCK_SCAN_RAKING>();
printf("-------------\n");
Test<1024, 1, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<512, 2, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<256, 4, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<128, 8, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<64, 16, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<32, 32, BLOCK_SCAN_RAKING_MEMOIZE>();
printf("-------------\n");
Test<1024, 1, BLOCK_SCAN_WARP_SCANS>();
Test<512, 2, BLOCK_SCAN_WARP_SCANS>();
Test<256, 4, BLOCK_SCAN_WARP_SCANS>();
Test<128, 8, BLOCK_SCAN_WARP_SCANS>();
Test<64, 16, BLOCK_SCAN_WARP_SCANS>();
Test<32, 32, BLOCK_SCAN_WARP_SCANS>();
return 0;
}