[/
          Copyright Oliver Kowalke 2017.
   Distributed under the Boost Software License, Version 1.0.
      (See accompanying file LICENSE_1_0.txt or copy at
            http://www.boost.org/LICENSE_1_0.txt
]

[#cuda]
[section:cuda CUDA]
  
[@http://developer.nvidia.com/cuda-zone/ CUDA (Compute Unified Device Architecture)] is a platform for parallel
computing on NVIDIA GPUs. CUDA's application programming interface gives access
to the GPU's instruction set and computation resources (the execution of compute
kernels).
  
  
[heading Synchronization with CUDA streams]

CUDA operations such as compute kernels or memory transfers (between host and
device) can be grouped and queued in CUDA streams, which are then executed on
the GPU. Boost.Fiber enables a fiber to sleep (suspend) until a CUDA stream has
completed its operations and to be resumed once the stream has finished. This
lets an application run other fibers on the CPU in the meantime, without
spawning additional OS threads.
  
        // CUDA kernel: each thread averages neighbouring elements of a and b
        // and stores the mean of both averages in c
        __global__
        void kernel( int size, int * a, int * b, int * c) {
            int idx = threadIdx.x + blockIdx.x * blockDim.x;
            if ( idx < size) {
                int idx1 = (idx + 1) % 256;
                int idx2 = (idx + 2) % 256;
                float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
                float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
                c[idx] = (as + bs) / 2;
            }
        }
  
        bool done = false; // set by the fiber once the GPU work has completed
        boost::fibers::fiber f([&done]{
            cudaStream_t stream;
            cudaStreamCreate( & stream);
            int size = 1024 * 1024;
            int full_size = 20 * size;
            // pinned host memory, required for asynchronous transfers
            int * host_a, * host_b, * host_c;
            cudaHostAlloc( & host_a, full_size * sizeof( int), cudaHostAllocDefault);
            cudaHostAlloc( & host_b, full_size * sizeof( int), cudaHostAllocDefault);
            cudaHostAlloc( & host_c, full_size * sizeof( int), cudaHostAllocDefault);
            int * dev_a, * dev_b, * dev_c;
            cudaMalloc( & dev_a, size * sizeof( int) );
            cudaMalloc( & dev_b, size * sizeof( int) );
            cudaMalloc( & dev_c, size * sizeof( int) );
            std::minstd_rand generator;
            std::uniform_int_distribution<> distribution(1, 6);
            for ( int i = 0; i < full_size; ++i) {
                host_a[i] = distribution( generator);
                host_b[i] = distribution( generator);
            }
            // process the data in chunks of `size` elements, queueing the
            // copies and the kernel launch on the same stream
            for ( int i = 0; i < full_size; i += size) {
                cudaMemcpyAsync( dev_a, host_a + i, size * sizeof( int), cudaMemcpyHostToDevice, stream);
                cudaMemcpyAsync( dev_b, host_b + i, size * sizeof( int), cudaMemcpyHostToDevice, stream);
                kernel<<< size / 256, 256, 0, stream >>>( size, dev_a, dev_b, dev_c);
                cudaMemcpyAsync( host_c + i, dev_c, size * sizeof( int), cudaMemcpyDeviceToHost, stream);
            }
            auto result = boost::fibers::cuda::waitfor_all( stream); // suspend fiber until the CUDA stream has finished
            BOOST_ASSERT( stream == std::get< 0 >( result) );
            BOOST_ASSERT( cudaSuccess == std::get< 1 >( result) );
            std::cout << "f1: GPU computation finished" << std::endl;
            cudaFreeHost( host_a);
            cudaFreeHost( host_b);
            cudaFreeHost( host_c);
            cudaFree( dev_a);
            cudaFree( dev_b);
            cudaFree( dev_c);
            cudaStreamDestroy( stream);
            done = true;
        });
        f.join();
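
While [^f] is suspended in [^waitfor_all()], other fibers scheduled on the same
OS thread keep running. As a minimal sketch (modelled on Boost.Fiber's own CUDA
examples, not part of the snippet above), a second fiber can poll the [^done]
flag and do CPU work until the GPU computation has finished; it has to be
launched before [^f] is joined so that both fibers interleave:

        // launch before f.join() so that f and f2 interleave
        boost::fibers::fiber f2([&done]{
            while ( ! done) {
                std::cout << "f2: busy on the CPU" << std::endl;
                boost::this_fiber::yield(); // give f a chance to be resumed
            }
        });
        f2.join();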
  
  
[heading Synopsis]

        #include <boost/fiber/cuda/waitfor.hpp>

        namespace boost {
        namespace fibers {
        namespace cuda {

        std::tuple< cudaStream_t, cudaError_t > waitfor_all( cudaStream_t st);
        std::vector< std::tuple< cudaStream_t, cudaError_t > > waitfor_all( cudaStream_t ... st);

        }}}
  
  
[ns_function_heading cuda..waitfor]

    #include <boost/fiber/cuda/waitfor.hpp>

    namespace boost {
    namespace fibers {
    namespace cuda {

    std::tuple< cudaStream_t, cudaError_t > waitfor_all( cudaStream_t st);
    std::vector< std::tuple< cudaStream_t, cudaError_t > > waitfor_all( cudaStream_t ... st);

    }}}
  
[variablelist
[[Effects:] [Suspends the active fiber until the given CUDA stream (or streams) has finished its operations.]]
[[Returns:] [a tuple of the stream and its CUDA status; the variadic overload returns a vector of such tuples, one per stream]]
]
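
For example, a fiber can wait on several streams at once; a minimal sketch,
assuming kernels or copies have already been enqueued on both streams:

    cudaStream_t stream0, stream1;
    cudaStreamCreate( & stream0);
    cudaStreamCreate( & stream1);
    // ... enqueue asynchronous work on both streams ...
    // suspends the active fiber until both streams have finished
    auto results = boost::fibers::cuda::waitfor_all( stream0, stream1);
    for ( auto & result : results) {
        BOOST_ASSERT( cudaSuccess == std::get< 1 >( result) );
    }
    cudaStreamDestroy( stream0);
    cudaStreamDestroy( stream1);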
  
  
[endsect]