/*
The "challenge" is having two MPI processes do the following in a completely non-blocking way (I just use non-blocking send and recv for exposition, but a similar example using MPI_Iprobe could be constructed):

Process A:
non-blocking send size of array
non-blocking send array

Process B:
non-blocking receive size of array
allocate buffer (without blocking, once the size is known)
non-blocking receive array into buffer
*/
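
/*
For reference, a minimal sketch of what the raw non-blocking MPI calls look
like without continuations (assuming ranks 0/1, tags 0/1, and an int payload;
this is only an illustration, not the interface proposed below). It makes the
dependency explicit: B has to wait for the size before it can allocate the
buffer and post the second receive.
*/

#include <mpi.h>
#include <cstdint>
#include <vector>

void process_a_raw() {
    std::vector<int> data {1, 2, 3, 4};
    std::uint64_t size = data.size();
    MPI_Request reqs[2];
    MPI_Isend(&size, 1, MPI_UINT64_T, /*dest=*/1, /*tag=*/0,
              MPI_COMM_WORLD, &reqs[0]);
    MPI_Isend(data.data(), static_cast<int>(size), MPI_INT, /*dest=*/1,
              /*tag=*/1, MPI_COMM_WORLD, &reqs[1]);
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);  // both sends in flight at once
}

void process_b_raw() {
    std::uint64_t size = 0;
    MPI_Request req;
    MPI_Irecv(&size, 1, MPI_UINT64_T, /*source=*/0, /*tag=*/0,
              MPI_COMM_WORLD, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);          // must block (or poll) here...
    std::vector<int> buffer(size);              // ...before allocating...
    MPI_Irecv(buffer.data(), static_cast<int>(size), MPI_INT, /*source=*/0,
              /*tag=*/1, MPI_COMM_WORLD, &req); // ...and posting the payload recv
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}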

// In C++ pseudo-code (nb_send/nb_recv are the hypothetical non-blocking
// wrappers whose requests support .then() continuations):

// Process A:

std::vector<int> data {1, 2, 3, 4};
auto request = nb_send(process_b, tag_0, data.size())
    .then([&]() { // schedules a continuation:
        nb_send(process_b, tag_1, data.data(), data.size()); });

// application code follows {
//   not a single call to MPI_Test in here:
//   the second send is scheduled as soon as the first completes
// }

request.get(); // blocks until both sends complete

// Process B:

std::size_t size;
std::vector<int> buffer;
auto request = nb_recv(process_a, tag_0, &size)
    .then([&]() { // schedules a continuation:
        // allocate memory for the buffer before the second recv
        buffer.resize(size);
        nb_recv(process_a, tag_1, buffer.data(), size); });

// application code {
//   not a single call to MPI_Test in here:
//   as soon as the first recv completes, the continuation runs
//   asynchronously, memory is allocated, and the second recv is scheduled
// }

request.get(); // blocks until both recvs complete

/*
The main net win is that one fires all the communication logic at the beginning and blocks at the very end. That is, the unrelated application code in the middle doesn't need to e.g. call a function "every now and then" just to advance the continuations.

I don't see how to do this without spawning a separate thread that continuously loops over the requests, calling MPI_Test to advance to the next continuation (a sketch of such a progress loop follows below). If one has to spawn a separate thread anyway, the advantage of non-blocking MPI calls is still there, but it is less important (they allow cool things like having a pool of requests + continuations).

P.S.: One could have sent the array in a single send, but the receiving end would still need to repeatedly call MPI_Iprobe before executing the continuation (I chose the two sends + two receives for exposition purposes).
*/
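
/*
A minimal sketch (under the assumptions above) of that "spawn a thread that
loops over the requests calling MPI_Test" approach: a pool holds pending
MPI_Requests paired with continuations, and a background thread polls them,
running each continuation when its request completes. The pending_op and
progress_pool types are made up for illustration; only MPI_Test itself is
MPI API. MPI would have to be initialized with MPI_THREAD_MULTIPLE (or all
MPI calls confined to this thread) for this to be legal.
*/

#include <mpi.h>
#include <atomic>
#include <functional>
#include <mutex>
#include <thread>
#include <utility>
#include <vector>

struct pending_op {
    MPI_Request request;
    std::function<void()> continuation;  // runs once the request completes
};

class progress_pool {
    std::vector<pending_op> ops_;
    std::mutex mutex_;
    std::atomic<bool> done_{false};
    std::thread worker_;

public:
    progress_pool() : worker_([this] { run(); }) {}
    ~progress_pool() { done_ = true; worker_.join(); }

    // nb_send/nb_recv would post MPI_Isend/MPI_Irecv and register the
    // resulting request together with the .then() continuation here.
    void add(MPI_Request request, std::function<void()> continuation) {
        std::lock_guard<std::mutex> lock(mutex_);
        ops_.push_back({request, std::move(continuation)});
    }

private:
    void run() {
        while (!done_) {
            std::vector<std::function<void()>> ready;
            {
                std::lock_guard<std::mutex> lock(mutex_);
                for (auto it = ops_.begin(); it != ops_.end();) {
                    int flag = 0;
                    MPI_Test(&it->request, &flag, MPI_STATUS_IGNORE);
                    if (flag) {  // request finished: collect its continuation
                        ready.push_back(std::move(it->continuation));
                        it = ops_.erase(it);
                    } else {
                        ++it;
                    }
                }
            }
            // Run continuations outside the lock so they can add() follow-up
            // operations (e.g. the second send/recv above) without deadlocking.
            for (auto& cont : ready)
                if (cont) cont();
        }
    }
};

// request.get() would then just block (e.g. on a condition variable or an
// atomic flag set by the last continuation) until the whole chain has run.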