Commit c6598769 authored by Johannes Blaschke's avatar Johannes Blaschke

clean up some trailing spaces

parent f14066c3
Pipeline #46 passed with stage
in 2 minutes and 22 seconds
......@@ -55,10 +55,10 @@ class MPICommunicator {
std::vector<int> n_out;
std::vector<int> n_in;
int buffer_size;
std::vector<std::unique_ptr<T[]>> out_buffers;
void grow(int new_size){
if(new_size <= buffer_size)
return;
......@@ -98,7 +98,7 @@ class MPICommunicator {
MPI_Status stat[2 * n_dim];
for(int i = 0; i < n_dim; i++){
MPI_Isend( out_buffers[i].get(), n_out[i], MPI_T, i,
MPI_Isend( out_buffers[i].get(), n_out[i], MPI_T, i,
1, MPI_COMM_WORLD, & req[req_count]);
req_count += 1;
}
......@@ -134,18 +134,18 @@ class MPICommunicator {
for (int & n : n_out)
n = 0;
}
~MPICommunicator(){
for(auto & buffer : out_buffers)
buffer.reset();
incoming_buffer.reset();
}
void set_outgoing(int dest, T data[], int n_data){
while(n_out[dest] + n_data > buffer_size)
grow(2 * buffer_size);
T * buffer = out_buffers[dest].get();
std::copy(& data[0], & data[n_data], & buffer[ n_out[dest] ]);
n_out[dest] += n_data;
......@@ -177,7 +177,7 @@ class MPIGridCommunicator : public MPICommunicator<T, MPI_T>{
using MPICommunicator<T, MPI_T>::n_out;
using MPICommunicator<T, MPI_T>::n_in;
using MPICommunicator<T, MPI_T>::out_buffers;
virtual void sync_buffer_size(){
int req_count = 0;
MPI_Request req[2 * n_dim];
......@@ -202,7 +202,7 @@ class MPIGridCommunicator : public MPICommunicator<T, MPI_T>{
MPI_Status stat[2 * n_dim];
for(int i = 0; i < n_dim; i++){
MPI_Isend( out_buffers[i].get(), n_out[i], MPI_T, mpi::neighbours[i],
MPI_Isend( out_buffers[i].get(), n_out[i], MPI_T, mpi::neighbours[i],
1, mpi::cart_comm, & req[req_count]);
req_count += 1;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment