#include <mpi.h>
#include <iostream>
#include <vector>
#include <algorithm>

const int N = 400; // Size of the square matrices (N x N)

void printMatrix(const std::vector<std::vector<int>>& matrix, int rows, int cols) {
    for (int i = 0; i < rows; ++i) {
        for (int j = 0; j < cols; ++j) {
            std::cout << matrix[i][j] << " ";
        }
        std::cout << std::endl;
    }
}

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Define matrices, initialized with 1s for simplicity
    // (so every entry of the result should equal N)
    std::vector<std::vector<int>> matrix1(N, std::vector<int>(N, 1));
    std::vector<std::vector<int>> matrix2(N, std::vector<int>(N, 1));
    std::vector<std::vector<int>> resultMatrix(N, std::vector<int>(N, 0));

    if (rank == 0) {
        std::cout << "Starting matrix multiplication for " << N << "x" << N
                  << " matrices using point-to-point communication..." << std::endl;
    }

    // Start timer (the measured time includes the broadcast of matrix2 below)
    double start_time = MPI_Wtime();

    // Broadcast matrix2 row by row so every process holds the full right-hand matrix
    for (int i = 0; i < N; ++i) {
        MPI_Bcast(matrix2[i].data(), N, MPI_INT, 0, MPI_COMM_WORLD);
    }

    // Block-distribute the rows of matrix1: each rank gets N / size rows,
    // and the first N % size ranks each take one extra row when N is not
    // evenly divisible by size
    int rows_per_process = N / size;
    int extra_rows = N % size;

    int start_row = rank * rows_per_process + std::min(rank, extra_rows);
    int end_row = start_row + rows_per_process + (rank < extra_rows ? 1 : 0);

    // Each process computes its assigned rows of the result
    for (int i = start_row; i < end_row; ++i) {
        for (int j = 0; j < N; ++j) {
            resultMatrix[i][j] = 0;
            for (int k = 0; k < N; ++k) {
                resultMatrix[i][j] += matrix1[i][k] * matrix2[k][j];
            }
        }
    }

    // Gather the computed rows on the root process (rank 0) with point-to-point messages
    if (rank == 0) {
        // The root's own rows are already in place; receive the remaining rows,
        // using the global row index as the message tag
        for (int i = 1; i < size; ++i) {
            int start = i * rows_per_process + std::min(i, extra_rows);
            int end = start + rows_per_process + (i < extra_rows ? 1 : 0);

            for (int j = start; j < end; ++j) {
                MPI_Recv(resultMatrix[j].data(), N, MPI_INT, i, j, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }
        }
    } else {
        // Non-root processes send their computed rows to the root
        for (int i = start_row; i < end_row; ++i) {
            MPI_Send(resultMatrix[i].data(), N, MPI_INT, 0, i, MPI_COMM_WORLD);
        }
    }

    // End timer
    double end_time = MPI_Wtime();

    // Print the matrices and the elapsed time on the root process
    if (rank == 0) {
        printMatrix(matrix1, N, N);
        printMatrix(matrix2, N, N);
        printMatrix(resultMatrix, N, N);

        std::cout << "Matrix multiplication complete." << std::endl;
        std::cout << "Time taken: " << end_time - start_time << " seconds" << std::endl;
    }

    MPI_Finalize();
    return 0;
}
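The program must be built and launched with an MPI toolchain; it will not run in a plain single-file interpreter. A minimal build-and-run sketch, assuming an MPI implementation such as Open MPI or MPICH is installed and the source is saved as matmul_mpi.cpp (the file name is just an example):

    mpic++ -O2 matmul_mpi.cpp -o matmul_mpi      (mpicxx on MPICH)
    mpirun -np 4 ./matmul_mpi

Because both input matrices are filled with 1s, every entry of the printed result should equal N (400 here), and the reported time covers the broadcast of matrix2, the local computation, and the gathering of rows at rank 0.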