8
8
#define OutputSize 26
9
9
10
10
int main (int argc, char * argv[])
// NOTE(review): this SOURCE is a scraped unified diff (commit-page view) of
// an MPI array-sum program, not compilable C++. Genuine code lines are
// interleaved with old/new line-number gutters, '-'/'+' diff markers, and
// '@@' hunk headers, and several hunks of the underlying file are omitted.
// Every original line is kept byte-identical below; only '//' review
// comments are added. Program shape (read from the visible '+' side):
// rank MainProc fills a random array, times a sequential sum, scatters
// slices with MPI_Send, every rank sums its slice, and MainProc collects
// the partial sums with MPI_Recv before freeing and finalizing.
11
- {
11
+ {
12
12
// mpi variables
13
13
// status: MPI return code; nProc: number of ranks that actually get data.
int status, rank, size, nProc = 0 ;
14
- double t1, t2;
14
// diff: the timers gained explicit zero-initialization in the new revision.
+ double t1 = 0 , t2 = 0 ;
15
15
// usual variables
16
16
int cols,
17
17
rows,
18
18
arrSize,
19
19
N; // number of elems that will be given to one process
20
- double *arr, // matrix
20
// diff: arr now starts as NULL so the later 'delete[] arr' is safe even
// when the allocation branch is skipped.
+ double *arr = NULL , // matrix
21
21
*buff, // buffer for messages
22
22
partialSum = 0 ,
23
23
arrSum = 0 ;
24
-
24
+
25
25
// read argv
26
26
if (argc > 2 )
27
27
{
// NOTE(review): hunk gap — underlying file lines 28-33 are not shown here
// (presumably the cols/rows parsing that computes arrSize; confirm against
// the full file).
@@ -34,27 +34,27 @@ int main(int argc, char* argv[])
34
34
35
35
// mpi part
36
36
status = MPI_Init (&argc, &argv);
37
// diff: assert() on MPI return codes was replaced with explicit error
// returns, so the checks survive NDEBUG builds.
- assert (status == MPI_SUCCESS);
37
+ if (status != MPI_SUCCESS) { return - 1 ; }
38
38
39
39
status = MPI_Comm_rank (MPI_COMM_WORLD, &rank);
40
- assert (status == MPI_SUCCESS);
40
+ if (status != MPI_SUCCESS) { return - 1 ; }
41
41
42
42
status = MPI_Comm_size (MPI_COMM_WORLD, &size);
43
- assert (status == MPI_SUCCESS);
43
+ if (status != MPI_SUCCESS) { return -1 ; }
44
+
45
// NOTE(review): returning -1 here exits after MPI_Init without calling
// MPI_Finalize/MPI_Abort — verify this early-out is intentional.
+ if (size > 64 ) return -1 ; // limit
44
46
45
- if (size > 64 ) return -1 ; // limit
46
-
47
47
// Never use more ranks than elements; N is the per-rank slice length
// (integer division — the remainder travels with the last slice below).
nProc = (arrSize >= size) ? size : arrSize;
48
48
N = arrSize / nProc;
49
-
49
+
50
50
if (rank == MainProc) // sending
51
- {
51
+ {
52
52
// init arr
53
53
arr = new double [arrSize];
54
54
// Seed with wall-clock time; each element lands in [0.00, 0.99].
std::srand ((unsigned )time (NULL ));
55
55
for (int i = 0 ; i < arrSize; i++)
56
56
arr[i] = (std::rand ()%100 )/100.0 ;
57
-
57
+
58
58
// Echo small arrays only (OutputSize bounds the printout).
if (arrSize < OutputSize)
59
59
{
60
60
for (int i = 0 ; i < arrSize; i++)
// NOTE(review): hunk gap — underlying file lines 61-65 are not shown
// (presumably the element-printing loop body and its closing brace).
@@ -66,23 +66,23 @@ int main(int argc, char* argv[])
66
66
}
67
67
std::cout << std::endl;
68
68
// Main rank reuses its own slice in place — no self-send needed.
buff = arr;
69
-
69
+
70
70
// sequential part
71
- t1 = MPI_Wtime ();
71
+ t1 = MPI_Wtime ();
72
72
for (int i = 0 ; i < arrSize; i++)
73
73
arrSum += arr[i];
74
74
t2 = MPI_Wtime ();
75
75
std::cout << " Sequential Time: " << t2 - t1 << std::endl;
76
76
std::cout << " Sequential Sum = " << arrSum << std::endl;
77
77
// end sequential part
78
-
79
- t1 = MPI_Wtime (); // start
78
+
79
+ t1 = MPI_Wtime (); // start
80
80
// Slices for ranks 1..nProc-2 go out tagged with the destination rank;
// the last slice is sent separately because it also carries the
// arrSize % nProc remainder.
for (int i = 1 ; i < nProc - 1 ; i++)
81
81
status = MPI_Send (&arr[i*N], N, MPI_DOUBLE, i, i, MPI_COMM_WORLD);
82
82
83
83
if (size != 1 ) // sending last part separately in case if arrSize % size != 0
84
84
MPI_Send (&arr[N*(nProc - 1 )], arrSize - N * (nProc - 1 ), MPI_DOUBLE, nProc - 1 , nProc - 1 , MPI_COMM_WORLD);
85
-
85
+
86
86
87
87
}
88
88
else // receiving
// NOTE(review): hunk gap — underlying file lines 89-99 are not shown
// (presumably the worker branch: buff allocation plus the matching
// MPI_Recv of this rank's slice; confirm against the full file).
@@ -100,16 +100,16 @@ int main(int argc, char* argv[])
100
100
}
101
101
}
102
102
103
- // common part
103
+ // common part
104
104
// Every rank sums its own N-element slice.
// NOTE(review): ranks >= nProc (possible when arrSize < size) apparently
// never receive a slice, so buff may be uninitialized here — verify the
// omitted receive branch guards this case.
for (int i = 0 ; i < N; i++)
105
105
partialSum += buff[i];
106
-
106
+
107
107
// sum
108
108
if (rank == MainProc)
109
- {
109
+ {
110
110
arrSum = 0 ;
111
111
arrSum += partialSum;
112
-
112
+
113
113
// Collect one partial sum per remaining rank, in arrival order
// (MPI_ANY_SOURCE, tag 0).
for (int i = 1 ; i < size; i++)
114
114
{
// NOTE(review): MPI_STATUSES_IGNORE is the array form; the matching
// constant for a single MPI_Recv is MPI_STATUS_IGNORE — verify.
MPI_Recv (&partialSum, 1 , MPI_DOUBLE, MPI_ANY_SOURCE, 0 , MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
// NOTE(review): hunk gap — underlying file lines 116-123 are not shown
// (presumably the accumulation into arrSum, the parallel-time printout,
// and the start of the worker 'else' branch).
@@ -124,14 +124,14 @@ int main(int argc, char* argv[])
124
124
{
125
125
// Workers report their partial sum back to MainProc with tag 0.
MPI_Send (&partialSum, 1 , MPI_DOUBLE, MainProc, 0 , MPI_COMM_WORLD);
126
126
}
127
-
127
+
128
128
// del
129
129
// NOTE(review): cleanup branches on literal 0 while the rest of the file
// uses the MainProc macro — presumably the same value; confirm.
if (rank == 0 )
130
130
delete[] arr;
131
131
else
132
132
delete[] buff;
133
133
134
134
status = MPI_Finalize ();
135
- assert (status == MPI_SUCCESS);
135
+ if (status != MPI_SUCCESS) { return - 1 ; }
136
136
return 0 ;
137
137
}
0 commit comments