forked from rai-prashanna/parallelprogramming
-
Notifications
You must be signed in to change notification settings - Fork 0
/
prai-working.c
228 lines (169 loc) · 5.7 KB
/
prai-working.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define ARRAYSIZE 1600000
#define MASTER 0
/* NOTE(review): this file-scope pointer is shadowed by the local
 * `primes` declared in main() and is never referenced anywhere visible
 * in this file — consider removing it (or making it static). */
unsigned long* primes;
/* Scan forward from index i and return the first index whose slot is
 * still non-zero (composites are "knocked out" by zeroing their slot).
 * NOTE(review): the caller must guarantee that a non-zero entry exists
 * at or after i — otherwise the scan runs past the end of the array. */
unsigned long getNextPrime(unsigned long i, unsigned long *x)
{
    for (; x[i] == 0; ++i)
        ;
    return i;
}
/* Fill the sieve table with its own indices: slot k holds the value k
 * for every k in [0, NPRIMES). Composites are zeroed out later by the
 * filter pass. Returns the same pointer it was given. */
unsigned long *init(unsigned long *primes, unsigned long NPRIMES)
{
    unsigned long k = 0;
    while (k < NPRIMES)
    {
        primes[k] = k;
        k++;
    }
    printf("init of primes[] \n");
    return primes;
}
/* Sieve pass: for every surviving (non-zero) value i in
 * [start_index, end_index], zero out all multiples of i up to and
 * including index maxlimit.  maxlimit must be a valid last index into
 * primes[] (i.e. array length - 1).  Returns the same pointer.
 *
 * Fixes over the original:
 *  - values below 2 are skipped: i == 0 caused an infinite loop in the
 *    inner loop (step j += 0) and i == 1 wiped every entry >= 2;
 *  - the advance to the next surviving value is bounded by maxlimit,
 *    so it can no longer read past the end of the array (the original
 *    delegated to getNextPrime(), which scans unbounded).
 */
unsigned long *parallelfilterPrimes(unsigned long *primes, unsigned long start_index, unsigned long maxlimit, unsigned long end_index)
{
    unsigned long i = start_index;
    if (i < 2)
        i = 2; /* 0 and 1 are not valid sieve steps */
    while (i <= end_index && i <= maxlimit)
    {
        if (primes[i] != 0) /* only sieve on surviving values */
        {
            for (unsigned long j = i * 2; j <= maxlimit; j += i)
            {
                primes[j] = 0;
            }
        }
        i++;
    }
    return primes;
}
/* Worker-side sieve driver: derive this task's [start_index, end_index]
 * slice of the table from myid/chunk, run the sieve over it, and print
 * five sample entries per chunk.  Returns the primes pointer.
 *
 * Fixes over the original:
 *  - the last task used end_index = ARRAYSIZE and ARRAYSIZE was passed
 *    as maxlimit, so the sieve wrote one element past the end of the
 *    array; both now use ARRAYSIZE - 1, the last valid index;
 *  - printing an unsigned long with "%ld" is undefined behavior; the
 *    sample loop now uses "%lu";
 *  - unused locals (mysum, i, j) removed.
 *
 * NOTE(review): myoffset is accepted but never used — the slice is
 * recomputed from myid and chunk; confirm that is intentional. */
unsigned long *update(unsigned long *primes, int myoffset, int chunk, int myid, int numtasks)
{
    unsigned long start_index, end_index;
    (void)myoffset; /* kept only for interface compatibility */

    if (myid == 0)
    {
        start_index = 0;
        end_index = chunk;
    }
    else if (myid == numtasks - 1)
    {
        start_index = (unsigned long)myid * chunk + 1;
        end_index = ARRAYSIZE - 1; /* last valid index, not ARRAYSIZE */
    }
    else
    {
        start_index = (unsigned long)myid * chunk + 1;
        end_index = (unsigned long)(myid + 1) * chunk;
    }
    printf("inside update function before prime with task id = %d \n", myid);
    primes = parallelfilterPrimes(primes, start_index, ARRAYSIZE - 1, end_index);
    printf("\n");
    printf("outside filter function before prime with task id = %d \n", myid);
    printf("\n");
    printf("Sample results: \n");
    int offset = 0;
    for (int t = 0; t < numtasks; t++)
    {
        for (int k = 0; k < 5; k++)
            printf("Prime number: %lu\n", primes[offset + k]);
        printf("\n");
        offset = offset + chunk;
    }
    return primes;
}
int main (int argc, char *argv[])
{
int numtasks, taskid, rc, dest, offset, i, j, tag1,
tag2, source, chunksize;
float mysum, sum;
unsigned long* primes;
primes = (unsigned long*)malloc(ARRAYSIZE * sizeof(unsigned long));
//float update(int myoffset, int chunk, int myid);
MPI_Status status;
/***** Initializations *****/
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
if (numtasks % 4 != 0)
{
printf("Quitting. Number of MPI tasks must be divisible by 4.\n");
MPI_Abort(MPI_COMM_WORLD, rc);
exit(0);
}
MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
// printf ("MPI task %d has started...\n", taskid);
chunksize = (ARRAYSIZE / numtasks);
tag2 = 1;
tag1 = 2;
/***** Master task only ******/
if (taskid == MASTER)
{
/* Initialize the array */
// sum = 0;
// for(i=0; i<ARRAYSIZE; i++)
// {
// data[i] = i * 1.0;
// sum = sum + data[i];
// }
primes=init(primes,ARRAYSIZE);
printf("Initialized array sum = %e\n",sum);
/* Send each task its portion of the array - master keeps 1st part */
offset = chunksize;
for (dest=1; dest<numtasks; dest++)
{
MPI_Send(&offset, 1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
MPI_Send(primes, ARRAYSIZE, MPI_FLOAT, dest, tag2, MPI_COMM_WORLD);
// printf("Sent %d elements to task %d offset= %d\n",chunksize,dest,offset);
offset = offset + chunksize;
}
/* Master does its part of the work */
offset = 0;
// mysum = update(offset, chunksize, taskid);
/* Wait to receive results from each task */
for (i=1; i<numtasks; i++)
{
source = i;
MPI_Recv(&offset, 1, MPI_INT, source, tag1, MPI_COMM_WORLD, &status);
MPI_Recv(primes, ARRAYSIZE, MPI_FLOAT, source, tag2,
MPI_COMM_WORLD, &status);
{
//printf("Task with task id %d array element = %f\n",source,(primes +i));
}
}
printf("Got it \n");
/* Get final sum and print sample results */
// MPI_Reduce(&mysum, &sum, 1, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
//// printf("Sample results: \n");
// offset = 0;
// for (i=0; i<numtasks; i++)
// {
// for (j=0; j<5; j++)
// {
// // printf(" %e",data[offset+j]);
// // printf("\n");
// offset = offset + chunksize;
//
// }
// }
// printf("*** Final sum= %e ***\n",sum);
/* Get final sum and print sample results */
// printf("*** Final sum= %e ***\n",sum);
} /* end of master section */
/***** Non-master tasks only *****/
if (taskid > MASTER)
{
/* Receive my portion of array from the master task */
source = MASTER;
MPI_Recv(&offset, 1, MPI_INT, source, tag1, MPI_COMM_WORLD, &status);
MPI_Recv(primes, ARRAYSIZE, MPI_FLOAT, source, tag2,
MPI_COMM_WORLD, &status);
primes = update(primes,offset, chunksize, taskid,numtasks);
/* Send my results back to the master task */
dest = MASTER;
MPI_Send(&offset, 1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
MPI_Send(primes, ARRAYSIZE, MPI_FLOAT, MASTER, tag2, MPI_COMM_WORLD);
//MPI_Reduce(&mysum, &sum, 1, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
} /* end of non-master */
MPI_Finalize();
} /* end of main */