Don't use sections. Don't set the number of threads (use the default). Do this:
#include <stdlib.h>

/* Sum the element-wise sums of two vectors in parallel. */
int func(int *V1, int *V2, int length) {
    int result = 0;
    int i;
    #pragma omp parallel for reduction(+:result)
    for (i = 0; i < length; i++) {
        result += V1[i] + V2[i];
    }
    return result;
}

int main() {
    int result1, result2;
    int *array_A, *array_B, *array_X, *array_Y;
    array_A = malloc(sizeof(int)*1000000);
    array_B = malloc(sizeof(int)*1000000);
    array_X = malloc(sizeof(int)*2000000);
    array_Y = malloc(sizeof(int)*2000000);
    // fill the arrays with data here
    result1 = func(array_A, array_B, 1000000);
    result2 = func(array_X, array_Y, 2000000);
    // now do something with result1 and result2
    free(array_A); free(array_B);
    free(array_X); free(array_Y);
    return 0;
}
Since the OP insists on dividing the threads between the two function calls, I have come up with a solution. It's not the right approach and it won't be any faster than the code above, but here it is anyway. Note that it assumes the team has at least two threads, and it divides the work manually instead of using a worksharing construct.
#include <omp.h>

/* Sum two pairs of vectors concurrently: the first half of the thread team
   works on V1/V2, the second half on V3/V4. Assumes the team has at least
   two threads, otherwise nthreads/2 is zero. */
void foo(int *V1, int *V2, int length1, int *V3, int *V4, int length2) {
    int result1 = 0, result2 = 0;
    #pragma omp parallel
    {
        int i, ithread, nthreads, start, finish, result_private, *a1, *a2;
        ithread  = omp_get_thread_num();
        nthreads = omp_get_num_threads();
        if (ithread < nthreads/2) {          /* first half of the team */
            start  = ithread*length1/(nthreads/2);
            finish = (ithread+1)*length1/(nthreads/2);
            a1 = V1; a2 = V2;
        }
        else {                               /* second half of the team */
            start  = (ithread - nthreads/2)*length2/(nthreads - nthreads/2);
            finish = (ithread+1 - nthreads/2)*length2/(nthreads - nthreads/2);
            a1 = V3; a2 = V4;
        }
        result_private = 0;
        /* each thread runs only its own manually computed chunk,
           so no worksharing construct is used here */
        for (i = start; i < finish; i++) {
            result_private += a1[i] + a2[i];
        }
        #pragma omp critical
        {
            if (ithread < nthreads/2) {
                result1 += result_private;
            }
            else {
                result2 += result_private;
            }
        }
    }
    // result1 and result2 hold the two partial sums; return or print them as needed
}
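For completeness, here is a minimal sketch of how foo might be driven. The array names and sizes are simply carried over from the first example as assumptions, and foo as written keeps result1 and result2 local, so in real code you would probably extend it with output parameters. Compile with OpenMP enabled (e.g. gcc -fopenmp).

#include <stdlib.h>

void foo(int *V1, int *V2, int length1, int *V3, int *V4, int length2); // defined above

int main() {
    int *array_A = malloc(sizeof(int)*1000000);
    int *array_B = malloc(sizeof(int)*1000000);
    int *array_X = malloc(sizeof(int)*2000000);
    int *array_Y = malloc(sizeof(int)*2000000);
    // fill the arrays with data here
    // the default thread team is used; foo assumes it has at least two threads
    foo(array_A, array_B, 1000000, array_X, array_Y, 2000000);
    free(array_A); free(array_B);
    free(array_X); free(array_Y);
    return 0;
}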