fms_diag_averaging_dummy.F90
!> \brief Contains miscellaneous temporary and experimental routines.
!! \description The contents of this file are temporary and experimental and
!! may be deleted without notice. This file supports the development of the
!! diag_manager_omp_aux module and its test program.
module fms_diag_averaging_dummy
  use omp_lib
  implicit none
contains
  !> Sets the result to the argument xv if it is present, otherwise to the alternate.
  function pr_or_alt(xv, alternate) result(xv_result)
    integer, optional, intent(in) :: xv
    integer, intent(in) :: alternate
    integer :: xv_result
    if (present(xv)) then
      xv_result = xv
    else
      xv_result = alternate
    end if
  end function pr_or_alt
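  !! A minimal usage sketch (hypothetical caller, not part of this module):
  !! pr_or_alt supplies a default when an optional dummy argument is absent.
  !!   subroutine set_threads(requested)
  !!     integer, optional, intent(in) :: requested
  !!     call omp_set_num_threads(pr_or_alt(requested, 1)) !! default to 1 thread
  !!   end subroutine set_threads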
  !> Returns in the_average the average of the_data over its first
  !! (time) dimension, computed with an OpenMP array reduction.
  subroutine get_average_5D_R(the_data, the_average)
    real(kind=8), intent(in) :: the_data(:,:,:,:,:)
    real(kind=8), intent(inout) :: the_average(:,:,:,:)
    integer :: NT, i
    !! Experimental: mark this routine for device compilation.
    !$omp declare target
    NT = size(the_data, 1)
    the_average = 0.0d0
    !! Each thread accumulates into a private, zero-initialized copy of
    !! the_average; the copies are combined when the construct ends.
    !$omp parallel do reduction(+ : the_average)
    do i = 1, NT
      the_average(:,:,:,:) = the_average(:,:,:,:) + the_data(i,:,:,:,:)
    end do
    the_average = the_average / NT
  end subroutine get_average_5D_R
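  !! For reference, an equivalent serial formulation of the routine above
  !! (a sketch using the sum intrinsic; not in the original file):
  !!   the_average = sum(the_data, dim=1) / size(the_data, 1)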
  !> This is a test subroutine.
  !! Returns the average of an input vector and reports per-thread timings.
  subroutine get_average_v1(the_data, the_average, numThreads)
    real(kind=8), intent(in) :: the_data(:)
    integer(kind=4), intent(in) :: numThreads
    real(kind=8), intent(inout) :: the_average
    integer(kind=4) :: N, iter_chunk
    real(kind=8) :: the_sum, local_sum, t1, t2
    real(kind=8) :: elapsed(0:numThreads-1)
    integer :: i
    print *, "Entering subroutine get_average_v1"
    N = size(the_data)
    the_sum = 0.0d0
    iter_chunk = 5 !! TODO: find a rational chunk size and a method of determining it.
    call omp_set_num_threads(numThreads)
    !! Compare with a simple parallel do and a parallel do reduction:
    !! each thread gets a private copy of local_sum ...
    !$omp parallel private(local_sum, t1, t2) shared(the_sum)
    local_sum = 0
    t1 = omp_get_wtime()
    !! The iterations are distributed statically between threads. Also try dynamic?
    !$omp do schedule(static, iter_chunk)
    do i = 1, N
      local_sum = local_sum + the_data(i)
    end do
    t2 = omp_get_wtime()
    !! Each thread has calculated its local_sum. All threads have to add to
    !! the global sum; it is critical that this operation is atomic.
    !$omp critical
    the_sum = the_sum + local_sum
    !$omp end critical
    elapsed(omp_get_thread_num()) = t2 - t1
    !$omp end parallel
    do i = 0, numThreads - 1
      print *, "Elapsed time for thread ", i, " = ", elapsed(i)
    end do
    the_average = the_sum / N
  end subroutine get_average_v1
end module fms_diag_averaging_dummy
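!! A minimal driver sketch (hypothetical; not part of the original file)
!! showing how a test program might exercise both routines. The array
!! shapes, thread count, and expected values below are assumptions.
program fms_diag_averaging_dummy_driver
  use fms_diag_averaging_dummy
  implicit none
  real(kind=8) :: series(1000), avg
  real(kind=8) :: field(4,2,3,2,2), field_avg(2,3,2,2)
  integer(kind=4) :: nthreads = 4
  integer :: i
  !! The average of 1..1000 is 500.5.
  series = [(real(i, kind=8), i = 1, 1000)]
  call get_average_v1(series, avg, nthreads)
  print *, "get_average_v1: ", avg, " (expected 500.5)"
  !! Every time level holds 2.0, so the time average is 2.0 everywhere.
  field = 2.0d0
  call get_average_5D_R(field, field_avg)
  print *, "get_average_5D_R max abs error: ", maxval(abs(field_avg - 2.0d0))
end program fms_diag_averaging_dummy_driver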