FrontISTR 5.2.0
Large-scale structural analysis program with finite element method
hecmw_solver_las_33.f90
1!-------------------------------------------------------------------------------
2! Copyright (c) 2019 FrontISTR Commons
3! This software is released under the MIT License, see LICENSE.txt
4!-------------------------------------------------------------------------------
5
6 module hecmw_solver_las_33
7 use hecmw_util
8 implicit none
9
10 private
11
12 public :: hecmw_matvec_33
13 public :: hecmw_matvec_33_set_async
14 public :: hecmw_matvec_33_unset_async
15 public :: hecmw_matresid_33
16 public :: hecmw_rel_resid_l2_33
17 public :: hecmw_tvec_33
18 public :: hecmw_ttvec_33
19 public :: hecmw_ttmattvec_33
20 public :: hecmw_mat_diag_sr_33
21
22 ! ! for communication hiding in matvec
23 ! integer(kind=kint), save, allocatable :: index_o(:), item_o(:)
24 ! real(kind=kreal), save, allocatable :: A_o(:)
25 logical, save :: async_matvec_flg = .false.
26
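 ! Note on storage (as used by the kernels below): each node carries 3 DOF,
 ! so D(9*i-8:9*i) holds the 3x3 diagonal block of node i in row-major order,
 ! while AL/AU hold the lower/upper off-diagonal 3x3 blocks addressed through
 ! indexL/itemL and indexU/itemU.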
27contains
28
29 !C
30 !C***
31 !C*** hecmw_matvec_33
32 !C***
33 !C
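 ! Computes Y = A * X for the 3x3-block matrix hecMAT. When the MPC matvec
 ! flag is set, the product is applied as T^t A T through hecmw_TtmatTvec_33;
 ! otherwise the plain kernel hecmw_matvec_33_inner is used. time_Ax
 ! accumulates kernel time, COMMtime accumulates communication time.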
34 subroutine hecmw_matvec_33 (hecMESH, hecMAT, X, Y, time_Ax, COMMtime)
35 use hecmw_util
37 implicit none
38 type (hecmwst_local_mesh), intent(in) :: hecmesh
39 type (hecmwst_matrix), intent(in), target :: hecmat
40 real(kind=kreal), intent(in) :: x(:)
41 real(kind=kreal), intent(out) :: y(:)
42 real(kind=kreal), intent(inout) :: time_ax
43 real(kind=kreal), intent(inout), optional :: commtime
44
45 real(kind=kreal) :: tcomm
46 real(kind=kreal), allocatable :: wk(:)
47
48 tcomm = 0.d0
49
50 if (hecmw_mat_get_flag_mpcmatvec(hecmat) /= 0) then
51 allocate(wk(hecmat%NP * hecmat%NDOF))
52 call hecmw_ttmattvec_33(hecmesh, hecmat, x, y, wk, tcomm)
53 deallocate(wk)
54 else
55 call hecmw_matvec_33_inner(hecmesh, hecmat, x, y, time_ax, tcomm)
56 endif
57
58 if (present(commtime)) commtime = commtime + tcomm
59 end subroutine hecmw_matvec_33
60
61 !C
62 !C***
63 !C*** hecmw_matvec_33_set_async
64 !C***
65 !C
66 subroutine hecmw_matvec_33_set_async (hecMAT)
67 use hecmw_util
68 implicit none
69 type (hecmwst_matrix), intent(in) :: hecmat
70 ! integer(kind=kint) :: i, j, jS, jE, idx, in
71
72 ! allocate(index_o(0:hecMAT%N))
73 ! index_o(0) = 0
74 ! do i = 1, hecMAT%N
75 ! jS= hecMAT%indexU(i-1) + 1
76 ! jE= hecMAT%indexU(i )
77 ! idx = index_o(i-1)
78 ! do j= jS, jE
79 ! in = hecMAT%itemU(j)
80 ! if (in <= hecMAT%N) cycle
81 ! idx = idx + 1
82 ! enddo
83 ! index_o(i) = idx
84 ! enddo
85 ! allocate(item_o(idx))
86 ! allocate(A_o(idx*9))
87 ! do i = 1, hecMAT%N
88 ! jS= hecMAT%indexU(i-1) + 1
89 ! jE= hecMAT%indexU(i )
90 ! idx = index_o(i-1)
91 ! do j= jS, jE
92 ! in = hecMAT%itemU(j)
93 ! if (in <= hecMAT%N) cycle
94 ! idx = idx + 1
95 ! item_o(idx) = hecMAT%itemU(j) - hecMAT%N
96 ! A_o(9*idx-8:9*idx) = hecMAT%AU(9*j-8:9*j)
97 ! enddo
98 ! enddo
99 ! async_matvec_flg = .true.
100 end subroutine hecmw_matvec_33_set_async
101
102 !C
103 !C***
104 !C*** hecmw_matvec_33_unset_async
105 !C***
106 !C
107 subroutine hecmw_matvec_33_unset_async
108 implicit none
109 ! if (allocated(index_o)) deallocate(index_o)
110 ! if (allocated(item_o)) deallocate(item_o)
111 ! if (allocated(A_o)) deallocate(A_o)
112 ! async_matvec_flg = .false.
113 end subroutine hecmw_matvec_33_unset_async
114
115 !C
116 !C***
117 !C*** hecmw_matvec_33_inner ( private subroutine )
118 !C***
119 !C
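 ! Plain matrix-vector product Y = A * X. If the JAD (jagged diagonal)
 ! storage has been initialized, the product is delegated to hecmw_JAD_MATVEC;
 ! otherwise a blocked 3x3 CSR kernel runs under OpenMP after the halo values
 ! of X are exchanged with hecmw_update_3_R. Any contact entries in
 ! hecMAT%cmat are added at the end via hecmw_cmat_multvec_add.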
120 subroutine hecmw_matvec_33_inner (hecMESH, hecMAT, X, Y, time_Ax, COMMtime)
121 use hecmw_util
127 !$ use omp_lib
128
129 implicit none
130 type (hecmwst_local_mesh), intent(in) :: hecmesh
131 type (hecmwst_matrix), intent(in), target :: hecmat
132 real(kind=kreal), intent(in) :: x(:)
133 real(kind=kreal), intent(out) :: y(:)
134 real(kind=kreal), intent(inout) :: time_ax
135 real(kind=kreal), intent(inout), optional :: commtime
136
137 real(kind=kreal) :: start_time, end_time, tcomm
138 integer(kind=kint) :: i, j, js, je, in
139 real(kind=kreal) :: yv1, yv2, yv3, x1, x2, x3
140
141 integer(kind=kint) :: n, np
142 integer(kind=kint), pointer :: indexl(:), iteml(:), indexu(:), itemu(:)
143 real(kind=kreal), pointer :: al(:), au(:), d(:)
144
145 ! added for tuning >>>
146 integer, parameter :: numofblockperthread = 100
147 logical, save :: isfirst = .true.
148 integer, save :: numofthread = 1
149 integer, save, allocatable :: startpos(:), endpos(:)
150 integer(kind=kint), save :: sectorcachesize0, sectorcachesize1
151 integer(kind=kint) :: threadnum, blocknum, numofblock
152 integer(kind=kint) :: numofelement, elementcount, blockindex
153 real(kind=kreal) :: numofelementperblock
154 ! <<< added for tuning
155
156 if (hecmw_jad_is_initialized().ne.0) then
157 tcomm = 0.d0
158 start_time = hecmw_wtime()
159 call hecmw_jad_matvec(hecmesh, hecmat, x, y, tcomm)
160 end_time = hecmw_wtime()
161 time_ax = time_ax + end_time - start_time - tcomm
162 if (present(commtime)) commtime = commtime + tcomm
163 else
164
165 n = hecmat%N
166 np = hecmat%NP
167 indexl => hecmat%indexL
168 indexu => hecmat%indexU
169 iteml => hecmat%itemL
170 itemu => hecmat%itemU
171 al => hecmat%AL
172 au => hecmat%AU
173 d => hecmat%D
174
175 ! added for tuning >>>
176 if (.not. isfirst) then
177 numofblock = numofthread * numofblockperthread
178 if (endpos(numofblock-1) .ne. n-1) then
179 deallocate(startpos, endpos)
180 isfirst = .true.
181 endif
182 endif
183 if (isfirst) then
184 !$ numOfThread = omp_get_max_threads()
185 numofblock = numofthread * numofblockperthread
186 allocate (startpos(0 : numofblock - 1), endpos(0 : numofblock - 1))
187 numofelement = n + indexl(n) + indexu(n)
188 numofelementperblock = dble(numofelement) / numofblock
189 blocknum = 0
190 elementcount = 0
191 startpos(blocknum) = 1
192 do i= 1, n
193 elementcount = elementcount + 1
194 elementcount = elementcount + (indexl(i) - indexl(i-1))
195 elementcount = elementcount + (indexu(i) - indexu(i-1))
196 if (elementcount > (blocknum + 1) * numofelementperblock) then
197 endpos(blocknum) = i
198 ! write(9000+hecMESH%my_rank,*) mod(blockNum, numOfThread), &
199 ! startPos(blockNum), endPos(blockNum)
200 blocknum = blocknum + 1
201 startpos(blocknum) = i + 1
202 if (blocknum == (numofblock - 1)) exit
203 endif
204 enddo
205 endpos(blocknum) = n
206 ! write(9000+hecMESH%my_rank,*) mod(blockNum, numOfThread), &
207 ! startPos(blockNum), endPos(blockNum)
208 ! for irregular data
209 do i= blocknum+1, numofblock-1
210 startpos(i) = n
211 endpos(i) = n-1
212 ! write(9000+hecMESH%my_rank,*) mod(i, numOfThread), &
213 ! startPos(i), endPos(i)
214 end do
215
216 call hecmw_tuning_fx_calc_sector_cache( n, 3, &
217 sectorcachesize0, sectorcachesize1)
218
219 isfirst = .false.
220 endif
221 ! <<< added for tuning
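 ! Load balancing (set up on the first call only): the rows are split into
 ! numOfThread * numOfBlockPerThread blocks of roughly equal nonzero count;
 ! thread t then processes blocks t, t+numOfThread, t+2*numOfThread, ...,
 ! so every thread sees a similar amount of work.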
222
223 start_time= hecmw_wtime()
224 ! if (async_matvec_flg) then
225 ! call hecmw_update_3_R_async (hecMESH, X, NP, ireq)
226 ! else
227 call hecmw_update_3_r (hecmesh, x, np)
228 ! endif
229 end_time= hecmw_wtime()
230 if (present(commtime)) commtime = commtime + end_time - start_time
231
232 start_time = hecmw_wtime()
233
234 !call fapp_start("loopInMatvec33", 1, 0)
235 !call start_collection("loopInMatvec33")
236
237 !OCL CACHE_SECTOR_SIZE(sectorCacheSize0,sectorCacheSize1)
238 !OCL CACHE_SUBSECTOR_ASSIGN(X)
239
240 !$OMP PARALLEL DEFAULT(NONE) &
241 !$OMP&PRIVATE(i,X1,X2,X3,YV1,YV2,YV3,jS,jE,j,in,threadNum,blockNum,blockIndex) &
242 !$OMP&SHARED(D,AL,AU,indexL,itemL,indexU,itemU,X,Y,startPos,endPos,numOfThread,N,async_matvec_flg)
243 threadnum = 0
244 !$ threadNum = omp_get_thread_num()
245 do blocknum = 0 , numofblockperthread - 1
246 blockindex = blocknum * numofthread + threadnum
247 do i = startpos(blockindex), endpos(blockindex)
248 x1= x(3*i-2)
249 x2= x(3*i-1)
250 x3= x(3*i )
251 yv1= d(9*i-8)*x1 + d(9*i-7)*x2 + d(9*i-6)*x3
252 yv2= d(9*i-5)*x1 + d(9*i-4)*x2 + d(9*i-3)*x3
253 yv3= d(9*i-2)*x1 + d(9*i-1)*x2 + d(9*i )*x3
254
255 js= indexl(i-1) + 1
256 je= indexl(i )
257 do j= js, je
258 in = iteml(j)
259 x1= x(3*in-2)
260 x2= x(3*in-1)
261 x3= x(3*in )
262 yv1= yv1 + al(9*j-8)*x1 + al(9*j-7)*x2 + al(9*j-6)*x3
263 yv2= yv2 + al(9*j-5)*x1 + al(9*j-4)*x2 + al(9*j-3)*x3
264 yv3= yv3 + al(9*j-2)*x1 + al(9*j-1)*x2 + al(9*j )*x3
265 enddo
266 js= indexu(i-1) + 1
267 je= indexu(i )
268 do j= js, je
269 in = itemu(j)
270 ! if (async_matvec_flg .and. in > N) cycle
271 x1= x(3*in-2)
272 x2= x(3*in-1)
273 x3= x(3*in )
274 yv1= yv1 + au(9*j-8)*x1 + au(9*j-7)*x2 + au(9*j-6)*x3
275 yv2= yv2 + au(9*j-5)*x1 + au(9*j-4)*x2 + au(9*j-3)*x3
276 yv3= yv3 + au(9*j-2)*x1 + au(9*j-1)*x2 + au(9*j )*x3
277 enddo
278 y(3*i-2)= yv1
279 y(3*i-1)= yv2
280 y(3*i )= yv3
281 enddo
282 enddo
283 !$OMP END PARALLEL
284
285 !OCL END_CACHE_SUBSECTOR
286 !OCL END_CACHE_SECTOR_SIZE
287
288 !call stop_collection("loopInMatvec33")
289 !call fapp_stop("loopInMatvec33", 1, 0)
290
291 end_time = hecmw_wtime()
292 time_ax = time_ax + end_time - start_time
293
294 ! if (async_matvec_flg) then
295 ! START_TIME= HECMW_WTIME()
296 ! call hecmw_update_3_R_wait (hecMESH, ireq)
297 ! END_TIME= HECMW_WTIME()
298 ! if (present(COMMtime)) COMMtime = COMMtime + END_TIME - START_TIME
299
300 ! START_TIME = hecmw_Wtime()
301
302 ! do i = 1, N
303 ! jS= index_o(i-1) + 1
304 ! jE= index_o(i )
305 ! if (jS > jE) cycle
306 ! YV1= 0.d0
307 ! YV2= 0.d0
308 ! YV3= 0.d0
309 ! do j=jS, jE
310 ! in = item_o(j)
311 ! X1= X(3*(N+in)-2)
312 ! X2= X(3*(N+in)-1)
313 ! X3= X(3*(N+in) )
314 ! YV1= YV1 + A_o(9*j-8)*X1 + A_o(9*j-7)*X2 + A_o(9*j-6)*X3
315 ! YV2= YV2 + A_o(9*j-5)*X1 + A_o(9*j-4)*X2 + A_o(9*j-3)*X3
316 ! YV3= YV3 + A_o(9*j-2)*X1 + A_o(9*j-1)*X2 + A_o(9*j )*X3
317 ! enddo
318 ! Y(3*i-2)= Y(3*i-2)+YV1
319 ! Y(3*i-1)= Y(3*i-1)+YV2
320 ! Y(3*i )= Y(3*i )+YV3
321 ! enddo
322
323 ! END_TIME = hecmw_Wtime()
324 ! time_Ax = time_Ax + END_TIME - START_TIME
325 ! endif
326
327 endif
328
329 if (hecmat%cmat%n_val > 0) then
330 call hecmw_cmat_multvec_add( hecmat%cmat, x, y, np * hecmat%NDOF )
331 end if
332
333 end subroutine hecmw_matvec_33_inner
334
335 !C
336 !C***
337 !C*** hecmw_matresid_33
338 !C***
339 !C
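 ! Residual R = B - A * X, computed with hecmw_matvec_33 followed by an
 ! OpenMP-parallel vector update over the N internal nodes.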
340 subroutine hecmw_matresid_33 (hecMESH, hecMAT, X, B, R, COMMtime)
341 use hecmw_util
342 implicit none
343 type (hecmwst_local_mesh), intent(in) :: hecmesh
344 type (hecmwst_matrix), intent(in) :: hecmat
345 real(kind=kreal), intent(in) :: x(:), b(:)
346 real(kind=kreal), intent(out) :: r(:)
347 real(kind=kreal), intent(inout), optional :: commtime
348
349 integer(kind=kint) :: i
350 real(kind=kreal) :: tcomm
351
352 tcomm = 0.d0
353 call hecmw_matvec_33 (hecmesh, hecmat, x, r, tcomm)
354 if (present(commtime)) commtime = commtime + tcomm
355 !$omp parallel default(none),private(i),shared(hecMAT,R,B)
356 !$omp do
357 do i = 1, hecmat%N * 3
358 r(i) = b(i) - r(i)
359 enddo
360 !$omp end do
361 !$omp end parallel
362 end subroutine hecmw_matresid_33
363
364 !C
365 !C***
366 !C*** hecmw_rel_resid_L2_33
367 !C***
368 !C
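 ! Returns the relative L2 residual ||B - A*X||_2 / ||B||_2
 ! (with ||B||_2 replaced by 1 when B is identically zero).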
369 function hecmw_rel_resid_l2_33 (hecMESH, hecMAT, COMMtime)
370 use hecmw_util
372 implicit none
373 real(kind=kreal) :: hecmw_rel_resid_l2_33
374 type ( hecmwst_local_mesh ), intent(in) :: hecmesh
375 type ( hecmwst_matrix ), intent(in) :: hecmat
376 real(kind=kreal), intent(inout), optional :: commtime
377
378 real(kind=kreal), allocatable :: r(:)
379 real(kind=kreal) :: bnorm2, rnorm2
380 real(kind=kreal) :: tcomm
381
382 allocate(r(hecmat%NDOF*hecmat%NP))
383
384 tcomm = 0.d0
385 call hecmw_innerproduct_r(hecmesh, hecmat%NDOF, &
386 hecmat%B, hecmat%B, bnorm2, tcomm)
387 if (bnorm2 == 0.d0) then
388 bnorm2 = 1.d0
389 endif
390 call hecmw_matresid_33(hecmesh, hecmat, hecmat%X, hecmat%B, r, tcomm)
391 call hecmw_innerproduct_r(hecmesh, hecmat%NDOF, r, r, rnorm2, tcomm)
392 hecmw_rel_resid_l2_33 = sqrt(rnorm2 / bnorm2)
393
394 if (present(commtime)) commtime = commtime + tcomm
395
396 deallocate(r)
397 end function hecmw_rel_resid_l2_33
398
399 !C
400 !C***
401 !C*** hecmw_Tvec_33
402 !C***
403 !C
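 ! Applies the MPC transformation: Y = T * X. X is first copied to Y, then
 ! for every MPC equation whose DOFs are all <= 3 the slave DOF kk is
 ! overwritten with -sum_j mpc_val(j) * X(jj) over the remaining terms.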
404 subroutine hecmw_tvec_33 (hecMESH, X, Y, COMMtime)
405 use hecmw_util
407 implicit none
408 type (hecmwst_local_mesh), intent(in) :: hecmesh
409 real(kind=kreal), intent(in) :: x(:)
410 real(kind=kreal), intent(out) :: y(:)
411 real(kind=kreal), intent(inout) :: commtime
412
413 real(kind=kreal) :: start_time, end_time
414 integer(kind=kint) :: i, j, jj, k, kk
415
416 start_time= hecmw_wtime()
417 call hecmw_update_3_r (hecmesh, x, hecmesh%n_node)
418 end_time= hecmw_wtime()
419 commtime = commtime + end_time - start_time
420
421 !$omp parallel default(none),private(i,k,kk,j,jj),shared(hecMESH,X,Y)
422 !$omp do
423 do i= 1, hecmesh%nn_internal * hecmesh%n_dof
424 y(i)= x(i)
425 enddo
426 !$omp end do
427
428 !$omp do
429 outer: do i= 1, hecmesh%mpc%n_mpc
430 do j= hecmesh%mpc%mpc_index(i-1) + 1, hecmesh%mpc%mpc_index(i)
431 if (hecmesh%mpc%mpc_dof(j) > 3) cycle outer
432 enddo
433 k = hecmesh%mpc%mpc_index(i-1) + 1
434 kk = 3 * (hecmesh%mpc%mpc_item(k) - 1) + hecmesh%mpc%mpc_dof(k)
435 y(kk) = 0.d0
436 do j= hecmesh%mpc%mpc_index(i-1) + 2, hecmesh%mpc%mpc_index(i)
437 jj = 3 * (hecmesh%mpc%mpc_item(j) - 1) + hecmesh%mpc%mpc_dof(j)
438 y(kk) = y(kk) - hecmesh%mpc%mpc_val(j) * x(jj)
439 enddo
440 enddo outer
441 !$omp end do
442 !$omp end parallel
443
444 end subroutine hecmw_tvec_33
445
446 !C
447 !C***
448 !C*** hecmw_Ttvec_33
449 !C***
450 !C
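 ! Applies the transposed MPC transformation: Y = T^t * X. X is copied to Y,
 ! the slave entry Y(kk) is zeroed, and each slave value X(kk) is scattered
 ! with weight -mpc_val(j) into the coupled DOFs Y(jj).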
451 subroutine hecmw_ttvec_33 (hecMESH, X, Y, COMMtime)
452 use hecmw_util
454 implicit none
455 type (hecmwst_local_mesh), intent(in) :: hecmesh
456 real(kind=kreal), intent(in) :: x(:)
457 real(kind=kreal), intent(out) :: y(:)
458 real(kind=kreal), intent(inout) :: commtime
459
460 real(kind=kreal) :: start_time, end_time
461 integer(kind=kint) :: i, j, jj, k, kk
462
463 start_time= hecmw_wtime()
464 call hecmw_update_3_r (hecmesh, x, hecmesh%n_node)
465 end_time= hecmw_wtime()
466 commtime = commtime + end_time - start_time
467
468 !$omp parallel default(none),private(i,k,kk,j,jj),shared(hecMESH,X,Y)
469 !$omp do
470 do i= 1, hecmesh%nn_internal * hecmesh%n_dof
471 y(i)= x(i)
472 enddo
473 !$omp end do
474
475 !$omp do
476 outer: do i= 1, hecmesh%mpc%n_mpc
477 do j= hecmesh%mpc%mpc_index(i-1) + 1, hecmesh%mpc%mpc_index(i)
478 if (hecmesh%mpc%mpc_dof(j) > 3) cycle outer
479 enddo
480 k = hecmesh%mpc%mpc_index(i-1) + 1
481 kk = 3 * (hecmesh%mpc%mpc_item(k) - 1) + hecmesh%mpc%mpc_dof(k)
482 y(kk) = 0.d0
483 do j= hecmesh%mpc%mpc_index(i-1) + 2, hecmesh%mpc%mpc_index(i)
484 jj = 3 * (hecmesh%mpc%mpc_item(j) - 1) + hecmesh%mpc%mpc_dof(j)
485 y(jj) = y(jj) - hecmesh%mpc%mpc_val(j) * x(kk)
486 enddo
487 enddo outer
488 !$omp end do
489 !$omp end parallel
490
491 end subroutine hecmw_ttvec_33
492
493 !C
494 !C***
495 !C*** hecmw_TtmatTvec_33
496 !C***
497 !C
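 ! Y = T^t * A * T * X, using W as a work vector:
 ! Y <- T*X, W <- A*Y, Y <- T^t*W.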
498 subroutine hecmw_ttmattvec_33 (hecMESH, hecMAT, X, Y, W, COMMtime)
499 use hecmw_util
500 implicit none
501 type (hecmwst_local_mesh), intent(in) :: hecmesh
502 type (hecmwst_matrix), intent(in) :: hecmat
503 real(kind=kreal), intent(in) :: x(:)
504 real(kind=kreal), intent(out) :: y(:), w(:)
505 real(kind=kreal), intent(inout) :: commtime
506
507 call hecmw_tvec_33(hecmesh, x, y, commtime)
508 call hecmw_matvec_33_inner(hecmesh, hecmat, y, w, commtime)
509 call hecmw_ttvec_33(hecmesh, w, y, commtime)
510
511 end subroutine hecmw_ttmattvec_33
512
513
514 !C
515 !C***
516 !C*** hecmw_mat_diag_sr_33
517 !C***
518 !C
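 ! Sends the 3x3 diagonal blocks of the owned nodes and receives those of the
 ! external (halo) nodes, one block column at a time, via hecmw_update_3_R,
 ! so that D is consistent on all NP local nodes.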
519 subroutine hecmw_mat_diag_sr_33(hecMESH, hecMAT, COMMtime)
520 use hecmw_util
522 implicit none
523 type (hecmwst_local_mesh), intent(in) :: hecmesh
524 type (hecmwst_matrix), intent(inout), target :: hecmat
525 real(kind=kreal), intent(inout), optional :: commtime
526 real(kind=kreal), allocatable :: w(:,:)
527 real(kind=kreal), pointer :: d(:)
528 integer(kind=kint) :: ip
529 real(kind=kreal) :: start_time, end_time
530 allocate(w(3*hecmat%NP,3))
531 d => hecmat%D
532 do ip= 1, hecmat%N
533 w(3*ip-2,1)= d(9*ip-8); w(3*ip-2,2)= d(9*ip-7); w(3*ip-2,3)= d(9*ip-6)
534 w(3*ip-1,1)= d(9*ip-5); w(3*ip-1,2)= d(9*ip-4); w(3*ip-1,3)= d(9*ip-3)
535 w(3*ip ,1)= d(9*ip-2); w(3*ip ,2)= d(9*ip-1); w(3*ip ,3)= d(9*ip )
536 enddo
537 start_time= hecmw_wtime()
538 call hecmw_update_3_r (hecmesh, w(:,1), hecmat%NP)
539 call hecmw_update_3_r (hecmesh, w(:,2), hecmat%NP)
540 call hecmw_update_3_r (hecmesh, w(:,3), hecmat%NP)
541 end_time= hecmw_wtime()
542 if (present(commtime)) commtime = commtime + end_time - start_time
543 do ip= hecmat%N+1, hecmat%NP
544 d(9*ip-8)= w(3*ip-2,1); d(9*ip-7)= w(3*ip-2,2); d(9*ip-6)= w(3*ip-2,3)
545 d(9*ip-5)= w(3*ip-1,1); d(9*ip-4)= w(3*ip-1,2); d(9*ip-3)= w(3*ip-1,3)
546 d(9*ip-2)= w(3*ip ,1); d(9*ip-1)= w(3*ip ,2); d(9*ip )= w(3*ip ,3)
547 enddo
548 deallocate(w)
549 end subroutine hecmw_mat_diag_sr_33
550
551end module hecmw_solver_las_33
Jagged Diagonal Matrix storage for vector processors. Original code was provided by JAMSTEC.
Definition: hecmw_jadm.f90:8
integer(kind=kint) function, public hecmw_jad_is_initialized()
Definition: hecmw_jadm.f90:56
subroutine, public hecmw_jad_matvec(hecmesh, hecmat, x, y, commtime)
Definition: hecmw_jadm.f90:61
subroutine, public hecmw_cmat_multvec_add(cmat, x, y, len)
integer(kind=kint) function, public hecmw_mat_get_flag_mpcmatvec(hecmat)
subroutine, public hecmw_ttvec_33(hecmesh, x, y, commtime)
subroutine, public hecmw_tvec_33(hecmesh, x, y, commtime)
subroutine, public hecmw_matvec_33_unset_async
subroutine, public hecmw_matvec_33_set_async(hecmat)
subroutine, public hecmw_matresid_33(hecmesh, hecmat, x, b, r, commtime)
real(kind=kreal) function, public hecmw_rel_resid_l2_33(hecmesh, hecmat, commtime)
subroutine, public hecmw_mat_diag_sr_33(hecmesh, hecmat, commtime)
subroutine, public hecmw_matvec_33(hecmesh, hecmat, x, y, time_ax, commtime)
subroutine, public hecmw_ttmattvec_33(hecmesh, hecmat, x, y, w, commtime)
subroutine hecmw_innerproduct_r(hecmesh, ndof, x, y, sum, commtime)
subroutine, public hecmw_tuning_fx_calc_sector_cache(n, ndof, sectorcachesize0, sectorcachesize1)
I/O and Utility.
Definition: hecmw_util_f.F90:7
integer(kind=4), parameter kreal
real(kind=kreal) function hecmw_wtime()
subroutine hecmw_update_3_r(hecmesh, val, n)
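For reference, the sketch below shows one way the public interface above might be driven from user code. It is a hypothetical wrapper (check_residual_33 is not part of the library) and assumes hecMESH and hecMAT have already been created and assembled elsewhere with NDOF = 3, with hecMAT%X holding the current solution and hecMAT%B the right-hand side.

  subroutine check_residual_33(hecMESH, hecMAT)
    use hecmw_util
    use hecmw_solver_las_33
    implicit none
    type (hecmwST_local_mesh), intent(in)    :: hecMESH
    type (hecmwST_matrix),     intent(inout) :: hecMAT

    real(kind=kreal), allocatable :: y(:)
    real(kind=kreal) :: time_Ax, commtime, relres

    allocate(y(hecMAT%NDOF * hecMAT%NP))   ! internal + external (halo) nodes
    time_Ax  = 0.d0
    commtime = 0.d0

    ! y = A * x, taking x from hecMAT%X (the halo exchange happens inside)
    call hecmw_matvec_33(hecMESH, hecMAT, hecMAT%X, y, time_Ax, commtime)

    ! relative residual ||B - A*X||_2 / ||B||_2
    relres = hecmw_rel_resid_l2_33(hecMESH, hecMAT, commtime)
    write(*,*) 'relative L2 residual =', relres

    deallocate(y)
  end subroutine check_residual_33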