Actual source code: mpiaij.c
petsc-3.13.1 2020-05-02
1: #include <../src/mat/impls/aij/mpi/mpiaij.h>
2: #include <petsc/private/vecimpl.h>
3: #include <petsc/private/vecscatterimpl.h>
4: #include <petsc/private/isimpl.h>
5: #include <petscblaslapack.h>
6: #include <petscsf.h>
7: #include <petsc/private/hashmapi.h>
9: /*MC
10: MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.
12: This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
13: and MATMPIAIJ otherwise. As a result, for single process communicators,
14: MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
15: for communicators controlling multiple processes. It is recommended that you call both of
16: the above preallocation routines for simplicity.
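
   Example (a minimal sketch; the sizes M, N and the per-row estimates 5 and 2 are
   illustrative, not prescriptive):

      MatCreate(comm,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
      MatSetType(A,MATAIJ);
      MatSeqAIJSetPreallocation(A,5,NULL);         /* used when comm has one process       */
      MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);  /* used when comm has multiple processes */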
18: Options Database Keys:
19: . -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()
21: Developer Notes:
22: Subclasses include MATAIJCUSP, MATAIJCUSPARSE, MATAIJPERM, MATAIJSELL, MATAIJMKL, and MATAIJCRL; the type also automatically switches over to use inodes when
23: enough exist.
25: Level: beginner
27: .seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
28: M*/
30: /*MC
31: MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.
33: This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
34: and MATMPIAIJCRL otherwise. As a result, for single process communicators,
35: MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
36: for communicators controlling multiple processes. It is recommended that you call both of
37: the above preallocation routines for simplicity.
39: Options Database Keys:
40: . -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()
42: Level: beginner
44: .seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
45: M*/
47: static PetscErrorCode MatBindToCPU_MPIAIJ(Mat A,PetscBool flg)
48: {
49: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
53: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_VIENNACL)
54: A->boundtocpu = flg;
55: #endif
56: if (a->A) {
57: MatBindToCPU(a->A,flg);
58: }
59: if (a->B) {
60: MatBindToCPU(a->B,flg);
61: }
62: return(0);
63: }
66: PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
67: {
69: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)M->data;
72: if (mat->A) {
73: MatSetBlockSizes(mat->A,rbs,cbs);
74: MatSetBlockSizes(mat->B,rbs,1);
75: }
76: return(0);
77: }
79: PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
80: {
81: PetscErrorCode ierr;
82: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)M->data;
83: Mat_SeqAIJ *a = (Mat_SeqAIJ*)mat->A->data;
84: Mat_SeqAIJ *b = (Mat_SeqAIJ*)mat->B->data;
85: const PetscInt *ia,*ib;
86: const MatScalar *aa,*bb;
87: PetscInt na,nb,i,j,*rows,cnt=0,n0rows;
88: PetscInt m = M->rmap->n,rstart = M->rmap->rstart;
91: *keptrows = 0;
92: ia = a->i;
93: ib = b->i;
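/* first pass: count the locally zero rows; the second pass below collects the
   global indices of the rows that are kept */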
94: for (i=0; i<m; i++) {
95: na = ia[i+1] - ia[i];
96: nb = ib[i+1] - ib[i];
97: if (!na && !nb) {
98: cnt++;
99: goto ok1;
100: }
101: aa = a->a + ia[i];
102: for (j=0; j<na; j++) {
103: if (aa[j] != 0.0) goto ok1;
104: }
105: bb = b->a + ib[i];
106: for (j=0; j <nb; j++) {
107: if (bb[j] != 0.0) goto ok1;
108: }
109: cnt++;
110: ok1:;
111: }
112: MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));
113: if (!n0rows) return(0);
114: PetscMalloc1(M->rmap->n-cnt,&rows);
115: cnt = 0;
116: for (i=0; i<m; i++) {
117: na = ia[i+1] - ia[i];
118: nb = ib[i+1] - ib[i];
119: if (!na && !nb) continue;
120: aa = a->a + ia[i];
121: for (j=0; j<na;j++) {
122: if (aa[j] != 0.0) {
123: rows[cnt++] = rstart + i;
124: goto ok2;
125: }
126: }
127: bb = b->a + ib[i];
128: for (j=0; j<nb; j++) {
129: if (bb[j] != 0.0) {
130: rows[cnt++] = rstart + i;
131: goto ok2;
132: }
133: }
134: ok2:;
135: }
136: ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);
137: return(0);
138: }
140: PetscErrorCode MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
141: {
142: PetscErrorCode ierr;
143: Mat_MPIAIJ *aij = (Mat_MPIAIJ*) Y->data;
144: PetscBool cong;
147: MatHasCongruentLayouts(Y,&cong);
148: if (Y->assembled && cong) {
149: MatDiagonalSet(aij->A,D,is);
150: } else {
151: MatDiagonalSet_Default(Y,D,is);
152: }
153: return(0);
154: }
156: PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
157: {
158: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)M->data;
160: PetscInt i,rstart,nrows,*rows;
163: *zrows = NULL;
164: MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);
165: MatGetOwnershipRange(M,&rstart,NULL);
166: for (i=0; i<nrows; i++) rows[i] += rstart;
167: ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);
168: return(0);
169: }
171: PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
172: {
174: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
175: PetscInt i,n,*garray = aij->garray;
176: Mat_SeqAIJ *a_aij = (Mat_SeqAIJ*) aij->A->data;
177: Mat_SeqAIJ *b_aij = (Mat_SeqAIJ*) aij->B->data;
178: PetscReal *work;
181: MatGetSize(A,NULL,&n);
182: PetscCalloc1(n,&work);
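/* accumulate local contributions into a work array of full global column length;
   garray maps local off-diagonal column indices back to global columns */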
183: if (type == NORM_2) {
184: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
185: work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
186: }
187: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
188: work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
189: }
190: } else if (type == NORM_1) {
191: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
192: work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
193: }
194: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
195: work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
196: }
197: } else if (type == NORM_INFINITY) {
198: for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
199: work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
200: }
201: for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
202: work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
203: }
205: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
206: if (type == NORM_INFINITY) {
207: MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));
208: } else {
209: MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));
210: }
211: PetscFree(work);
212: if (type == NORM_2) {
213: for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
214: }
215: return(0);
216: }
218: PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
219: {
220: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
221: IS sis,gis;
222: PetscErrorCode ierr;
223: const PetscInt *isis,*igis;
224: PetscInt n,*iis,nsis,ngis,rstart,i;
227: MatFindOffBlockDiagonalEntries(a->A,&sis);
228: MatFindNonzeroRows(a->B,&gis);
229: ISGetSize(gis,&ngis);
230: ISGetSize(sis,&nsis);
231: ISGetIndices(sis,&isis);
232: ISGetIndices(gis,&igis);
234: PetscMalloc1(ngis+nsis,&iis);
235: PetscArraycpy(iis,igis,ngis);
236: PetscArraycpy(iis+ngis,isis,nsis);
237: n = ngis + nsis;
238: PetscSortRemoveDupsInt(&n,iis);
239: MatGetOwnershipRange(A,&rstart,NULL);
240: for (i=0; i<n; i++) iis[i] += rstart;
241: ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);
243: ISRestoreIndices(sis,&isis);
244: ISRestoreIndices(gis,&igis);
245: ISDestroy(&sis);
246: ISDestroy(&gis);
247: return(0);
248: }
250: /*
251: Distributes a SeqAIJ matrix across a set of processes. Code stolen from
252: MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.
254: Only for square matrices
256: Used by a preconditioner, hence PETSC_EXTERN
257: */
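/*
   Usage sketch (illustrative; not taken from this file): gmat holds the whole
   matrix on rank 0 and each rank passes the number m of local rows it wants;
   the m values must sum to the global row count.

      MatDistribute_MPIAIJ(comm,gmat,m,MAT_INITIAL_MATRIX,&mat);
      ... change numerical values in gmat on rank 0 ...
      MatDistribute_MPIAIJ(comm,gmat,m,MAT_REUSE_MATRIX,&mat);
*/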
258: PETSC_EXTERN PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
259: {
260: PetscMPIInt rank,size;
261: PetscInt *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz = 0,*gmataj,cnt,row,*ld,bses[2];
263: Mat mat;
264: Mat_SeqAIJ *gmata;
265: PetscMPIInt tag;
266: MPI_Status status;
267: PetscBool aij;
268: MatScalar *gmataa,*ao,*ad,*gmataarestore=0;
271: MPI_Comm_rank(comm,&rank);
272: MPI_Comm_size(comm,&size);
273: if (!rank) {
274: PetscObjectTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);
275: if (!aij) SETERRQ1(PetscObjectComm((PetscObject)gmat),PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
276: }
277: if (reuse == MAT_INITIAL_MATRIX) {
278: MatCreate(comm,&mat);
279: MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
280: MatGetBlockSizes(gmat,&bses[0],&bses[1]);
281: MPI_Bcast(bses,2,MPIU_INT,0,comm);
282: MatSetBlockSizes(mat,bses[0],bses[1]);
283: MatSetType(mat,MATAIJ);
284: PetscMalloc1(size+1,&rowners);
285: PetscMalloc2(m,&dlens,m,&olens);
286: MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
288: rowners[0] = 0;
289: for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
290: rstart = rowners[rank];
291: rend = rowners[rank+1];
292: PetscObjectGetNewTag((PetscObject)mat,&tag);
293: if (!rank) {
294: gmata = (Mat_SeqAIJ*) gmat->data;
295: /* send row lengths to all processors */
296: for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
297: for (i=1; i<size; i++) {
298: MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);
299: }
300: /* determine the diagonal and off-diagonal nonzero counts */
301: PetscArrayzero(olens,m);
302: PetscCalloc1(m,&ld);
303: jj = 0;
304: for (i=0; i<m; i++) {
305: for (j=0; j<dlens[i]; j++) {
306: if (gmata->j[jj] < rstart) ld[i]++;
307: if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
308: jj++;
309: }
310: }
311: /* send column indices to other processes */
312: for (i=1; i<size; i++) {
313: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
314: MPI_Send(&nz,1,MPIU_INT,i,tag,comm);
315: MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);
316: }
318: /* send numerical values to other processes */
319: for (i=1; i<size; i++) {
320: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
321: MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
322: }
323: gmataa = gmata->a;
324: gmataj = gmata->j;
326: } else {
327: /* receive row lengths */
328: MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);
329: /* receive column indices */
330: MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);
331: PetscMalloc2(nz,&gmataa,nz,&gmataj);
332: MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);
333: /* determine the diagonal and off-diagonal nonzero counts */
334: PetscArrayzero(olens,m);
335: PetscCalloc1(m,&ld);
336: jj = 0;
337: for (i=0; i<m; i++) {
338: for (j=0; j<dlens[i]; j++) {
339: if (gmataj[jj] < rstart) ld[i]++;
340: if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
341: jj++;
342: }
343: }
344: /* receive numerical values */
345: PetscArrayzero(gmataa,nz);
346: MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
347: }
348: /* set preallocation */
349: for (i=0; i<m; i++) {
350: dlens[i] -= olens[i];
351: }
352: MatSeqAIJSetPreallocation(mat,0,dlens);
353: MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);
355: for (i=0; i<m; i++) {
356: dlens[i] += olens[i];
357: }
358: cnt = 0;
359: for (i=0; i<m; i++) {
360: row = rstart + i;
361: MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);
362: cnt += dlens[i];
363: }
364: if (rank) {
365: PetscFree2(gmataa,gmataj);
366: }
367: PetscFree2(dlens,olens);
368: PetscFree(rowners);
370: ((Mat_MPIAIJ*)(mat->data))->ld = ld;
372: *inmat = mat;
373: } else { /* column indices are already set; only need to move over numerical values from process 0 */
374: Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
375: Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
376: mat = *inmat;
377: PetscObjectGetNewTag((PetscObject)mat,&tag);
378: if (!rank) {
379: /* send numerical values to other processes */
380: gmata = (Mat_SeqAIJ*) gmat->data;
381: MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);
382: gmataa = gmata->a;
383: for (i=1; i<size; i++) {
384: nz = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
385: MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);
386: }
387: nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
388: } else {
389: /* receive numerical values from process 0 */
390: nz = Ad->nz + Ao->nz;
391: PetscMalloc1(nz,&gmataa); gmataarestore = gmataa;
392: MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);
393: }
394: /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
395: ld = ((Mat_MPIAIJ*)(mat->data))->ld;
396: ad = Ad->a;
397: ao = Ao->a;
398: if (mat->rmap->n) {
399: i = 0;
400: nz = ld[i]; PetscArraycpy(ao,gmataa,nz); ao += nz; gmataa += nz;
401: nz = Ad->i[i+1] - Ad->i[i]; PetscArraycpy(ad,gmataa,nz); ad += nz; gmataa += nz;
402: }
403: for (i=1; i<mat->rmap->n; i++) {
404: nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; PetscArraycpy(ao,gmataa,nz); ao += nz; gmataa += nz;
405: nz = Ad->i[i+1] - Ad->i[i]; PetscArraycpy(ad,gmataa,nz); ad += nz; gmataa += nz;
406: }
407: i--;
408: if (mat->rmap->n) {
409: nz = Ao->i[i+1] - Ao->i[i] - ld[i]; PetscArraycpy(ao,gmataa,nz);
410: }
411: if (rank) {
412: PetscFree(gmataarestore);
413: }
414: }
415: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
416: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
417: return(0);
418: }
420: /*
421: Local utility routine that creates a mapping from the global column
422: number to the local number in the off-diagonal part of the local
423: storage of the matrix. When PETSC_USE_CTABLE is used this is scalable at
424: a slightly higher hash table cost; without it, it is not scalable (each process
425: has an order-N integer array) but is fast to access.
426: */
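/*
   Lookup sketch (mirrors the idiom used in MatSetValues_MPIAIJ() and
   MatGetValues_MPIAIJ() below; gcol is a global column index, col becomes the
   local off-diagonal index, or negative if the column is absent):

   #if defined(PETSC_USE_CTABLE)
      PetscTableFind(aij->colmap,gcol+1,&col); col--;
   #else
      col = aij->colmap[gcol] - 1;
   #endif
*/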
427: PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
428: {
429: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
431: PetscInt n = aij->B->cmap->n,i;
434: if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
435: #if defined(PETSC_USE_CTABLE)
436: PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);
437: for (i=0; i<n; i++) {
438: PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);
439: }
440: #else
441: PetscCalloc1(mat->cmap->N+1,&aij->colmap);
442: PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));
443: for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
444: #endif
445: return(0);
446: }
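/*
   The two macros below insert a single (row,col,value) into the diagonal (A) or
   off-diagonal (B) SeqAIJ block of the local storage: a bisection step narrows
   the search window while it is larger than 5 entries, a linear scan then
   locates the column, and on a miss the row is grown (reallocating if needed)
   and the later entries are shifted up to make room.
*/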
448: #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol) \
449: { \
450: if (col <= lastcol1) low1 = 0; \
451: else high1 = nrow1; \
452: lastcol1 = col;\
453: while (high1-low1 > 5) { \
454: t = (low1+high1)/2; \
455: if (rp1[t] > col) high1 = t; \
456: else low1 = t; \
457: } \
458: for (_i=low1; _i<high1; _i++) { \
459: if (rp1[_i] > col) break; \
460: if (rp1[_i] == col) { \
461: if (addv == ADD_VALUES) { \
462: ap1[_i] += value; \
463: /* Not sure whether LogFlops will slow down the code */ \
464: (void)PetscLogFlops(1.0); \
465: } \
466: else ap1[_i] = value; \
467: inserted = PETSC_TRUE; \
468: goto a_noinsert; \
469: } \
470: } \
471: if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
472: if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;} \
473: if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
474: MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
475: N = nrow1++ - 1; a->nz++; high1++; \
476: /* shift up all the later entries in this row */ \
477: PetscArraymove(rp1+_i+1,rp1+_i,N-_i+1);\
478: PetscArraymove(ap1+_i+1,ap1+_i,N-_i+1);\
479: rp1[_i] = col; \
480: ap1[_i] = value; \
481: A->nonzerostate++;\
482: a_noinsert: ; \
483: ailen[row] = nrow1; \
484: }
486: #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
487: { \
488: if (col <= lastcol2) low2 = 0; \
489: else high2 = nrow2; \
490: lastcol2 = col; \
491: while (high2-low2 > 5) { \
492: t = (low2+high2)/2; \
493: if (rp2[t] > col) high2 = t; \
494: else low2 = t; \
495: } \
496: for (_i=low2; _i<high2; _i++) { \
497: if (rp2[_i] > col) break; \
498: if (rp2[_i] == col) { \
499: if (addv == ADD_VALUES) { \
500: ap2[_i] += value; \
501: (void)PetscLogFlops(1.0); \
502: } \
503: else ap2[_i] = value; \
504: inserted = PETSC_TRUE; \
505: goto b_noinsert; \
506: } \
507: } \
508: if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
509: if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
510: if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
511: MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
512: N = nrow2++ - 1; b->nz++; high2++; \
513: /* shift up all the later entries in this row */ \
514: PetscArraymove(rp2+_i+1,rp2+_i,N-_i+1);\
515: PetscArraymove(ap2+_i+1,ap2+_i,N-_i+1);\
516: rp2[_i] = col; \
517: ap2[_i] = value; \
518: B->nonzerostate++; \
519: b_noinsert: ; \
520: bilen[row] = nrow2; \
521: }
523: PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
524: {
525: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)A->data;
526: Mat_SeqAIJ *a = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
528: PetscInt l,*garray = mat->garray,diag;
531: /* code only works for square matrices A */
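/* v is assumed to hold the entire row in ascending global column order:
   off-diagonal entries left of the diagonal block, then the diagonal block,
   then the remaining off-diagonal entries */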
533: /* find size of row to the left of the diagonal part */
534: MatGetOwnershipRange(A,&diag,0);
535: row = row - diag;
536: for (l=0; l<b->i[row+1]-b->i[row]; l++) {
537: if (garray[b->j[b->i[row]+l]] > diag) break;
538: }
539: PetscArraycpy(b->a+b->i[row],v,l);
541: /* diagonal part */
542: PetscArraycpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row]));
544: /* right of diagonal part */
545: PetscArraycpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],b->i[row+1]-b->i[row]-l);
546: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
547: if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && (l || (a->i[row+1]-a->i[row]) || (b->i[row+1]-b->i[row]-l))) A->offloadmask = PETSC_OFFLOAD_CPU;
548: #endif
549: return(0);
550: }
552: PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
553: {
554: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
555: PetscScalar value = 0.0;
557: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
558: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
559: PetscBool roworiented = aij->roworiented;
561: /* Some variables required in the macros */
562: Mat A = aij->A;
563: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
564: PetscInt *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
565: MatScalar *aa = a->a;
566: PetscBool ignorezeroentries = a->ignorezeroentries;
567: Mat B = aij->B;
568: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
569: PetscInt *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
570: MatScalar *ba = b->a;
571: /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
572: * cannot use "#if defined" inside a macro. */
573: PETSC_UNUSED PetscBool inserted = PETSC_FALSE;
575: PetscInt *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
576: PetscInt nonew;
577: MatScalar *ap1,*ap2;
580: for (i=0; i<m; i++) {
581: if (im[i] < 0) continue;
582: #if defined(PETSC_USE_DEBUG)
583: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
584: #endif
585: if (im[i] >= rstart && im[i] < rend) {
586: row = im[i] - rstart;
587: lastcol1 = -1;
588: rp1 = aj + ai[row];
589: ap1 = aa + ai[row];
590: rmax1 = aimax[row];
591: nrow1 = ailen[row];
592: low1 = 0;
593: high1 = nrow1;
594: lastcol2 = -1;
595: rp2 = bj + bi[row];
596: ap2 = ba + bi[row];
597: rmax2 = bimax[row];
598: nrow2 = bilen[row];
599: low2 = 0;
600: high2 = nrow2;
602: for (j=0; j<n; j++) {
603: if (v) value = roworiented ? v[i*n+j] : v[i+j*m];
604: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
605: if (in[j] >= cstart && in[j] < cend) {
606: col = in[j] - cstart;
607: nonew = a->nonew;
608: MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
609: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
610: if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) A->offloadmask = PETSC_OFFLOAD_CPU;
611: #endif
612: } else if (in[j] < 0) continue;
613: #if defined(PETSC_USE_DEBUG)
614: else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
615: #endif
616: else {
617: if (mat->was_assembled) {
618: if (!aij->colmap) {
619: MatCreateColmap_MPIAIJ_Private(mat);
620: }
621: #if defined(PETSC_USE_CTABLE)
622: PetscTableFind(aij->colmap,in[j]+1,&col);
623: col--;
624: #else
625: col = aij->colmap[in[j]] - 1;
626: #endif
627: if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
628: MatDisAssemble_MPIAIJ(mat);
629: col = in[j];
630: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
631: B = aij->B;
632: b = (Mat_SeqAIJ*)B->data;
633: bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
634: rp2 = bj + bi[row];
635: ap2 = ba + bi[row];
636: rmax2 = bimax[row];
637: nrow2 = bilen[row];
638: low2 = 0;
639: high2 = nrow2;
640: bm = aij->B->rmap->n;
641: ba = b->a;
642: inserted = PETSC_FALSE;
643: } else if (col < 0) {
644: if (1 == ((Mat_SeqAIJ*)(aij->B->data))->nonew) {
645: PetscInfo3(mat,"Skipping of insertion of new nonzero location in off-diagonal portion of matrix %g(%D,%D)\n",(double)PetscRealPart(value),im[i],in[j]);
646: } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
647: }
648: } else col = in[j];
649: nonew = b->nonew;
650: MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
651: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
652: if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) B->offloadmask = PETSC_OFFLOAD_CPU;
653: #endif
654: }
655: }
656: } else {
657: if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
658: if (!aij->donotstash) {
659: mat->assembled = PETSC_FALSE;
660: if (roworiented) {
661: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
662: } else {
663: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
664: }
665: }
666: }
667: }
668: return(0);
669: }
671: /*
672: This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
673: The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
674: No off-process parts of the matrix are allowed here and mat->was_assembled has to be PETSC_FALSE.
675: */
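/*
   Illustrative example: with cstart=4, cend=8 and a local row whose global
   columns are {1,5,9}, the diagonal part receives aj <- {5-4} (dnz=1) and the
   off-diagonal part receives bj <- {1,9} (onz=2), still in global column indices.
*/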
676: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[])
677: {
678: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
679: Mat A = aij->A; /* diagonal part of the matrix */
680: Mat B = aij->B; /* offdiagonal part of the matrix */
681: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
682: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
683: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,col;
684: PetscInt *ailen = a->ilen,*aj = a->j;
685: PetscInt *bilen = b->ilen,*bj = b->j;
686: PetscInt am = aij->A->rmap->n,j;
687: PetscInt diag_so_far = 0,dnz;
688: PetscInt offd_so_far = 0,onz;
691: /* Iterate over all rows of the matrix */
692: for (j=0; j<am; j++) {
693: dnz = onz = 0;
694: /* Iterate over all non-zero columns of the current row */
695: for (col=mat_i[j]; col<mat_i[j+1]; col++) {
696: /* If column is in the diagonal */
697: if (mat_j[col] >= cstart && mat_j[col] < cend) {
698: aj[diag_so_far++] = mat_j[col] - cstart;
699: dnz++;
700: } else { /* off-diagonal entries */
701: bj[offd_so_far++] = mat_j[col];
702: onz++;
703: }
704: }
705: ailen[j] = dnz;
706: bilen[j] = onz;
707: }
708: return(0);
709: }
711: /*
712: This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
713: The values in mat_i have to be sorted and the values in mat_j have to be sorted for each row (CSR-like).
714: No off-process parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ().
715: Also, mat->was_assembled has to be false, otherwise the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
716: would not hold and the more complex MatSetValues_MPIAIJ() has to be used.
717: */
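/*
   Continuing the example above: if that row carries values {x,y,z} for global
   columns {1,5,9}, then aa receives {y} at offset full_diag_i[row] and ba
   receives {x,z} at offset full_offd_i[row], matching the pattern laid down
   by the symbolic pass.
*/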
718: PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat,const PetscInt mat_j[],const PetscInt mat_i[],const PetscScalar mat_a[])
719: {
720: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
721: Mat A = aij->A; /* diagonal part of the matrix */
722: Mat B = aij->B; /* offdiagonal part of the matrix */
723: Mat_SeqAIJ *aijd =(Mat_SeqAIJ*)(aij->A)->data,*aijo=(Mat_SeqAIJ*)(aij->B)->data;
724: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
725: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
726: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend;
727: PetscInt *ailen = a->ilen,*aj = a->j;
728: PetscInt *bilen = b->ilen,*bj = b->j;
729: PetscInt am = aij->A->rmap->n,j;
730: PetscInt *full_diag_i=aijd->i,*full_offd_i=aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
731: PetscInt col,dnz_row,onz_row,rowstart_diag,rowstart_offd;
732: PetscScalar *aa = a->a,*ba = b->a;
735: /* Iterate over all rows of the matrix */
736: for (j=0; j<am; j++) {
737: dnz_row = onz_row = 0;
738: rowstart_offd = full_offd_i[j];
739: rowstart_diag = full_diag_i[j];
740: /* Iterate over all non-zero columns of the current row */
741: for (col=mat_i[j]; col<mat_i[j+1]; col++) {
742: /* If column is in the diagonal */
743: if (mat_j[col] >= cstart && mat_j[col] < cend) {
744: aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
745: aa[rowstart_diag+dnz_row] = mat_a[col];
746: dnz_row++;
747: } else { /* off-diagonal entries */
748: bj[rowstart_offd+onz_row] = mat_j[col];
749: ba[rowstart_offd+onz_row] = mat_a[col];
750: onz_row++;
751: }
752: }
753: ailen[j] = dnz_row;
754: bilen[j] = onz_row;
755: }
756: return(0);
757: }
759: PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
760: {
761: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
763: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
764: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
767: for (i=0; i<m; i++) {
768: if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
769: if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
770: if (idxm[i] >= rstart && idxm[i] < rend) {
771: row = idxm[i] - rstart;
772: for (j=0; j<n; j++) {
773: if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
774: if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
775: if (idxn[j] >= cstart && idxn[j] < cend) {
776: col = idxn[j] - cstart;
777: MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);
778: } else {
779: if (!aij->colmap) {
780: MatCreateColmap_MPIAIJ_Private(mat);
781: }
782: #if defined(PETSC_USE_CTABLE)
783: PetscTableFind(aij->colmap,idxn[j]+1,&col);
784: col--;
785: #else
786: col = aij->colmap[idxn[j]] - 1;
787: #endif
788: if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
789: else {
790: MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);
791: }
792: }
793: }
794: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
795: }
796: return(0);
797: }
799: extern PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat,Vec,Vec);
801: PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
802: {
803: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
805: PetscInt nstash,reallocs;
808: if (aij->donotstash || mat->nooffprocentries) return(0);
810: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
811: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
812: PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
813: return(0);
814: }
816: PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
817: {
818: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
819: Mat_SeqAIJ *a = (Mat_SeqAIJ*)aij->A->data;
821: PetscMPIInt n;
822: PetscInt i,j,rstart,ncols,flg;
823: PetscInt *row,*col;
824: PetscBool other_disassembled;
825: PetscScalar *val;
827: /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */
830: if (!aij->donotstash && !mat->nooffprocentries) {
831: while (1) {
832: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
833: if (!flg) break;
835: for (i=0; i<n; ) {
836: /* Now identify the consecutive vals belonging to the same row */
837: for (j=i,rstart=row[j]; j<n; j++) {
838: if (row[j] != rstart) break;
839: }
840: if (j < n) ncols = j-i;
841: else ncols = n-i;
842: /* Now assemble all these values with a single function call */
843: MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);
845: i = j;
846: }
847: }
848: MatStashScatterEnd_Private(&mat->stash);
849: }
850: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
851: if (mat->offloadmask == PETSC_OFFLOAD_CPU) aij->A->offloadmask = PETSC_OFFLOAD_CPU;
852: /* We call MatBindToCPU() on aij->A and aij->B here, because if MatBindToCPU_MPIAIJ() is called before assembly, it cannot bind these. */
853: if (mat->boundtocpu) {
854: MatBindToCPU(aij->A,PETSC_TRUE);
855: MatBindToCPU(aij->B,PETSC_TRUE);
856: }
857: #endif
858: MatAssemblyBegin(aij->A,mode);
859: MatAssemblyEnd(aij->A,mode);
861: /* determine if any process has disassembled; if so, we must
862: also disassemble ourselves so that we may reassemble. */
863: /*
864: if nonzero structure of submatrix B cannot change then we know that
865: no processor disassembled thus we can skip this stuff
866: */
867: if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
868: MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));
869: if (mat->was_assembled && !other_disassembled) {
870: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
871: aij->B->offloadmask = PETSC_OFFLOAD_BOTH; /* do not copy on the GPU when assembling inside MatDisAssemble_MPIAIJ */
872: #endif
873: MatDisAssemble_MPIAIJ(mat);
874: }
875: }
876: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
877: MatSetUpMultiply_MPIAIJ(mat);
878: }
879: MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);
880: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
881: if (mat->offloadmask == PETSC_OFFLOAD_CPU && aij->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) aij->B->offloadmask = PETSC_OFFLOAD_CPU;
882: #endif
883: MatAssemblyBegin(aij->B,mode);
884: MatAssemblyEnd(aij->B,mode);
886: PetscFree2(aij->rowvalues,aij->rowindices);
888: aij->rowvalues = 0;
890: VecDestroy(&aij->diag);
891: if (a->inode.size) mat->ops->multdiagonalblock = MatMultDiagonalBlock_MPIAIJ;
893: /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
894: if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
895: PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
896: MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));
897: }
898: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
899: mat->offloadmask = PETSC_OFFLOAD_BOTH;
900: #endif
901: return(0);
902: }
904: PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
905: {
906: Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data;
910: MatZeroEntries(l->A);
911: MatZeroEntries(l->B);
912: return(0);
913: }
915: PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
916: {
917: Mat_MPIAIJ *mat = (Mat_MPIAIJ *) A->data;
918: PetscObjectState sA, sB;
919: PetscInt *lrows;
920: PetscInt r, len;
921: PetscBool cong, lch, gch;
922: PetscErrorCode ierr;
925: /* get locally owned rows */
926: MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);
927: MatHasCongruentLayouts(A,&cong);
928: /* fix right hand side if needed */
929: if (x && b) {
930: const PetscScalar *xx;
931: PetscScalar *bb;
933: if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
934: VecGetArrayRead(x, &xx);
935: VecGetArray(b, &bb);
936: for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
937: VecRestoreArrayRead(x, &xx);
938: VecRestoreArray(b, &bb);
939: }
941: sA = mat->A->nonzerostate;
942: sB = mat->B->nonzerostate;
944: if (diag != 0.0 && cong) {
945: MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);
946: MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
947: } else if (diag != 0.0) { /* non-square or non congruent layouts -> if keepnonzeropattern is false, we allow for new insertion */
948: Mat_SeqAIJ *aijA = (Mat_SeqAIJ*)mat->A->data;
949: Mat_SeqAIJ *aijB = (Mat_SeqAIJ*)mat->B->data;
950: PetscInt nnwA, nnwB;
951: PetscBool nnzA, nnzB;
953: nnwA = aijA->nonew;
954: nnwB = aijB->nonew;
955: nnzA = aijA->keepnonzeropattern;
956: nnzB = aijB->keepnonzeropattern;
957: if (!nnzA) {
958: PetscInfo(mat->A,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on diagonal block.\n");
959: aijA->nonew = 0;
960: }
961: if (!nnzB) {
962: PetscInfo(mat->B,"Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on off-diagonal block.\n");
963: aijB->nonew = 0;
964: }
965: /* Must zero here before the next loop */
966: MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
967: MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
968: for (r = 0; r < len; ++r) {
969: const PetscInt row = lrows[r] + A->rmap->rstart;
970: if (row >= A->cmap->N) continue;
971: MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);
972: }
973: aijA->nonew = nnwA;
974: aijB->nonew = nnwB;
975: } else {
976: MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);
977: MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);
978: }
979: PetscFree(lrows);
980: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
981: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
983: /* reduce nonzerostate */
984: lch = (PetscBool)(sA != mat->A->nonzerostate || sB != mat->B->nonzerostate);
985: MPIU_Allreduce(&lch,&gch,1,MPIU_BOOL,MPI_LOR,PetscObjectComm((PetscObject)A));
986: if (gch) A->nonzerostate++;
987: return(0);
988: }
990: PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
991: {
992: Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data;
993: PetscErrorCode ierr;
994: PetscMPIInt n = A->rmap->n;
995: PetscInt i,j,r,m,len = 0;
996: PetscInt *lrows,*owners = A->rmap->range;
997: PetscMPIInt p = 0;
998: PetscSFNode *rrows;
999: PetscSF sf;
1000: const PetscScalar *xx;
1001: PetscScalar *bb,*mask;
1002: Vec xmask,lmask;
1003: Mat_SeqAIJ *aij = (Mat_SeqAIJ*)l->B->data;
1004: const PetscInt *aj, *ii,*ridx;
1005: PetscScalar *aa;
1008: /* Create SF where leaves are input rows and roots are owned rows */
1009: PetscMalloc1(n, &lrows);
1010: for (r = 0; r < n; ++r) lrows[r] = -1;
1011: PetscMalloc1(N, &rrows);
1012: for (r = 0; r < N; ++r) {
1013: const PetscInt idx = rows[r];
1014: if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
1015: if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
1016: PetscLayoutFindOwner(A->rmap,idx,&p);
1017: }
1018: rrows[r].rank = p;
1019: rrows[r].index = rows[r] - owners[p];
1020: }
1021: PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);
1022: PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);
1023: /* Collect flags for rows to be zeroed */
1024: PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1025: PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);
1026: PetscSFDestroy(&sf);
1027: /* Compress and put in row numbers */
1028: for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
1029: /* zero diagonal part of matrix */
1030: MatZeroRowsColumns(l->A,len,lrows,diag,x,b);
1031: /* handle off diagonal part of matrix */
1032: MatCreateVecs(A,&xmask,NULL);
1033: VecDuplicate(l->lvec,&lmask);
1034: VecGetArray(xmask,&bb);
1035: for (i=0; i<len; i++) bb[lrows[i]] = 1;
1036: VecRestoreArray(xmask,&bb);
1037: VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
1038: VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);
1039: VecDestroy(&xmask);
1040: if (x && b) { /* this code is buggy when the row and column layout don't match */
1041: PetscBool cong;
1043: MatHasCongruentLayouts(A,&cong);
1044: if (!cong) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Need matching row/col layout");
1045: VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
1046: VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);
1047: VecGetArrayRead(l->lvec,&xx);
1048: VecGetArray(b,&bb);
1049: }
1050: VecGetArray(lmask,&mask);
1051: /* remove zeroed rows of off diagonal matrix */
1052: ii = aij->i;
1053: for (i=0; i<len; i++) {
1054: PetscArrayzero(aij->a + ii[lrows[i]],ii[lrows[i]+1] - ii[lrows[i]]);
1055: }
1056: /* loop over all elements of off-process part of matrix, zeroing removed columns */
1057: if (aij->compressedrow.use) {
1058: m = aij->compressedrow.nrows;
1059: ii = aij->compressedrow.i;
1060: ridx = aij->compressedrow.rindex;
1061: for (i=0; i<m; i++) {
1062: n = ii[i+1] - ii[i];
1063: aj = aij->j + ii[i];
1064: aa = aij->a + ii[i];
1066: for (j=0; j<n; j++) {
1067: if (PetscAbsScalar(mask[*aj])) {
1068: if (b) bb[*ridx] -= *aa*xx[*aj];
1069: *aa = 0.0;
1070: }
1071: aa++;
1072: aj++;
1073: }
1074: ridx++;
1075: }
1076: } else { /* do not use compressed row format */
1077: m = l->B->rmap->n;
1078: for (i=0; i<m; i++) {
1079: n = ii[i+1] - ii[i];
1080: aj = aij->j + ii[i];
1081: aa = aij->a + ii[i];
1082: for (j=0; j<n; j++) {
1083: if (PetscAbsScalar(mask[*aj])) {
1084: if (b) bb[i] -= *aa*xx[*aj];
1085: *aa = 0.0;
1086: }
1087: aa++;
1088: aj++;
1089: }
1090: }
1091: }
1092: if (x && b) {
1093: VecRestoreArray(b,&bb);
1094: VecRestoreArrayRead(l->lvec,&xx);
1095: }
1096: VecRestoreArray(lmask,&mask);
1097: VecDestroy(&lmask);
1098: PetscFree(lrows);
1100: /* only change matrix nonzero state if pattern was allowed to be changed */
1101: if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
1102: PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
1103: MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));
1104: }
1105: return(0);
1106: }
1108: PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
1109: {
1110: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1112: PetscInt nt;
1113: VecScatter Mvctx = a->Mvctx;
1116: VecGetLocalSize(xx,&nt);
1117: if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
1119: VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1120: (*a->A->ops->mult)(a->A,xx,yy);
1121: VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1122: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
1123: return(0);
1124: }
1126: PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
1127: {
1128: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1132: MatMultDiagonalBlock(a->A,bb,xx);
1133: return(0);
1134: }
1136: PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1137: {
1138: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1140: VecScatter Mvctx = a->Mvctx;
1143: if (a->Mvctx_mpi1_flg) Mvctx = a->Mvctx_mpi1;
1144: VecScatterBegin(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1145: (*a->A->ops->multadd)(a->A,xx,yy,zz);
1146: VecScatterEnd(Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
1147: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
1148: return(0);
1149: }
1151: PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
1152: {
1153: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1157: /* do nondiagonal part */
1158: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1159: /* do local part */
1160: (*a->A->ops->multtranspose)(a->A,xx,yy);
1161: /* add partial results together */
1162: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1163: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
1164: return(0);
1165: }
1167: PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool *f)
1168: {
1169: MPI_Comm comm;
1170: Mat_MPIAIJ *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
1171: Mat Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
1172: IS Me,Notme;
1174: PetscInt M,N,first,last,*notme,i;
1175: PetscBool lf;
1176: PetscMPIInt size;
1179: /* Easy test: symmetric diagonal block */
1180: Bij = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
1181: MatIsTranspose(Adia,Bdia,tol,&lf);
1182: MPIU_Allreduce(&lf,f,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)Amat));
1183: if (!*f) return(0);
1184: PetscObjectGetComm((PetscObject)Amat,&comm);
1185: MPI_Comm_size(comm,&size);
1186: if (size == 1) return(0);
1188: /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1189: MatGetSize(Amat,&M,&N);
1190: MatGetOwnershipRange(Amat,&first,&last);
1191: PetscMalloc1(N-last+first,&notme);
1192: for (i=0; i<first; i++) notme[i] = i;
1193: for (i=last; i<M; i++) notme[i-last+first] = i;
1194: ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);
1195: ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);
1196: MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);
1197: Aoff = Aoffs[0];
1198: MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);
1199: Boff = Boffs[0];
1200: MatIsTranspose(Aoff,Boff,tol,f);
1201: MatDestroyMatrices(1,&Aoffs);
1202: MatDestroyMatrices(1,&Boffs);
1203: ISDestroy(&Me);
1204: ISDestroy(&Notme);
1205: PetscFree(notme);
1206: return(0);
1207: }
1209: PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A,PetscReal tol,PetscBool *f)
1210: {
1214: MatIsTranspose_MPIAIJ(A,A,tol,f);
1215: return(0);
1216: }
1218: PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1219: {
1220: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1224: /* do nondiagonal part */
1225: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
1226: /* do local part */
1227: (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);
1228: /* add partial results together */
1229: VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1230: VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
1231: return(0);
1232: }
1234: /*
1235: This only works correctly for square matrices where the subblock A->A is the
1236: diagonal block
1237: */
1238: PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
1239: {
1241: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1244: if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1245: if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
1246: MatGetDiagonal(a->A,v);
1247: return(0);
1248: }
1250: PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
1251: {
1252: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1256: MatScale(a->A,aa);
1257: MatScale(a->B,aa);
1258: return(0);
1259: }
1261: PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
1262: {
1263: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1267: #if defined(PETSC_USE_LOG)
1268: PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
1269: #endif
1270: MatStashDestroy_Private(&mat->stash);
1271: VecDestroy(&aij->diag);
1272: MatDestroy(&aij->A);
1273: MatDestroy(&aij->B);
1274: #if defined(PETSC_USE_CTABLE)
1275: PetscTableDestroy(&aij->colmap);
1276: #else
1277: PetscFree(aij->colmap);
1278: #endif
1279: PetscFree(aij->garray);
1280: VecDestroy(&aij->lvec);
1281: VecScatterDestroy(&aij->Mvctx);
1282: if (aij->Mvctx_mpi1) {VecScatterDestroy(&aij->Mvctx_mpi1);}
1283: PetscFree2(aij->rowvalues,aij->rowindices);
1284: PetscFree(aij->ld);
1285: PetscFree(mat->data);
1287: PetscObjectChangeTypeName((PetscObject)mat,0);
1288: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);
1289: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);
1290: PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);
1291: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);
1292: PetscObjectComposeFunction((PetscObject)mat,"MatResetPreallocation_C",NULL);
1293: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);
1294: PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);
1295: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpibaij_C",NULL);
1296: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);
1297: #if defined(PETSC_HAVE_ELEMENTAL)
1298: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);
1299: #endif
1300: #if defined(PETSC_HAVE_HYPRE)
1301: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);
1302: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_transpose_mpiaij_mpiaij_C",NULL);
1303: #endif
1304: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_is_C",NULL);
1305: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_is_mpiaij_C",NULL);
1306: PetscObjectComposeFunction((PetscObject)mat,"MatProductSetFromOptions_mpiaij_mpiaij_C",NULL);
1307: return(0);
1308: }
1310: PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
1311: {
1312: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1313: Mat_SeqAIJ *A = (Mat_SeqAIJ*)aij->A->data;
1314: Mat_SeqAIJ *B = (Mat_SeqAIJ*)aij->B->data;
1315: const PetscInt *garray = aij->garray;
1316: PetscInt header[4],M,N,m,rs,cs,nz,cnt,i,ja,jb;
1317: PetscInt *rowlens;
1318: PetscInt *colidxs;
1319: PetscScalar *matvals;
1320: PetscErrorCode ierr;
1323: PetscViewerSetUp(viewer);
1325: M = mat->rmap->N;
1326: N = mat->cmap->N;
1327: m = mat->rmap->n;
1328: rs = mat->rmap->rstart;
1329: cs = mat->cmap->rstart;
1330: nz = A->nz + B->nz;
1332: /* write matrix header */
1333: header[0] = MAT_FILE_CLASSID;
1334: header[1] = M; header[2] = N; header[3] = nz;
1335: MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));
1336: PetscViewerBinaryWrite(viewer,header,4,PETSC_INT);
1338: /* fill in and store row lengths */
1339: PetscMalloc1(m,&rowlens);
1340: for (i=0; i<m; i++) rowlens[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
1341: PetscViewerBinaryWriteAll(viewer,rowlens,m,rs,M,PETSC_INT);
1342: PetscFree(rowlens);
1344: /* fill in and store column indices */
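/* each row is emitted in ascending global column order: off-diagonal entries
   left of the diagonal block, the diagonal block itself, then the remaining
   off-diagonal entries */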
1345: PetscMalloc1(nz,&colidxs);
1346: for (cnt=0, i=0; i<m; i++) {
1347: for (jb=B->i[i]; jb<B->i[i+1]; jb++) {
1348: if (garray[B->j[jb]] > cs) break;
1349: colidxs[cnt++] = garray[B->j[jb]];
1350: }
1351: for (ja=A->i[i]; ja<A->i[i+1]; ja++)
1352: colidxs[cnt++] = A->j[ja] + cs;
1353: for (; jb<B->i[i+1]; jb++)
1354: colidxs[cnt++] = garray[B->j[jb]];
1355: }
1356: if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1357: PetscViewerBinaryWriteAll(viewer,colidxs,nz,PETSC_DETERMINE,PETSC_DETERMINE,PETSC_INT);
1358: PetscFree(colidxs);
1360: /* fill in and store nonzero values */
1361: PetscMalloc1(nz,&matvals);
1362: for (cnt=0, i=0; i<m; i++) {
1363: for (jb=B->i[i]; jb<B->i[i+1]; jb++) {
1364: if (garray[B->j[jb]] > cs) break;
1365: matvals[cnt++] = B->a[jb];
1366: }
1367: for (ja=A->i[i]; ja<A->i[i+1]; ja++)
1368: matvals[cnt++] = A->a[ja];
1369: for (; jb<B->i[i+1]; jb++)
1370: matvals[cnt++] = B->a[jb];
1371: }
1372: if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);
1373: PetscViewerBinaryWriteAll(viewer,matvals,nz,PETSC_DETERMINE,PETSC_DETERMINE,PETSC_SCALAR);
1374: PetscFree(matvals);
1376: /* write block size option to the viewer's .info file */
1377: MatView_Binary_BlockSizes(mat,viewer);
1378: return(0);
1379: }
1381: #include <petscdraw.h>
1382: PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1383: {
1384: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1385: PetscErrorCode ierr;
1386: PetscMPIInt rank = aij->rank,size = aij->size;
1387: PetscBool isdraw,iascii,isbinary;
1388: PetscViewer sviewer;
1389: PetscViewerFormat format;
1392: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1393: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1394: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1395: if (iascii) {
1396: PetscViewerGetFormat(viewer,&format);
1397: if (format == PETSC_VIEWER_LOAD_BALANCE) {
1398: PetscInt i,nmax = 0,nmin = PETSC_MAX_INT,navg = 0,*nz,nzlocal = ((Mat_SeqAIJ*) (aij->A->data))->nz + ((Mat_SeqAIJ*) (aij->B->data))->nz;
1399: PetscMalloc1(size,&nz);
1400: MPI_Allgather(&nzlocal,1,MPIU_INT,nz,1,MPIU_INT,PetscObjectComm((PetscObject)mat));
1401: for (i=0; i<(PetscInt)size; i++) {
1402: nmax = PetscMax(nmax,nz[i]);
1403: nmin = PetscMin(nmin,nz[i]);
1404: navg += nz[i];
1405: }
1406: PetscFree(nz);
1407: navg = navg/size;
1408: PetscViewerASCIIPrintf(viewer,"Load Balance - Nonzeros: Min %D avg %D max %D\n",nmin,navg,nmax);
1409: return(0);
1410: }
1411: PetscViewerGetFormat(viewer,&format);
1412: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1413: MatInfo info;
1414: PetscBool inodes;
1416: MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
1417: MatGetInfo(mat,MAT_LOCAL,&info);
1418: MatInodeGetInodeSizes(aij->A,NULL,(PetscInt**)&inodes,NULL);
1419: PetscViewerASCIIPushSynchronized(viewer);
1420: if (!inodes) {
1421: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, not using I-node routines\n",
1422: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1423: } else {
1424: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %g, using I-node routines\n",
1425: rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(double)info.memory);
1426: }
1427: MatGetInfo(aij->A,MAT_LOCAL,&info);
1428: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1429: MatGetInfo(aij->B,MAT_LOCAL,&info);
1430: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
1431: PetscViewerFlush(viewer);
1432: PetscViewerASCIIPopSynchronized(viewer);
1433: PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
1434: VecScatterView(aij->Mvctx,viewer);
1435: return(0);
1436: } else if (format == PETSC_VIEWER_ASCII_INFO) {
1437: PetscInt inodecount,inodelimit,*inodes;
1438: MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);
1439: if (inodes) {
1440: PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);
1441: } else {
1442: PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");
1443: }
1444: return(0);
1445: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1446: return(0);
1447: }
1448: } else if (isbinary) {
1449: if (size == 1) {
1450: PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1451: MatView(aij->A,viewer);
1452: } else {
1453: MatView_MPIAIJ_Binary(mat,viewer);
1454: }
1455: return(0);
1456: } else if (iascii && size == 1) {
1457: PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);
1458: MatView(aij->A,viewer);
1459: return(0);
1460: } else if (isdraw) {
1461: PetscDraw draw;
1462: PetscBool isnull;
1463: PetscViewerDrawGetDraw(viewer,0,&draw);
1464: PetscDrawIsNull(draw,&isnull);
1465: if (isnull) return(0);
1466: }
1468: { /* assemble the entire matrix onto first processor */
1469: Mat A = NULL, Av;
1470: IS isrow,iscol;
1472: ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1473: ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1474: MatCreateSubMatrix(mat,isrow,iscol,MAT_INITIAL_MATRIX,&A);
1475: MatMPIAIJGetSeqAIJ(A,&Av,NULL,NULL);
1476: /* The commented code uses MatCreateSubMatrices instead */
1477: /*
1478: Mat *AA, A = NULL, Av;
1479: IS isrow,iscol;
1481: ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->rmap->N : 0,0,1,&isrow);
1482: ISCreateStride(PetscObjectComm((PetscObject)mat),!rank ? mat->cmap->N : 0,0,1,&iscol);
1483: MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA);
1484: if (!rank) {
1485: PetscObjectReference((PetscObject)AA[0]);
1486: A = AA[0];
1487: Av = AA[0];
1488: }
1489: MatDestroySubMatrices(1,&AA);
1490: */
1491: ISDestroy(&iscol);
1492: ISDestroy(&isrow);
1493: /*
1494: Everyone has to call to draw the matrix since the graphics waits are
1495: synchronized across all processors that share the PetscDraw object
1496: */
1497: PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1498: if (!rank) {
1499: if (((PetscObject)mat)->name) {
1500: PetscObjectSetName((PetscObject)Av,((PetscObject)mat)->name);
1501: }
1502: MatView_SeqAIJ(Av,sviewer);
1503: }
1504: PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);
1505: PetscViewerFlush(viewer);
1506: MatDestroy(&A);
1507: }
1508: return(0);
1509: }
1511: PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1512: {
1514: PetscBool iascii,isdraw,issocket,isbinary;
1517: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1518: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
1519: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1520: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
1521: if (iascii || isdraw || isbinary || issocket) {
1522: MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);
1523: }
1524: return(0);
1525: }
1527: PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1528: {
1529: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1531: Vec bb1 = 0;
1532: PetscBool hasop;
1535: if (flag == SOR_APPLY_UPPER) {
1536: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1537: return(0);
1538: }
1540: if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
1541: VecDuplicate(bb,&bb1);
1542: }
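/* For the local sweeps below, each outer iteration scatters the current
   solution into mat->lvec, forms bb1 = bb - B*lvec (moving the off-process
   coupling onto the right-hand side), then runs the requested local sweep
   on the diagonal block A */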
1544: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1545: if (flag & SOR_ZERO_INITIAL_GUESS) {
1546: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1547: its--;
1548: }
1550: while (its--) {
1551: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1552: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1554: /* update rhs: bb1 = bb - B*x */
1555: VecScale(mat->lvec,-1.0);
1556: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1558: /* local sweep */
1559: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);
1560: }
1561: } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1562: if (flag & SOR_ZERO_INITIAL_GUESS) {
1563: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1564: its--;
1565: }
1566: while (its--) {
1567: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1568: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1570: /* update rhs: bb1 = bb - B*x */
1571: VecScale(mat->lvec,-1.0);
1572: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1574: /* local sweep */
1575: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);
1576: }
1577: } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1578: if (flag & SOR_ZERO_INITIAL_GUESS) {
1579: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
1580: its--;
1581: }
1582: while (its--) {
1583: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1584: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1586: /* update rhs: bb1 = bb - B*x */
1587: VecScale(mat->lvec,-1.0);
1588: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);
1590: /* local sweep */
1591: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);
1592: }
1593: } else if (flag & SOR_EISENSTAT) {
1594: Vec xx1;
1596: VecDuplicate(bb,&xx1);
1597: (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);
1599: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1600: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
1601: if (!mat->diag) {
1602: MatCreateVecs(matin,&mat->diag,NULL);
1603: MatGetDiagonal(matin,mat->diag);
1604: }
1605: MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
1606: if (hasop) {
1607: MatMultDiagonalBlock(matin,xx,bb1);
1608: } else {
1609: VecPointwiseMult(bb1,mat->diag,xx);
1610: }
1611: VecAYPX(bb1,(omega-2.0)/omega,bb);
1613: MatMultAdd(mat->B,mat->lvec,bb1,bb1);
1615: /* local sweep */
1616: (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
1617: VecAXPY(xx,1.0,xx1);
1618: VecDestroy(&xx1);
1619: } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");
1621: VecDestroy(&bb1);
1623: matin->factorerrortype = mat->A->factorerrortype;
1624: return(0);
1625: }
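/*
   Note: the local sweeps above implement a block Jacobi outer iteration whose
   diagonal blocks are relaxed with SOR: each iteration gathers the off-process
   entries of xx into mat->lvec through mat->Mvctx, forms bb1 = bb - B*lvec
   (VecScale by -1 followed by MatMultAdd), and then runs the requested
   sequential sweep on the diagonal block mat->A with right-hand side bb1.
*/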
1627: PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1628: {
1629: Mat aA,aB,Aperm;
1630: const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1631: PetscScalar *aa,*ba;
1632: PetscInt i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1633: PetscSF rowsf,sf;
1634: IS parcolp = NULL;
1635: PetscBool done;
1639: MatGetLocalSize(A,&m,&n);
1640: ISGetIndices(rowp,&rwant);
1641: ISGetIndices(colp,&cwant);
1642: PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);
1644: /* Invert row permutation to find out where my rows should go */
1645: PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);
1646: PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);
1647: PetscSFSetFromOptions(rowsf);
1648: for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1649: PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1650: PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);
1652: /* Invert column permutation to find out where my columns should go */
1653: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1654: PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);
1655: PetscSFSetFromOptions(sf);
1656: for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1657: PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1658: PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPIU_REPLACE);
1659: PetscSFDestroy(&sf);
1661: ISRestoreIndices(rowp,&rwant);
1662: ISRestoreIndices(colp,&cwant);
1663: MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);
1665: /* Find out where my gcols should go */
1666: MatGetSize(aB,NULL,&ng);
1667: PetscMalloc1(ng,&gcdest);
1668: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
1669: PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);
1670: PetscSFSetFromOptions(sf);
1671: PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest);
1672: PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest);
1673: PetscSFDestroy(&sf);
1675: PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);
1676: MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1677: MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1678: for (i=0; i<m; i++) {
1679: PetscInt row = rdest[i];
1680: PetscMPIInt rowner;
1681: PetscLayoutFindOwner(A->rmap,row,&rowner);
1682: for (j=ai[i]; j<ai[i+1]; j++) {
1683: PetscInt col = cdest[aj[j]];
1684: PetscMPIInt cowner;
1685: PetscLayoutFindOwner(A->cmap,col,&cowner); /* Could build an index for the columns to eliminate this search */
1686: if (rowner == cowner) dnnz[i]++;
1687: else onnz[i]++;
1688: }
1689: for (j=bi[i]; j<bi[i+1]; j++) {
1690: PetscInt col = gcdest[bj[j]];
1691: PetscMPIInt cowner;
1692: PetscLayoutFindOwner(A->cmap,col,&cowner);
1693: if (rowner == cowner) dnnz[i]++;
1694: else onnz[i]++;
1695: }
1696: }
1697: PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz);
1698: PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz);
1699: PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz);
1700: PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz);
1701: PetscSFDestroy(&rowsf);
1703: MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);
1704: MatSeqAIJGetArray(aA,&aa);
1705: MatSeqAIJGetArray(aB,&ba);
1706: for (i=0; i<m; i++) {
1707: PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1708: PetscInt j0,rowlen;
1709: rowlen = ai[i+1] - ai[i];
1710: for (j0=j=0; j<rowlen; j0=j) { /* rowlen could be larger than the number of rows m (the size of the scratch arrays), so insert in batches of at most m */
1711: for ( ; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1712: MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);
1713: }
1714: rowlen = bi[i+1] - bi[i];
1715: for (j0=j=0; j<rowlen; j0=j) {
1716: for ( ; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1717: MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);
1718: }
1719: }
1720: MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);
1721: MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);
1722: MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);
1723: MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);
1724: MatSeqAIJRestoreArray(aA,&aa);
1725: MatSeqAIJRestoreArray(aB,&ba);
1726: PetscFree4(dnnz,onnz,tdnnz,tonnz);
1727: PetscFree3(work,rdest,cdest);
1728: PetscFree(gcdest);
1729: if (parcolp) {ISDestroy(&colp);}
1730: *B = Aperm;
1731: return(0);
1732: }
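/*
   Note on the permutation strategy above: the row and column permutations are
   inverted with PetscSF reductions to find where each local row/column should
   go, the same SFs exchange the nonzero counts needed to preallocate Aperm,
   and the entries are finally inserted with MatSetValues() in batches of at
   most m columns, reusing the dnnz/onnz arrays as scratch space.
*/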
1734: PetscErrorCode MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1735: {
1736: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1740: MatGetSize(aij->B,NULL,nghosts);
1741: if (ghosts) *ghosts = aij->garray;
1742: return(0);
1743: }
1745: PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1746: {
1747: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1748: Mat A = mat->A,B = mat->B;
1750: PetscLogDouble isend[5],irecv[5];
1753: info->block_size = 1.0;
1754: MatGetInfo(A,MAT_LOCAL,info);
1756: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1757: isend[3] = info->memory; isend[4] = info->mallocs;
1759: MatGetInfo(B,MAT_LOCAL,info);
1761: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1762: isend[3] += info->memory; isend[4] += info->mallocs;
1763: if (flag == MAT_LOCAL) {
1764: info->nz_used = isend[0];
1765: info->nz_allocated = isend[1];
1766: info->nz_unneeded = isend[2];
1767: info->memory = isend[3];
1768: info->mallocs = isend[4];
1769: } else if (flag == MAT_GLOBAL_MAX) {
1770: MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_MAX,PetscObjectComm((PetscObject)matin));
1772: info->nz_used = irecv[0];
1773: info->nz_allocated = irecv[1];
1774: info->nz_unneeded = irecv[2];
1775: info->memory = irecv[3];
1776: info->mallocs = irecv[4];
1777: } else if (flag == MAT_GLOBAL_SUM) {
1778: MPIU_Allreduce(isend,irecv,5,MPIU_PETSCLOGDOUBLE,MPI_SUM,PetscObjectComm((PetscObject)matin));
1780: info->nz_used = irecv[0];
1781: info->nz_allocated = irecv[1];
1782: info->nz_unneeded = irecv[2];
1783: info->memory = irecv[3];
1784: info->mallocs = irecv[4];
1785: }
1786: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1787: info->fill_ratio_needed = 0;
1788: info->factor_mallocs = 0;
1789: return(0);
1790: }
1792: PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1793: {
1794: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
1798: switch (op) {
1799: case MAT_NEW_NONZERO_LOCATIONS:
1800: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1801: case MAT_UNUSED_NONZERO_LOCATION_ERR:
1802: case MAT_KEEP_NONZERO_PATTERN:
1803: case MAT_NEW_NONZERO_LOCATION_ERR:
1804: case MAT_IGNORE_ZERO_ENTRIES:
1805: MatCheckPreallocated(A,1);
1806: MatSetOption(a->A,op,flg);
1807: MatSetOption(a->B,op,flg);
1808: break;
1809: case MAT_USE_INODES:
1810: if (PetscUnlikely(!(A)->preallocated)) {
1811: a->inode_setoption = PETSC_TRUE; /* option will be set in MatMPIAIJSetPreallocation_MPIAIJ() */
1812: a->inode_use = flg;
1813: } else {
1814: a->inode_setoption = PETSC_FALSE;
1815: MatSetOption(a->A,op,flg);
1816: MatSetOption(a->B,op,flg);
1817: }
1818: break;
1819: case MAT_ROW_ORIENTED:
1820: MatCheckPreallocated(A,1);
1821: a->roworiented = flg;
1823: MatSetOption(a->A,op,flg);
1824: MatSetOption(a->B,op,flg);
1825: break;
1826: case MAT_NEW_DIAGONALS:
1827: case MAT_SORTED_FULL:
1828: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1829: break;
1830: case MAT_IGNORE_OFF_PROC_ENTRIES:
1831: a->donotstash = flg;
1832: break;
1833: /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1834: case MAT_SPD:
1835: case MAT_SYMMETRIC:
1836: case MAT_STRUCTURALLY_SYMMETRIC:
1837: case MAT_HERMITIAN:
1838: case MAT_SYMMETRY_ETERNAL:
1839: break;
1840: case MAT_SUBMAT_SINGLEIS:
1841: A->submat_singleis = flg;
1842: break;
1843: case MAT_STRUCTURE_ONLY:
1844: /* The option is handled directly by MatSetOption() */
1845: break;
1846: default:
1847: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1848: }
1849: return(0);
1850: }
1852: PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1853: {
1854: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data;
1855: PetscScalar *vworkA,*vworkB,**pvA,**pvB,*v_p;
1857: PetscInt i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1858: PetscInt nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1859: PetscInt *cmap,*idx_p;
1862: if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1863: mat->getrowactive = PETSC_TRUE;
1865: if (!mat->rowvalues && (idx || v)) {
1866: /*
1867: allocate enough space to hold information from the longest row.
1868: */
1869: Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1870: PetscInt max = 1,tmp;
1871: for (i=0; i<matin->rmap->n; i++) {
1872: tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1873: if (max < tmp) max = tmp;
1874: }
1875: PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);
1876: }
1878: if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1879: lrow = row - rstart;
1881: pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1882: if (!v) {pvA = 0; pvB = 0;}
1883: if (!idx) {pcA = 0; if (!v) pcB = 0;}
1884: (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1885: (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1886: nztot = nzA + nzB;
1888: cmap = mat->garray;
1889: if (v || idx) {
1890: if (nztot) {
1891: /* Sort by increasing column numbers, assuming A and B already sorted */
1892: PetscInt imark = -1;
1893: if (v) {
1894: *v = v_p = mat->rowvalues;
1895: for (i=0; i<nzB; i++) {
1896: if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1897: else break;
1898: }
1899: imark = i;
1900: for (i=0; i<nzA; i++) v_p[imark+i] = vworkA[i];
1901: for (i=imark; i<nzB; i++) v_p[nzA+i] = vworkB[i];
1902: }
1903: if (idx) {
1904: *idx = idx_p = mat->rowindices;
1905: if (imark > -1) {
1906: for (i=0; i<imark; i++) {
1907: idx_p[i] = cmap[cworkB[i]];
1908: }
1909: } else {
1910: for (i=0; i<nzB; i++) {
1911: if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1912: else break;
1913: }
1914: imark = i;
1915: }
1916: for (i=0; i<nzA; i++) idx_p[imark+i] = cstart + cworkA[i];
1917: for (i=imark; i<nzB; i++) idx_p[nzA+i] = cmap[cworkB[i]];
1918: }
1919: } else {
1920: if (idx) *idx = 0;
1921: if (v) *v = 0;
1922: }
1923: }
1924: *nz = nztot;
1925: (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1926: (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1927: return(0);
1928: }
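/*
   Note: the merge above returns the row sorted by increasing global column
   index as [off-diagonal columns below cstart | diagonal-block columns |
   remaining off-diagonal columns], relying on mat->A and mat->B storing each
   row with sorted column indices.
*/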
1930: PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1931: {
1932: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1935: if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1936: aij->getrowactive = PETSC_FALSE;
1937: return(0);
1938: }
1940: PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1941: {
1942: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1943: Mat_SeqAIJ *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1945: PetscInt i,j,cstart = mat->cmap->rstart;
1946: PetscReal sum = 0.0;
1947: MatScalar *v;
1950: if (aij->size == 1) {
1951: MatNorm(aij->A,type,norm);
1952: } else {
1953: if (type == NORM_FROBENIUS) {
1954: v = amat->a;
1955: for (i=0; i<amat->nz; i++) {
1956: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1957: }
1958: v = bmat->a;
1959: for (i=0; i<bmat->nz; i++) {
1960: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1961: }
1962: MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1963: *norm = PetscSqrtReal(*norm);
1964: PetscLogFlops(2*amat->nz+2*bmat->nz);
1965: } else if (type == NORM_1) { /* max column norm */
1966: PetscReal *tmp,*tmp2;
1967: PetscInt *jj,*garray = aij->garray;
1968: PetscCalloc1(mat->cmap->N+1,&tmp);
1969: PetscMalloc1(mat->cmap->N+1,&tmp2);
1970: *norm = 0.0;
1971: v = amat->a; jj = amat->j;
1972: for (j=0; j<amat->nz; j++) {
1973: tmp[cstart + *jj++] += PetscAbsScalar(*v); v++;
1974: }
1975: v = bmat->a; jj = bmat->j;
1976: for (j=0; j<bmat->nz; j++) {
1977: tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1978: }
1979: MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));
1980: for (j=0; j<mat->cmap->N; j++) {
1981: if (tmp2[j] > *norm) *norm = tmp2[j];
1982: }
1983: PetscFree(tmp);
1984: PetscFree(tmp2);
1985: PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
1986: } else if (type == NORM_INFINITY) { /* max row norm */
1987: PetscReal ntemp = 0.0;
1988: for (j=0; j<aij->A->rmap->n; j++) {
1989: v = amat->a + amat->i[j];
1990: sum = 0.0;
1991: for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1992: sum += PetscAbsScalar(*v); v++;
1993: }
1994: v = bmat->a + bmat->i[j];
1995: for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1996: sum += PetscAbsScalar(*v); v++;
1997: }
1998: if (sum > ntemp) ntemp = sum;
1999: }
2000: MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));
2001: PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));
2002: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for the two-norm");
2003: }
2004: return(0);
2005: }
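/*
   Note on the three branches above: NORM_FROBENIUS computes
   sqrt(sum_ij |a_ij|^2), NORM_1 the maximum absolute column sum, and
   NORM_INFINITY the maximum absolute row sum. Only NORM_1 reduces a work
   array of global length cmap->N; the other two reduce a single scalar.
*/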
2007: PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
2008: {
2009: Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*b;
2010: Mat_SeqAIJ *Aloc =(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data,*sub_B_diag;
2011: PetscInt M = A->rmap->N,N=A->cmap->N,ma,na,mb,nb,row,*cols,*cols_tmp,*B_diag_ilen,i,ncol,A_diag_ncol;
2012: const PetscInt *ai,*aj,*bi,*bj,*B_diag_i;
2013: PetscErrorCode ierr;
2014: Mat B,A_diag,*B_diag;
2015: const MatScalar *array;
2018: ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
2019: ai = Aloc->i; aj = Aloc->j;
2020: bi = Bloc->i; bj = Bloc->j;
2021: if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
2022: PetscInt *d_nnz,*g_nnz,*o_nnz;
2023: PetscSFNode *oloc;
2024: PETSC_UNUSED PetscSF sf;
2026: PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);
2027: /* compute d_nnz for preallocation */
2028: PetscArrayzero(d_nnz,na);
2029: for (i=0; i<ai[ma]; i++) {
2030: d_nnz[aj[i]]++;
2031: }
2032: /* compute local off-diagonal contributions */
2033: PetscArrayzero(g_nnz,nb);
2034: for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
2035: /* map those to global */
2036: PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);
2037: PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);
2038: PetscSFSetFromOptions(sf);
2039: PetscArrayzero(o_nnz,na);
2040: PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2041: PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);
2042: PetscSFDestroy(&sf);
2044: MatCreate(PetscObjectComm((PetscObject)A),&B);
2045: MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
2046: MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));
2047: MatSetType(B,((PetscObject)A)->type_name);
2048: MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
2049: PetscFree4(d_nnz,o_nnz,g_nnz,oloc);
2050: } else {
2051: B = *matout;
2052: MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
2053: }
2055: b = (Mat_MPIAIJ*)B->data;
2056: A_diag = a->A;
2057: B_diag = &b->A;
2058: sub_B_diag = (Mat_SeqAIJ*)(*B_diag)->data;
2059: A_diag_ncol = A_diag->cmap->N;
2060: B_diag_ilen = sub_B_diag->ilen;
2061: B_diag_i = sub_B_diag->i;
2063: /* Set ilen for diagonal of B */
2064: for (i=0; i<A_diag_ncol; i++) {
2065: B_diag_ilen[i] = B_diag_i[i+1] - B_diag_i[i];
2066: }
2068: /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
2069: very quickly (i.e., without using MatSetValues()), because all writes are local. */
2070: MatTranspose(A_diag,MAT_REUSE_MATRIX,B_diag);
2072: /* copy over the B part */
2073: PetscMalloc1(bi[mb],&cols);
2074: array = Bloc->a;
2075: row = A->rmap->rstart;
2076: for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
2077: cols_tmp = cols;
2078: for (i=0; i<mb; i++) {
2079: ncol = bi[i+1]-bi[i];
2080: MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);
2081: row++;
2082: array += ncol; cols_tmp += ncol;
2083: }
2084: PetscFree(cols);
2086: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
2087: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
2088: if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
2089: *matout = B;
2090: } else {
2091: MatHeaderMerge(A,&B);
2092: }
2093: return(0);
2094: }
2096: PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
2097: {
2098: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2099: Mat a = aij->A,b = aij->B;
2101: PetscInt s1,s2,s3;
2104: MatGetLocalSize(mat,&s2,&s3);
2105: if (rr) {
2106: VecGetLocalSize(rr,&s1);
2107: if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
2108: /* Overlap communication with computation. */
2109: VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2110: }
2111: if (ll) {
2112: VecGetLocalSize(ll,&s1);
2113: if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
2114: (*b->ops->diagonalscale)(b,ll,0);
2115: }
2116: /* scale the diagonal block */
2117: (*a->ops->diagonalscale)(a,ll,rr);
2119: if (rr) {
2120: /* Do a scatter end and then right scale the off-diagonal block */
2121: VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
2122: (*b->ops->diagonalscale)(b,0,aij->lvec);
2123: }
2124: return(0);
2125: }
2127: PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2128: {
2129: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2133: MatSetUnfactored(a->A);
2134: return(0);
2135: }
2137: PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool *flag)
2138: {
2139: Mat_MPIAIJ *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2140: Mat a,b,c,d;
2141: PetscBool flg;
2145: a = matA->A; b = matA->B;
2146: c = matB->A; d = matB->B;
2148: MatEqual(a,c,&flg);
2149: if (flg) {
2150: MatEqual(b,d,&flg);
2151: }
2152: MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
2153: return(0);
2154: }
2156: PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2157: {
2159: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2160: Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;
2163: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2164: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2165: /* because of the column compression in the off-processor part of the matrix a->B,
2166: the number of columns in a->B and b->B may be different, hence we cannot call
2167: MatCopy() directly on the two parts. If need be, a copy more efficient than
2168: MatCopy_Basic() could be provided by first uncompressing the a->B matrices and
2169: then copying the submatrices */
2170: MatCopy_Basic(A,B,str);
2171: } else {
2172: MatCopy(a->A,b->A,str);
2173: MatCopy(a->B,b->B,str);
2174: }
2175: PetscObjectStateIncrease((PetscObject)B);
2176: return(0);
2177: }
2179: PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2180: {
2184: MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);
2185: return(0);
2186: }
2188: /*
2189: Computes the number of nonzeros per row needed for preallocation when X and Y
2190: have different nonzero structure.
2191: */
2192: PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2193: {
2194: PetscInt i,j,k,nzx,nzy;
2197: /* Set the number of nonzeros in the new matrix */
2198: for (i=0; i<m; i++) {
2199: const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2200: nzx = xi[i+1] - xi[i];
2201: nzy = yi[i+1] - yi[i];
2202: nnz[i] = 0;
2203: for (j=0,k=0; j<nzx; j++) { /* Point in X */
2204: for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2205: if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++; /* Skip duplicate */
2206: nnz[i]++;
2207: }
2208: for (; k<nzy; k++) nnz[i]++;
2209: }
2210: return(0);
2211: }
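/*
   Illustration: for a row where X has global columns {0,3,5} and Y has global
   columns {3,4}, the merge above gives nnz = |{0,3,4,5}| = 4; a column present
   in both matrices is counted only once.
*/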
2213: /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2214: static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2215: {
2217: PetscInt m = Y->rmap->N;
2218: Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data;
2219: Mat_SeqAIJ *y = (Mat_SeqAIJ*)Y->data;
2222: MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);
2223: return(0);
2224: }
2226: PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2227: {
2229: Mat_MPIAIJ *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;
2230: PetscBLASInt bnz,one=1;
2231: Mat_SeqAIJ *x,*y;
2234: if (str == SAME_NONZERO_PATTERN) {
2235: PetscScalar alpha = a;
2236: x = (Mat_SeqAIJ*)xx->A->data;
2237: PetscBLASIntCast(x->nz,&bnz);
2238: y = (Mat_SeqAIJ*)yy->A->data;
2239: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2240: x = (Mat_SeqAIJ*)xx->B->data;
2241: y = (Mat_SeqAIJ*)yy->B->data;
2242: PetscBLASIntCast(x->nz,&bnz);
2243: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2244: PetscObjectStateIncrease((PetscObject)Y);
2245: /* the MatAXPY_Basic* subroutines call MatAssembly, so the matrix on the GPU
2246: will be updated */
2247: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
2248: if (Y->offloadmask != PETSC_OFFLOAD_UNALLOCATED) {
2249: Y->offloadmask = PETSC_OFFLOAD_CPU;
2250: }
2251: #endif
2252: } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X are a subset of Y's */
2253: MatAXPY_Basic(Y,a,X,str);
2254: } else {
2255: Mat B;
2256: PetscInt *nnz_d,*nnz_o;
2257: PetscMalloc1(yy->A->rmap->N,&nnz_d);
2258: PetscMalloc1(yy->B->rmap->N,&nnz_o);
2259: MatCreate(PetscObjectComm((PetscObject)Y),&B);
2260: PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);
2261: MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);
2262: MatSetBlockSizesFromMats(B,Y,Y);
2263: MatSetType(B,MATMPIAIJ);
2264: MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);
2265: MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);
2266: MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);
2267: MatAXPY_BasicWithPreallocation(B,Y,a,X,str);
2268: MatHeaderReplace(Y,&B);
2269: PetscFree(nnz_d);
2270: PetscFree(nnz_o);
2271: }
2272: return(0);
2273: }
2275: extern PetscErrorCode MatConjugate_SeqAIJ(Mat);
2277: PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
2278: {
2279: #if defined(PETSC_USE_COMPLEX)
2281: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2284: MatConjugate_SeqAIJ(aij->A);
2285: MatConjugate_SeqAIJ(aij->B);
2286: #else
2288: #endif
2289: return(0);
2290: }
2292: PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2293: {
2294: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2298: MatRealPart(a->A);
2299: MatRealPart(a->B);
2300: return(0);
2301: }
2303: PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2304: {
2305: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2309: MatImaginaryPart(a->A);
2310: MatImaginaryPart(a->B);
2311: return(0);
2312: }
2314: PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2315: {
2316: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2318: PetscInt i,*idxb = 0;
2319: PetscScalar *va,*vb;
2320: Vec vtmp;
2323: MatGetRowMaxAbs(a->A,v,idx);
2324: VecGetArray(v,&va);
2325: if (idx) {
2326: for (i=0; i<A->rmap->n; i++) {
2327: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2328: }
2329: }
2331: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2332: if (idx) {
2333: PetscMalloc1(A->rmap->n,&idxb);
2334: }
2335: MatGetRowMaxAbs(a->B,vtmp,idxb);
2336: VecGetArray(vtmp,&vb);
2338: for (i=0; i<A->rmap->n; i++) {
2339: if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2340: va[i] = vb[i];
2341: if (idx) idx[i] = a->garray[idxb[i]];
2342: }
2343: }
2345: VecRestoreArray(v,&va);
2346: VecRestoreArray(vtmp,&vb);
2347: PetscFree(idxb);
2348: VecDestroy(&vtmp);
2349: return(0);
2350: }
2352: PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2353: {
2354: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2356: PetscInt i,*idxb = 0;
2357: PetscScalar *va,*vb;
2358: Vec vtmp;
2361: MatGetRowMinAbs(a->A,v,idx);
2362: VecGetArray(v,&va);
2363: if (idx) {
2364: for (i=0; i<A->rmap->n; i++) {
2365: if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2366: }
2367: }
2369: VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);
2370: if (idx) {
2371: PetscMalloc1(A->rmap->n,&idxb);
2372: }
2373: MatGetRowMinAbs(a->B,vtmp,idxb);
2374: VecGetArray(vtmp,&vb);
2376: for (i=0; i<A->rmap->n; i++) {
2377: if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2378: va[i] = vb[i];
2379: if (idx) idx[i] = a->garray[idxb[i]];
2380: }
2381: }
2383: VecRestoreArray(v,&va);
2384: VecRestoreArray(vtmp,&vb);
2385: PetscFree(idxb);
2386: VecDestroy(&vtmp);
2387: return(0);
2388: }
2390: PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2391: {
2392: Mat_MPIAIJ *mat = (Mat_MPIAIJ*) A->data;
2393: PetscInt n = A->rmap->n;
2394: PetscInt cstart = A->cmap->rstart;
2395: PetscInt *cmap = mat->garray;
2396: PetscInt *diagIdx, *offdiagIdx;
2397: Vec diagV, offdiagV;
2398: PetscScalar *a, *diagA, *offdiagA;
2399: PetscInt r;
2403: PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2404: VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2405: VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2406: MatGetRowMin(mat->A, diagV, diagIdx);
2407: MatGetRowMin(mat->B, offdiagV, offdiagIdx);
2408: VecGetArray(v, &a);
2409: VecGetArray(diagV, &diagA);
2410: VecGetArray(offdiagV, &offdiagA);
2411: for (r = 0; r < n; ++r) {
2412: if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2413: a[r] = diagA[r];
2414: idx[r] = cstart + diagIdx[r];
2415: } else {
2416: a[r] = offdiagA[r];
2417: idx[r] = cmap[offdiagIdx[r]];
2418: }
2419: }
2420: VecRestoreArray(v, &a);
2421: VecRestoreArray(diagV, &diagA);
2422: VecRestoreArray(offdiagV, &offdiagA);
2423: VecDestroy(&diagV);
2424: VecDestroy(&offdiagV);
2425: PetscFree2(diagIdx, offdiagIdx);
2426: return(0);
2427: }
2429: PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2430: {
2431: Mat_MPIAIJ *mat = (Mat_MPIAIJ*) A->data;
2432: PetscInt n = A->rmap->n;
2433: PetscInt cstart = A->cmap->rstart;
2434: PetscInt *cmap = mat->garray;
2435: PetscInt *diagIdx, *offdiagIdx;
2436: Vec diagV, offdiagV;
2437: PetscScalar *a, *diagA, *offdiagA;
2438: PetscInt r;
2442: PetscMalloc2(n,&diagIdx,n,&offdiagIdx);
2443: VecCreateSeq(PETSC_COMM_SELF, n, &diagV);
2444: VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);
2445: MatGetRowMax(mat->A, diagV, diagIdx);
2446: MatGetRowMax(mat->B, offdiagV, offdiagIdx);
2447: VecGetArray(v, &a);
2448: VecGetArray(diagV, &diagA);
2449: VecGetArray(offdiagV, &offdiagA);
2450: for (r = 0; r < n; ++r) {
2451: if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2452: a[r] = diagA[r];
2453: idx[r] = cstart + diagIdx[r];
2454: } else {
2455: a[r] = offdiagA[r];
2456: idx[r] = cmap[offdiagIdx[r]];
2457: }
2458: }
2459: VecRestoreArray(v, &a);
2460: VecRestoreArray(diagV, &diagA);
2461: VecRestoreArray(offdiagV, &offdiagA);
2462: VecDestroy(&diagV);
2463: VecDestroy(&offdiagV);
2464: PetscFree2(diagIdx, offdiagIdx);
2465: return(0);
2466: }
2468: PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2469: {
2471: Mat *dummy;
2474: MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);
2475: *newmat = *dummy;
2476: PetscFree(dummy);
2477: return(0);
2478: }
2480: PetscErrorCode MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2481: {
2482: Mat_MPIAIJ *a = (Mat_MPIAIJ*) A->data;
2486: MatInvertBlockDiagonal(a->A,values);
2487: A->factorerrortype = a->A->factorerrortype;
2488: return(0);
2489: }
2491: static PetscErrorCode MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2492: {
2494: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)x->data;
2497: if (!x->assembled && !x->preallocated) SETERRQ(PetscObjectComm((PetscObject)x), PETSC_ERR_ARG_WRONGSTATE, "MatSetRandom on an unassembled and unpreallocated MATMPIAIJ is not allowed");
2498: MatSetRandom(aij->A,rctx);
2499: if (x->assembled) {
2500: MatSetRandom(aij->B,rctx);
2501: } else {
2502: MatSetRandomSkipColumnRange_SeqAIJ_Private(aij->B,x->cmap->rstart,x->cmap->rend,rctx);
2503: }
2504: MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
2505: MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
2506: return(0);
2507: }
2509: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2510: {
2512: if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2513: else A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ;
2514: return(0);
2515: }
2517: /*@
2518: MatMPIAIJSetUseScalableIncreaseOverlap - Determines whether the matrix uses a scalable algorithm to compute the overlap in MatIncreaseOverlap()
2520: Collective on Mat
2522: Input Parameters:
2523: + A - the matrix
2524: - sc - PETSC_TRUE to use the scalable algorithm (the default is the non-scalable algorithm)
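   Options Database Keys:
.   -mat_increase_overlap_scalable - use the scalable algorithm in MatIncreaseOverlap()

   Example usage (a minimal sketch; A, n, is and ov are assumed to be supplied by the caller):
.vb
   MatMPIAIJSetUseScalableIncreaseOverlap(A,PETSC_TRUE);
   MatIncreaseOverlap(A,n,is,ov);
.ve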
2526: Level: advanced
2528: @*/
2529: PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2530: {
2531: PetscErrorCode ierr;
2534: PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));
2535: return(0);
2536: }
2538: PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2539: {
2540: PetscErrorCode ierr;
2541: PetscBool sc = PETSC_FALSE,flg;
2544: PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");
2545: if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2546: PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);
2547: if (flg) {
2548: MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);
2549: }
2550: PetscOptionsTail();
2551: return(0);
2552: }
2554: PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2555: {
2557: Mat_MPIAIJ *maij = (Mat_MPIAIJ*)Y->data;
2558: Mat_SeqAIJ *aij = (Mat_SeqAIJ*)maij->A->data;
2561: if (!Y->preallocated) {
2562: MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);
2563: } else if (!aij->nz) {
2564: PetscInt nonew = aij->nonew;
2565: MatSeqAIJSetPreallocation(maij->A,1,NULL);
2566: aij->nonew = nonew;
2567: }
2568: MatShift_Basic(Y,a);
2569: return(0);
2570: }
2572: PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool *missing,PetscInt *d)
2573: {
2574: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2578: if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2579: MatMissingDiagonal(a->A,missing,d);
2580: if (d) {
2581: PetscInt rstart;
2582: MatGetOwnershipRange(A,&rstart,NULL);
2583: *d += rstart;
2585: }
2586: return(0);
2587: }
2589: PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *diag)
2590: {
2591: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
2595: MatInvertVariableBlockDiagonal(a->A,nblocks,bsizes,diag);
2596: return(0);
2597: }
2599: /* -------------------------------------------------------------------*/
2600: static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2601: MatGetRow_MPIAIJ,
2602: MatRestoreRow_MPIAIJ,
2603: MatMult_MPIAIJ,
2604: /* 4*/ MatMultAdd_MPIAIJ,
2605: MatMultTranspose_MPIAIJ,
2606: MatMultTransposeAdd_MPIAIJ,
2607: 0,
2608: 0,
2609: 0,
2610: /*10*/ 0,
2611: 0,
2612: 0,
2613: MatSOR_MPIAIJ,
2614: MatTranspose_MPIAIJ,
2615: /*15*/ MatGetInfo_MPIAIJ,
2616: MatEqual_MPIAIJ,
2617: MatGetDiagonal_MPIAIJ,
2618: MatDiagonalScale_MPIAIJ,
2619: MatNorm_MPIAIJ,
2620: /*20*/ MatAssemblyBegin_MPIAIJ,
2621: MatAssemblyEnd_MPIAIJ,
2622: MatSetOption_MPIAIJ,
2623: MatZeroEntries_MPIAIJ,
2624: /*24*/ MatZeroRows_MPIAIJ,
2625: 0,
2626: 0,
2627: 0,
2628: 0,
2629: /*29*/ MatSetUp_MPIAIJ,
2630: 0,
2631: 0,
2632: MatGetDiagonalBlock_MPIAIJ,
2633: 0,
2634: /*34*/ MatDuplicate_MPIAIJ,
2635: 0,
2636: 0,
2637: 0,
2638: 0,
2639: /*39*/ MatAXPY_MPIAIJ,
2640: MatCreateSubMatrices_MPIAIJ,
2641: MatIncreaseOverlap_MPIAIJ,
2642: MatGetValues_MPIAIJ,
2643: MatCopy_MPIAIJ,
2644: /*44*/ MatGetRowMax_MPIAIJ,
2645: MatScale_MPIAIJ,
2646: MatShift_MPIAIJ,
2647: MatDiagonalSet_MPIAIJ,
2648: MatZeroRowsColumns_MPIAIJ,
2649: /*49*/ MatSetRandom_MPIAIJ,
2650: 0,
2651: 0,
2652: 0,
2653: 0,
2654: /*54*/ MatFDColoringCreate_MPIXAIJ,
2655: 0,
2656: MatSetUnfactored_MPIAIJ,
2657: MatPermute_MPIAIJ,
2658: 0,
2659: /*59*/ MatCreateSubMatrix_MPIAIJ,
2660: MatDestroy_MPIAIJ,
2661: MatView_MPIAIJ,
2662: 0,
2663: 0,
2664: /*64*/ 0,
2665: MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2666: 0,
2667: 0,
2668: 0,
2669: /*69*/ MatGetRowMaxAbs_MPIAIJ,
2670: MatGetRowMinAbs_MPIAIJ,
2671: 0,
2672: 0,
2673: 0,
2674: 0,
2675: /*75*/ MatFDColoringApply_AIJ,
2676: MatSetFromOptions_MPIAIJ,
2677: 0,
2678: 0,
2679: MatFindZeroDiagonals_MPIAIJ,
2680: /*80*/ 0,
2681: 0,
2682: 0,
2683: /*83*/ MatLoad_MPIAIJ,
2684: MatIsSymmetric_MPIAIJ,
2685: 0,
2686: 0,
2687: 0,
2688: 0,
2689: /*89*/ 0,
2690: 0,
2691: MatMatMultNumeric_MPIAIJ_MPIAIJ,
2692: 0,
2693: 0,
2694: /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2695: 0,
2696: 0,
2697: 0,
2698: MatBindToCPU_MPIAIJ,
2699: /*99*/ MatProductSetFromOptions_MPIAIJ,
2700: 0,
2701: 0,
2702: MatConjugate_MPIAIJ,
2703: 0,
2704: /*104*/MatSetValuesRow_MPIAIJ,
2705: MatRealPart_MPIAIJ,
2706: MatImaginaryPart_MPIAIJ,
2707: 0,
2708: 0,
2709: /*109*/0,
2710: 0,
2711: MatGetRowMin_MPIAIJ,
2712: 0,
2713: MatMissingDiagonal_MPIAIJ,
2714: /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2715: 0,
2716: MatGetGhosts_MPIAIJ,
2717: 0,
2718: 0,
2719: /*119*/0,
2720: 0,
2721: 0,
2722: 0,
2723: MatGetMultiProcBlock_MPIAIJ,
2724: /*124*/MatFindNonzeroRows_MPIAIJ,
2725: MatGetColumnNorms_MPIAIJ,
2726: MatInvertBlockDiagonal_MPIAIJ,
2727: MatInvertVariableBlockDiagonal_MPIAIJ,
2728: MatCreateSubMatricesMPI_MPIAIJ,
2729: /*129*/0,
2730: 0,
2731: 0,
2732: MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2733: 0,
2734: /*134*/0,
2735: 0,
2736: 0,
2737: 0,
2738: 0,
2739: /*139*/MatSetBlockSizes_MPIAIJ,
2740: 0,
2741: 0,
2742: MatFDColoringSetUp_MPIXAIJ,
2743: MatFindOffBlockDiagonalEntries_MPIAIJ,
2744: MatCreateMPIMatConcatenateSeqMat_MPIAIJ,
2745: /*145*/0,
2746: 0,
2747: 0
2748: };
2750: /* ----------------------------------------------------------------------------------------*/
2752: PetscErrorCode MatStoreValues_MPIAIJ(Mat mat)
2753: {
2754: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2758: MatStoreValues(aij->A);
2759: MatStoreValues(aij->B);
2760: return(0);
2761: }
2763: PetscErrorCode MatRetrieveValues_MPIAIJ(Mat mat)
2764: {
2765: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
2769: MatRetrieveValues(aij->A);
2770: MatRetrieveValues(aij->B);
2771: return(0);
2772: }
2774: PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2775: {
2776: Mat_MPIAIJ *b;
2778: PetscMPIInt size;
2781: PetscLayoutSetUp(B->rmap);
2782: PetscLayoutSetUp(B->cmap);
2783: b = (Mat_MPIAIJ*)B->data;
2785: #if defined(PETSC_USE_CTABLE)
2786: PetscTableDestroy(&b->colmap);
2787: #else
2788: PetscFree(b->colmap);
2789: #endif
2790: PetscFree(b->garray);
2791: VecDestroy(&b->lvec);
2792: VecScatterDestroy(&b->Mvctx);
2794: /* Because B may have been resized we simply destroy it and create a new one each time */
2795: MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
2796: MatDestroy(&b->B);
2797: MatCreate(PETSC_COMM_SELF,&b->B);
2798: MatSetSizes(b->B,B->rmap->n,size > 1 ? B->cmap->N : 0,B->rmap->n,size > 1 ? B->cmap->N : 0);
2799: MatSetBlockSizesFromMats(b->B,B,B);
2800: MatSetType(b->B,MATSEQAIJ);
2801: PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);
2803: if (!B->preallocated) {
2804: MatCreate(PETSC_COMM_SELF,&b->A);
2805: MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
2806: MatSetBlockSizesFromMats(b->A,B,B);
2807: MatSetType(b->A,MATSEQAIJ);
2808: PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
2809: }
2811: MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
2812: MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
2813: B->preallocated = PETSC_TRUE;
2814: B->was_assembled = PETSC_FALSE;
2815: B->assembled = PETSC_FALSE;
2817: /* Set inode option */
2818: if (b->inode_setoption) {
2819: MatSetOption(b->A,MAT_USE_INODES,b->inode_use);
2820: MatSetOption(b->B,MAT_USE_INODES,b->inode_use);
2821: }
2822: return(0);
2823: }
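/*
   A minimal sketch of the user-level preallocation sequence that ends up here
   (m and n are local sizes chosen by the caller; exact per-row counts can be
   passed through the d_nnz/o_nnz arrays instead of NULL):

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);
     MatSetType(A,MATMPIAIJ);
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
*/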
2825: PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2826: {
2827: Mat_MPIAIJ *b;
2832: PetscLayoutSetUp(B->rmap);
2833: PetscLayoutSetUp(B->cmap);
2834: b = (Mat_MPIAIJ*)B->data;
2836: #if defined(PETSC_USE_CTABLE)
2837: PetscTableDestroy(&b->colmap);
2838: #else
2839: PetscFree(b->colmap);
2840: #endif
2841: PetscFree(b->garray);
2842: VecDestroy(&b->lvec);
2843: VecScatterDestroy(&b->Mvctx);
2845: MatResetPreallocation(b->A);
2846: MatResetPreallocation(b->B);
2847: B->preallocated = PETSC_TRUE;
2848: B->was_assembled = PETSC_FALSE;
2849: B->assembled = PETSC_FALSE;
2850: return(0);
2851: }
2853: PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2854: {
2855: Mat mat;
2856: Mat_MPIAIJ *a,*oldmat = (Mat_MPIAIJ*)matin->data;
2860: *newmat = 0;
2861: MatCreate(PetscObjectComm((PetscObject)matin),&mat);
2862: MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2863: MatSetBlockSizesFromMats(mat,matin,matin);
2864: MatSetType(mat,((PetscObject)matin)->type_name);
2865: a = (Mat_MPIAIJ*)mat->data;
2867: mat->factortype = matin->factortype;
2868: mat->assembled = matin->assembled;
2869: mat->insertmode = NOT_SET_VALUES;
2870: mat->preallocated = matin->preallocated;
2872: a->size = oldmat->size;
2873: a->rank = oldmat->rank;
2874: a->donotstash = oldmat->donotstash;
2875: a->roworiented = oldmat->roworiented;
2876: a->rowindices = NULL;
2877: a->rowvalues = NULL;
2878: a->getrowactive = PETSC_FALSE;
2880: PetscLayoutReference(matin->rmap,&mat->rmap);
2881: PetscLayoutReference(matin->cmap,&mat->cmap);
2883: if (oldmat->colmap) {
2884: #if defined(PETSC_USE_CTABLE)
2885: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2886: #else
2887: PetscMalloc1(mat->cmap->N,&a->colmap);
2888: PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));
2889: PetscArraycpy(a->colmap,oldmat->colmap,mat->cmap->N);
2890: #endif
2891: } else a->colmap = NULL;
2892: if (oldmat->garray) {
2893: PetscInt len;
2894: len = oldmat->B->cmap->n;
2895: PetscMalloc1(len+1,&a->garray);
2896: PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));
2897: if (len) { PetscArraycpy(a->garray,oldmat->garray,len); }
2898: } else a->garray = NULL;
2900: /* MatDuplicate() may be called with a non-assembled matrix:
2901: it only requires the matrix to be preallocated.
2902: This can happen, for example, inside a DMCreateMatrix_Shell */
2903: if (oldmat->lvec) {
2904: VecDuplicate(oldmat->lvec,&a->lvec);
2905: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);
2906: }
2907: if (oldmat->Mvctx) {
2908: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2909: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);
2910: }
2911: if (oldmat->Mvctx_mpi1) {
2912: VecScatterCopy(oldmat->Mvctx_mpi1,&a->Mvctx_mpi1);
2913: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx_mpi1);
2914: }
2916: MatDuplicate(oldmat->A,cpvalues,&a->A);
2917: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);
2918: MatDuplicate(oldmat->B,cpvalues,&a->B);
2919: PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);
2920: PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2921: *newmat = mat;
2922: return(0);
2923: }
2925: PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2926: {
2927: PetscBool isbinary, ishdf5;
2933: /* force binary viewer to load .info file if it has not yet done so */
2934: PetscViewerSetUp(viewer);
2935: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
2936: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERHDF5, &ishdf5);
2937: if (isbinary) {
2938: MatLoad_MPIAIJ_Binary(newMat,viewer);
2939: } else if (ishdf5) {
2940: #if defined(PETSC_HAVE_HDF5)
2941: MatLoad_AIJ_HDF5(newMat,viewer);
2942: #else
2943: SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
2944: #endif
2945: } else {
2946: SETERRQ2(PetscObjectComm((PetscObject)newMat),PETSC_ERR_SUP,"Viewer type %s not yet supported for reading %s matrices",((PetscObject)viewer)->type_name,((PetscObject)newMat)->type_name);
2947: }
2948: return(0);
2949: }
2951: PetscErrorCode MatLoad_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
2952: {
2953: PetscInt header[4],M,N,m,nz,rows,cols,sum,i;
2954: PetscInt *rowidxs,*colidxs;
2955: PetscScalar *matvals;
2959: PetscViewerSetUp(viewer);
2961: /* read in matrix header */
2962: PetscViewerBinaryRead(viewer,header,4,NULL,PETSC_INT);
2963: if (header[0] != MAT_FILE_CLASSID) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Not a matrix object in file");
2964: M = header[1]; N = header[2]; nz = header[3];
2965: if (M < 0) SETERRQ1(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Matrix row size (%D) in file is negative",M);
2966: if (N < 0) SETERRQ1(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Matrix column size (%D) in file is negative",N);
2967: if (nz < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MPIAIJ");
2969: /* set block sizes from the viewer's .info file */
2970: MatLoad_Binary_BlockSizes(mat,viewer);
2971: /* set global sizes if not set already */
2972: if (mat->rmap->N < 0) mat->rmap->N = M;
2973: if (mat->cmap->N < 0) mat->cmap->N = N;
2974: PetscLayoutSetUp(mat->rmap);
2975: PetscLayoutSetUp(mat->cmap);
2977: /* check if the matrix sizes are correct */
2978: MatGetSize(mat,&rows,&cols);
2979: if (M != rows || N != cols) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Matrix in file of different sizes (%D, %D) than the input matrix (%D, %D)",M,N,rows,cols);
2981: /* read in row lengths and build row indices */
2982: MatGetLocalSize(mat,&m,NULL);
2983: PetscMalloc1(m+1,&rowidxs);
2984: PetscViewerBinaryReadAll(viewer,rowidxs+1,m,PETSC_DECIDE,M,PETSC_INT);
2985: rowidxs[0] = 0; for (i=0; i<m; i++) rowidxs[i+1] += rowidxs[i];
2986: MPIU_Allreduce(&rowidxs[m],&sum,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)viewer));
2987: if (sum != nz) SETERRQ2(PetscObjectComm((PetscObject)viewer),PETSC_ERR_FILE_UNEXPECTED,"Inconsistent matrix data in file: nonzeros = %D, sum-row-lengths = %D",nz,sum);
2988: /* read in column indices and matrix values */
2989: PetscMalloc2(rowidxs[m],&colidxs,rowidxs[m],&matvals);
2990: PetscViewerBinaryReadAll(viewer,colidxs,rowidxs[m],PETSC_DETERMINE,PETSC_DETERMINE,PETSC_INT);
2991: PetscViewerBinaryReadAll(viewer,matvals,rowidxs[m],PETSC_DETERMINE,PETSC_DETERMINE,PETSC_SCALAR);
2992: /* store matrix indices and values */
2993: MatMPIAIJSetPreallocationCSR(mat,rowidxs,colidxs,matvals);
2994: PetscFree(rowidxs);
2995: PetscFree2(colidxs,matvals);
2996: return(0);
2997: }
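/*
   Note: the binary format consumed above is
     header      : MAT_FILE_CLASSID, M, N, nz   (4 PetscInt)
     row lengths : one PetscInt per global row  (M PetscInt, read distributed)
     columns     : global column indices        (nz PetscInt)
     values      : matrix entries               (nz PetscScalar)
   and rowidxs[] is built as the prefix sum of the local row lengths.
*/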
2999: /* Not scalable because of ISAllGather() unless getting all columns. */
3000: PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
3001: {
3003: IS iscol_local;
3004: PetscBool isstride;
3005: PetscMPIInt lisstride=0,gisstride;
3008: /* check if we are grabbing all columns*/
3009: PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);
3011: if (isstride) {
3012: PetscInt start,len,mstart,mlen;
3013: ISStrideGetInfo(iscol,&start,NULL);
3014: ISGetLocalSize(iscol,&len);
3015: MatGetOwnershipRangeColumn(mat,&mstart,&mlen);
3016: if (mstart == start && mlen-mstart == len) lisstride = 1;
3017: }
3019: MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
3020: if (gisstride) {
3021: PetscInt N;
3022: MatGetSize(mat,NULL,&N);
3023: ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol_local);
3024: ISSetIdentity(iscol_local);
3025: PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");
3026: } else {
3027: PetscInt cbs;
3028: ISGetBlockSize(iscol,&cbs);
3029: ISAllGather(iscol,&iscol_local);
3030: ISSetBlockSize(iscol_local,cbs);
3031: }
3033: *isseq = iscol_local;
3034: return(0);
3035: }
3037: /*
3038: Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
3039: (see MatCreateSubMatrix_MPIAIJ_nonscalable)
3041: Input Parameters:
3042: mat - matrix
3043: isrow - parallel row index set; its local indices are a subset of the local rows of mat,
3044: i.e., mat->rstart <= isrow[i] < mat->rend
3045: iscol - parallel column index set; its local indices are a subset of local columns of mat,
3046: i.e., mat->cstart <= iscol[i] < mat->cend
3047: Output Parameter:
3048: isrow_d,iscol_d - sequential row and column index sets for retrieving mat->A
3049: iscol_o - sequential column index set for retrieving mat->B
3050: garray - column map; garray[i] indicates global location of iscol_o[i] in iscol
3051: */
3052: PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat,IS isrow,IS iscol,IS *isrow_d,IS *iscol_d,IS *iscol_o,const PetscInt *garray[])
3053: {
3055: Vec x,cmap;
3056: const PetscInt *is_idx;
3057: PetscScalar *xarray,*cmaparray;
3058: PetscInt ncols,isstart,*idx,m,rstart,*cmap1,count;
3059: Mat_MPIAIJ *a=(Mat_MPIAIJ*)mat->data;
3060: Mat B=a->B;
3061: Vec lvec=a->lvec,lcmap;
3062: PetscInt i,cstart,cend,Bn=B->cmap->N;
3063: MPI_Comm comm;
3064: VecScatter Mvctx=a->Mvctx;
3067: PetscObjectGetComm((PetscObject)mat,&comm);
3068: ISGetLocalSize(iscol,&ncols);
3070: /* (1) iscol selects a subset of the columns of mat; build a full vector x that holds -1. except at the selected columns */
3071: MatCreateVecs(mat,&x,NULL);
3072: VecSet(x,-1.0);
3073: VecDuplicate(x,&cmap);
3074: VecSet(cmap,-1.0);
3076: /* Get start indices */
3077: MPI_Scan(&ncols,&isstart,1,MPIU_INT,MPI_SUM,comm);
3078: isstart -= ncols;
3079: MatGetOwnershipRangeColumn(mat,&cstart,&cend);
3081: ISGetIndices(iscol,&is_idx);
3082: VecGetArray(x,&xarray);
3083: VecGetArray(cmap,&cmaparray);
3084: PetscMalloc1(ncols,&idx);
3085: for (i=0; i<ncols; i++) {
3086: xarray[is_idx[i]-cstart] = (PetscScalar)is_idx[i];
3087: cmaparray[is_idx[i]-cstart] = i + isstart; /* global index of iscol[i] */
3088: idx[i] = is_idx[i]-cstart; /* local index of iscol[i] */
3089: }
3090: VecRestoreArray(x,&xarray);
3091: VecRestoreArray(cmap,&cmaparray);
3092: ISRestoreIndices(iscol,&is_idx);
3094: /* Get iscol_d */
3095: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,iscol_d);
3096: ISGetBlockSize(iscol,&i);
3097: ISSetBlockSize(*iscol_d,i);
3099: /* Get isrow_d */
3100: ISGetLocalSize(isrow,&m);
3101: rstart = mat->rmap->rstart;
3102: PetscMalloc1(m,&idx);
3103: ISGetIndices(isrow,&is_idx);
3104: for (i=0; i<m; i++) idx[i] = is_idx[i]-rstart;
3105: ISRestoreIndices(isrow,&is_idx);
3107: ISCreateGeneral(PETSC_COMM_SELF,m,idx,PETSC_OWN_POINTER,isrow_d);
3108: ISGetBlockSize(isrow,&i);
3109: ISSetBlockSize(*isrow_d,i);
3111: /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3112: VecScatterBegin(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3113: VecScatterEnd(Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);
3115: VecDuplicate(lvec,&lcmap);
3117: VecScatterBegin(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3118: VecScatterEnd(Mvctx,cmap,lcmap,INSERT_VALUES,SCATTER_FORWARD);
3120: /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3121: /* off-process column indices */
3122: count = 0;
3123: PetscMalloc1(Bn,&idx);
3124: PetscMalloc1(Bn,&cmap1);
3126: VecGetArray(lvec,&xarray);
3127: VecGetArray(lcmap,&cmaparray);
3128: for (i=0; i<Bn; i++) {
3129: if (PetscRealPart(xarray[i]) > -1.0) {
3130: idx[count] = i; /* local column index in off-diagonal part B */
3131: cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]); /* column index in submat */
3132: count++;
3133: }
3134: }
3135: VecRestoreArray(lvec,&xarray);
3136: VecRestoreArray(lcmap,&cmaparray);
3138: ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,iscol_o);
3139: /* cannot ensure iscol_o has same blocksize as iscol! */
3141: PetscFree(idx);
3142: *garray = cmap1;
3144: VecDestroy(&x);
3145: VecDestroy(&cmap);
3146: VecDestroy(&lcmap);
3147: return(0);
3148: }
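/*
   Note: the routine above marks each selected local column in two work vectors,
   x (holding the global column index) and cmap (holding the index within the
   submatrix), scatters both through a->Mvctx exactly as MatMult_MPIAIJ would,
   and then harvests the entries of lvec that are > -1 to produce iscol_o and
   garray.
*/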
3150: /* isrow and iscol have same processor distribution as mat, output *submat is a submatrix of local mat */
3151: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *submat)
3152: {
3154: Mat_MPIAIJ *a = (Mat_MPIAIJ*)mat->data,*asub;
3155: Mat M = NULL;
3156: MPI_Comm comm;
3157: IS iscol_d,isrow_d,iscol_o;
3158: Mat Asub = NULL,Bsub = NULL;
3159: PetscInt n;
3162: PetscObjectGetComm((PetscObject)mat,&comm);
3164: if (call == MAT_REUSE_MATRIX) {
3165: /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3166: PetscObjectQuery((PetscObject)*submat,"isrow_d",(PetscObject*)&isrow_d);
3167: if (!isrow_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow_d passed in was not used before, cannot reuse");
3169: PetscObjectQuery((PetscObject)*submat,"iscol_d",(PetscObject*)&iscol_d);
3170: if (!iscol_d) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_d passed in was not used before, cannot reuse");
3172: PetscObjectQuery((PetscObject)*submat,"iscol_o",(PetscObject*)&iscol_o);
3173: if (!iscol_o) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"iscol_o passed in was not used before, cannot reuse");
3175: /* Update diagonal and off-diagonal portions of submat */
3176: asub = (Mat_MPIAIJ*)(*submat)->data;
3177: MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->A);
3178: ISGetLocalSize(iscol_o,&n);
3179: if (n) {
3180: MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_REUSE_MATRIX,&asub->B);
3181: }
3182: MatAssemblyBegin(*submat,MAT_FINAL_ASSEMBLY);
3183: MatAssemblyEnd(*submat,MAT_FINAL_ASSEMBLY);
3185: } else { /* call == MAT_INITIAL_MATRIX) */
3186: const PetscInt *garray;
3187: PetscInt BsubN;
3189: /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3190: ISGetSeqIS_SameColDist_Private(mat,isrow,iscol,&isrow_d,&iscol_d,&iscol_o,&garray);
3192: /* Create local submatrices Asub and Bsub */
3193: MatCreateSubMatrix_SeqAIJ(a->A,isrow_d,iscol_d,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Asub);
3194: MatCreateSubMatrix_SeqAIJ(a->B,isrow_d,iscol_o,PETSC_DECIDE,MAT_INITIAL_MATRIX,&Bsub);
3196: /* Create submatrix M */
3197: MatCreateMPIAIJWithSeqAIJ(comm,Asub,Bsub,garray,&M);
3199: /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3200: asub = (Mat_MPIAIJ*)M->data;
3202: ISGetLocalSize(iscol_o,&BsubN);
3203: n = asub->B->cmap->N;
3204: if (BsubN > n) {
3205: /* This case can be tested using ~petsc/src/tao/bound/tutorials/runplate2_3 */
3206: const PetscInt *idx;
3207: PetscInt i,j,*idx_new,*subgarray = asub->garray;
3208: PetscInfo2(M,"submatrix Bn %D != BsubN %D, update iscol_o\n",n,BsubN);
3210: PetscMalloc1(n,&idx_new);
3211: j = 0;
3212: ISGetIndices(iscol_o,&idx);
3213: for (i=0; i<n; i++) {
3214: if (j >= BsubN) break;
3215: while (subgarray[i] > garray[j]) j++;
3217: if (subgarray[i] == garray[j]) {
3218: idx_new[i] = idx[j++];
3219: } else SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"subgarray[%D]=%D cannot be smaller than garray[%D]=%D",i,subgarray[i],j,garray[j]);
3220: }
3221: ISRestoreIndices(iscol_o,&idx);
3223: ISDestroy(&iscol_o);
3224: ISCreateGeneral(PETSC_COMM_SELF,n,idx_new,PETSC_OWN_POINTER,&iscol_o);
3226: } else if (BsubN < n) {
3227: SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Columns of Bsub (%D) cannot be smaller than B's (%D)",BsubN,asub->B->cmap->N);
3228: }
3230: PetscFree(garray);
3231: *submat = M;
3233: /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3234: PetscObjectCompose((PetscObject)M,"isrow_d",(PetscObject)isrow_d);
3235: ISDestroy(&isrow_d);
3237: PetscObjectCompose((PetscObject)M,"iscol_d",(PetscObject)iscol_d);
3238: ISDestroy(&iscol_d);
3240: PetscObjectCompose((PetscObject)M,"iscol_o",(PetscObject)iscol_o);
3241: ISDestroy(&iscol_o);
3242: }
3243: return(0);
3244: }
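/*
   Note: on MAT_INITIAL_MATRIX the index sets isrow_d, iscol_d and iscol_o are
   composed onto the submatrix so that a later MAT_REUSE_MATRIX call can query
   them back (see the PetscObjectQuery() calls above) instead of rebuilding
   them from scratch.
*/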
3246: PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3247: {
3249: IS iscol_local=NULL,isrow_d;
3250: PetscInt csize;
3251: PetscInt n,i,j,start,end;
3252: PetscBool sameRowDist=PETSC_FALSE,sameDist[2],tsameDist[2];
3253: MPI_Comm comm;
3256: /* If isrow has the same processor distribution as mat,
3257: call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table of the global size of iscol */
3258: if (call == MAT_REUSE_MATRIX) {
3259: PetscObjectQuery((PetscObject)*newmat,"isrow_d",(PetscObject*)&isrow_d);
3260: if (isrow_d) {
3261: sameRowDist = PETSC_TRUE;
3262: tsameDist[1] = PETSC_TRUE; /* sameColDist */
3263: } else {
3264: PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);
3265: if (iscol_local) {
3266: sameRowDist = PETSC_TRUE;
3267: tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3268: }
3269: }
3270: } else {
3271: /* Check if isrow has same processor distribution as mat */
3272: sameDist[0] = PETSC_FALSE;
3273: ISGetLocalSize(isrow,&n);
3274: if (!n) {
3275: sameDist[0] = PETSC_TRUE;
3276: } else {
3277: ISGetMinMax(isrow,&i,&j);
3278: MatGetOwnershipRange(mat,&start,&end);
3279: if (i >= start && j < end) {
3280: sameDist[0] = PETSC_TRUE;
3281: }
3282: }
3284: /* Check if iscol has same processor distribution as mat */
3285: sameDist[1] = PETSC_FALSE;
3286: ISGetLocalSize(iscol,&n);
3287: if (!n) {
3288: sameDist[1] = PETSC_TRUE;
3289: } else {
3290: ISGetMinMax(iscol,&i,&j);
3291: MatGetOwnershipRangeColumn(mat,&start,&end);
3292: if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3293: }
3295: PetscObjectGetComm((PetscObject)mat,&comm);
3296: MPIU_Allreduce(&sameDist,&tsameDist,2,MPIU_BOOL,MPI_LAND,comm);
3297: sameRowDist = tsameDist[0];
3298: }
3300: if (sameRowDist) {
3301: if (tsameDist[1]) { /* sameRowDist & sameColDist */
3302: /* isrow and iscol have same processor distribution as mat */
3303: MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat,isrow,iscol,call,newmat);
3304: return(0);
3305: } else { /* sameRowDist */
3306: /* isrow has same processor distribution as mat */
3307: if (call == MAT_INITIAL_MATRIX) {
3308: PetscBool sorted;
3309: ISGetSeqIS_Private(mat,iscol,&iscol_local);
3310: ISGetLocalSize(iscol_local,&n); /* local size of iscol_local = global columns of newmat */
3311: ISGetSize(iscol,&i);
3312: if (n != i) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"n %D != size of iscol %D",n,i);
3314: ISSorted(iscol_local,&sorted);
3315: if (sorted) {
3316: /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local to be sorted; it may contain duplicate indices */
3317: MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,iscol_local,MAT_INITIAL_MATRIX,newmat);
3318: return(0);
3319: }
3320: } else { /* call == MAT_REUSE_MATRIX */
3321: IS iscol_sub;
3322: PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3323: if (iscol_sub) {
3324: MatCreateSubMatrix_MPIAIJ_SameRowDist(mat,isrow,iscol,NULL,call,newmat);
3325: return(0);
3326: }
3327: }
3328: }
3329: }
3331: /* General case: iscol -> iscol_local which has global size of iscol */
3332: if (call == MAT_REUSE_MATRIX) {
3333: PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);
3334: if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3335: } else {
3336: if (!iscol_local) {
3337: ISGetSeqIS_Private(mat,iscol,&iscol_local);
3338: }
3339: }
3341: ISGetLocalSize(iscol,&csize);
3342: MatCreateSubMatrix_MPIAIJ_nonscalable(mat,isrow,iscol_local,csize,call,newmat);
3344: if (call == MAT_INITIAL_MATRIX) {
3345: PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);
3346: ISDestroy(&iscol_local);
3347: }
3348: return(0);
3349: }
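/*
   A hedged usage sketch (not part of the library source): callers reach the routine
   above through MatCreateSubMatrix(). The IS and Mat objects composed on the new
   matrix on the first call ("isrow_d", "SubIScol", "ISAllGather", ...) are what make
   the MAT_REUSE_MATRIX path possible:

     MatCreateSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&S);
     ... change the numerical values of A, keeping the same nonzero pattern ...
     MatCreateSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,&S);
*/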
3351: /*@C
3352: MatCreateMPIAIJWithSeqAIJ - creates an MPIAIJ matrix using SeqAIJ matrices that contain the "diagonal"
3353: and "off-diagonal" part of the matrix in CSR format.
3355: Collective
3357: Input Parameters:
3358: + comm - MPI communicator
3359: . A - "diagonal" portion of matrix
3360: . B - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3361: - garray - global index of B columns
3363: Output Parameter:
3364: . mat - the matrix, with input A as its local diagonal matrix
3365: Level: advanced
3367: Notes:
3368: See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3369: A becomes part of the output mat, and B is destroyed by this routine; the user may not use A or B afterwards.
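A minimal call sketch (illustrative only, not from the PETSc distribution; it assumes
a global 4x4 matrix on two ranks, each owning 2 rows and 2 columns with a single
off-process column, and rank holds the MPI rank):
.vb
   Mat      A,B,C;
   PetscInt garray[1];
   MatCreateSeqAIJ(PETSC_COMM_SELF,2,2,2,NULL,&A);  /* "diagonal" block, 2x2 */
   MatCreateSeqAIJ(PETSC_COMM_SELF,2,1,1,NULL,&B);  /* "off-diagonal" block with compact column indices */
   /* ... MatSetValues() and assembly of A and B ... */
   garray[0] = rank ? 0 : 2;                        /* global column of B's only local column */
   MatCreateMPIAIJWithSeqAIJ(PETSC_COMM_WORLD,A,B,garray,&C);
   /* A and B now belong to C; do not use or destroy them directly */
.ve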
3371: .seealso: MatCreateMPIAIJWithSplitArrays()
3372: @*/
3373: PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm,Mat A,Mat B,const PetscInt garray[],Mat *mat)
3374: {
3376: Mat_MPIAIJ *maij;
3377: Mat_SeqAIJ *b=(Mat_SeqAIJ*)B->data,*bnew;
3378: PetscInt *oi=b->i,*oj=b->j,i,nz,col;
3379: PetscScalar *oa=b->a;
3380: Mat Bnew;
3381: PetscInt m,n,N;
3384: MatCreate(comm,mat);
3385: MatGetSize(A,&m,&n);
3386: if (m != B->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Am %D != Bm %D",m,B->rmap->N);
3387: if (A->rmap->bs != B->rmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A row bs %D != B row bs %D",A->rmap->bs,B->rmap->bs);
3388: /* remove check below; When B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be same as A */
3389: /* if (A->cmap->bs != B->cmap->bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %D != B column bs %D",A->cmap->bs,B->cmap->bs); */
3391: /* Get global columns of mat */
3392: MPIU_Allreduce(&n,&N,1,MPIU_INT,MPI_SUM,comm);
3394: MatSetSizes(*mat,m,n,PETSC_DECIDE,N);
3395: MatSetType(*mat,MATMPIAIJ);
3396: MatSetBlockSizes(*mat,A->rmap->bs,A->cmap->bs);
3397: maij = (Mat_MPIAIJ*)(*mat)->data;
3399: (*mat)->preallocated = PETSC_TRUE;
3401: PetscLayoutSetUp((*mat)->rmap);
3402: PetscLayoutSetUp((*mat)->cmap);
3404: /* Set A as diagonal portion of *mat */
3405: maij->A = A;
3407: nz = oi[m];
3408: for (i=0; i<nz; i++) {
3409: col = oj[i];
3410: oj[i] = garray[col];
3411: }
3413: /* Set Bnew as off-diagonal portion of *mat */
3414: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,N,oi,oj,oa,&Bnew);
3415: bnew = (Mat_SeqAIJ*)Bnew->data;
3416: bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3417: maij->B = Bnew;
3419: if (B->rmap->N != Bnew->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"BN %D != BnewN %D",B->rmap->N,Bnew->rmap->N);
3421: b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3422: b->free_a = PETSC_FALSE;
3423: b->free_ij = PETSC_FALSE;
3424: MatDestroy(&B);
3426: bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3427: bnew->free_a = PETSC_TRUE;
3428: bnew->free_ij = PETSC_TRUE;
3430: /* condense columns of maij->B */
3431: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
3432: MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
3433: MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
3434: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
3435: MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3436: return(0);
3437: }
3439: extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);
3441: PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat,IS isrow,IS iscol,IS iscol_local,MatReuse call,Mat *newmat)
3442: {
3444: PetscInt i,m,n,rstart,row,rend,nz,j,bs,cbs;
3445: PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3446: Mat_MPIAIJ *a=(Mat_MPIAIJ*)mat->data;
3447: Mat M,Msub,B=a->B;
3448: MatScalar *aa;
3449: Mat_SeqAIJ *aij;
3450: PetscInt *garray = a->garray,*colsub,Ncols;
3451: PetscInt count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3452: IS iscol_sub,iscmap;
3453: const PetscInt *is_idx,*cmap;
3454: PetscBool allcolumns=PETSC_FALSE;
3455: MPI_Comm comm;
3458: PetscObjectGetComm((PetscObject)mat,&comm);
3460: if (call == MAT_REUSE_MATRIX) {
3461: PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);
3462: if (!iscol_sub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"SubIScol passed in was not used before, cannot reuse");
3463: ISGetLocalSize(iscol_sub,&count);
3465: PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);
3466: if (!iscmap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Subcmap passed in was not used before, cannot reuse");
3468: PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);
3469: if (!Msub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3471: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);
3473: } else { /* call == MAT_INITIAL_MATRIX) */
3474: PetscBool flg;
3476: ISGetLocalSize(iscol,&n);
3477: ISGetSize(iscol,&Ncols);
3479: /* (1) iscol -> nonscalable iscol_local */
3480: /* Check for special case: each processor gets entire matrix columns */
3481: ISIdentity(iscol_local,&flg);
3482: if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3483: if (allcolumns) {
3484: iscol_sub = iscol_local;
3485: PetscObjectReference((PetscObject)iscol_local);
3486: ISCreateStride(PETSC_COMM_SELF,n,0,1,&iscmap);
3488: } else {
3489: /* (2) iscol_local -> iscol_sub and iscmap. Implementation below requires iscol_local be sorted; it can have duplicate indices */
3490: PetscInt *idx,*cmap1,k;
3491: PetscMalloc1(Ncols,&idx);
3492: PetscMalloc1(Ncols,&cmap1);
3493: ISGetIndices(iscol_local,&is_idx);
3494: count = 0;
3495: k = 0;
3496: for (i=0; i<Ncols; i++) {
3497: j = is_idx[i];
3498: if (j >= cstart && j < cend) {
3499: /* diagonal part of mat */
3500: idx[count] = j;
3501: cmap1[count++] = i; /* column index in submat */
3502: } else if (Bn) {
3503: /* off-diagonal part of mat */
3504: if (j == garray[k]) {
3505: idx[count] = j;
3506: cmap1[count++] = i; /* column index in submat */
3507: } else if (j > garray[k]) {
3508: while (j > garray[k] && k < Bn-1) k++;
3509: if (j == garray[k]) {
3510: idx[count] = j;
3511: cmap1[count++] = i; /* column index in submat */
3512: }
3513: }
3514: }
3515: }
3516: ISRestoreIndices(iscol_local,&is_idx);
3518: ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_OWN_POINTER,&iscol_sub);
3519: ISGetBlockSize(iscol,&cbs);
3520: ISSetBlockSize(iscol_sub,cbs);
3522: ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local),count,cmap1,PETSC_OWN_POINTER,&iscmap);
3523: }
3525: /* (3) Create sequential Msub */
3526: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,allcolumns,&Msub);
3527: }
3529: ISGetLocalSize(iscol_sub,&count);
3530: aij = (Mat_SeqAIJ*)(Msub)->data;
3531: ii = aij->i;
3532: ISGetIndices(iscmap,&cmap);
3534: /*
3535: m - number of local rows
3536: Ncols - number of columns (same on all processors)
3537: rstart - first row in new global matrix generated
3538: */
3539: MatGetSize(Msub,&m,NULL);
3541: if (call == MAT_INITIAL_MATRIX) {
3542: /* (4) Create parallel newmat */
3543: PetscMPIInt rank,size;
3544: PetscInt csize;
3546: MPI_Comm_size(comm,&size);
3547: MPI_Comm_rank(comm,&rank);
3549: /*
3550: Determine the number of non-zeros in the diagonal and off-diagonal
3551: portions of the matrix in order to do correct preallocation
3552: */
3554: /* first get start and end of "diagonal" columns */
3555: ISGetLocalSize(iscol,&csize);
3556: if (csize == PETSC_DECIDE) {
3557: ISGetSize(isrow,&mglobal);
3558: if (mglobal == Ncols) { /* square matrix */
3559: nlocal = m;
3560: } else {
3561: nlocal = Ncols/size + ((Ncols % size) > rank);
3562: }
3563: } else {
3564: nlocal = csize;
3565: }
3566: MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3567: rstart = rend - nlocal;
3568: if (rank == size - 1 && rend != Ncols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,Ncols);
3570: /* next, compute all the lengths */
3571: jj = aij->j;
3572: PetscMalloc1(2*m+1,&dlens);
3573: olens = dlens + m;
3574: for (i=0; i<m; i++) {
3575: jend = ii[i+1] - ii[i];
3576: olen = 0;
3577: dlen = 0;
3578: for (j=0; j<jend; j++) {
3579: if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3580: else dlen++;
3581: jj++;
3582: }
3583: olens[i] = olen;
3584: dlens[i] = dlen;
3585: }
3587: ISGetBlockSize(isrow,&bs);
3588: ISGetBlockSize(iscol,&cbs);
3590: MatCreate(comm,&M);
3591: MatSetSizes(M,m,nlocal,PETSC_DECIDE,Ncols);
3592: MatSetBlockSizes(M,bs,cbs);
3593: MatSetType(M,((PetscObject)mat)->type_name);
3594: MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3595: PetscFree(dlens);
3597: } else { /* call == MAT_REUSE_MATRIX */
3598: M = *newmat;
3599: MatGetLocalSize(M,&i,NULL);
3600: if (i != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3601: MatZeroEntries(M);
3602: /*
3603: The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3604: rather than the slower MatSetValues().
3605: */
3606: M->was_assembled = PETSC_TRUE;
3607: M->assembled = PETSC_FALSE;
3608: }
3610: /* (5) Set values of Msub to *newmat */
3611: PetscMalloc1(count,&colsub);
3612: MatGetOwnershipRange(M,&rstart,NULL);
3614: jj = aij->j;
3615: aa = aij->a;
3616: for (i=0; i<m; i++) {
3617: row = rstart + i;
3618: nz = ii[i+1] - ii[i];
3619: for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3620: MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);
3621: jj += nz; aa += nz;
3622: }
3623: ISRestoreIndices(iscmap,&cmap);
3625: MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3626: MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3628: PetscFree(colsub);
3630: /* save Msub, iscol_sub and iscmap used in processor for next request */
3631: if (call == MAT_INITIAL_MATRIX) {
3632: *newmat = M;
3633: PetscObjectCompose((PetscObject)(*newmat),"SubMatrix",(PetscObject)Msub);
3634: MatDestroy(&Msub);
3636: PetscObjectCompose((PetscObject)(*newmat),"SubIScol",(PetscObject)iscol_sub);
3637: ISDestroy(&iscol_sub);
3639: PetscObjectCompose((PetscObject)(*newmat),"Subcmap",(PetscObject)iscmap);
3640: ISDestroy(&iscmap);
3642: if (iscol_local) {
3643: PetscObjectCompose((PetscObject)(*newmat),"ISAllGather",(PetscObject)iscol_local);
3644: ISDestroy(&iscol_local);
3645: }
3646: }
3647: return(0);
3648: }
3650: /*
3651: Not great since it makes two copies of the submatrix: first a SeqAIJ
3652: computed locally, then the end result obtained by concatenating the local matrices.
3653: Writing it directly would be much like MatCreateSubMatrices_MPIAIJ().
3655: Note: This requires a sequential iscol with all indices.
3656: */
3657: PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3658: {
3660: PetscMPIInt rank,size;
3661: PetscInt i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3662: PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3663: Mat M,Mreuse;
3664: MatScalar *aa,*vwork;
3665: MPI_Comm comm;
3666: Mat_SeqAIJ *aij;
3667: PetscBool colflag,allcolumns=PETSC_FALSE;
3670: PetscObjectGetComm((PetscObject)mat,&comm);
3671: MPI_Comm_rank(comm,&rank);
3672: MPI_Comm_size(comm,&size);
3674: /* Check for special case: each processor gets entire matrix columns */
3675: ISIdentity(iscol,&colflag);
3676: ISGetLocalSize(iscol,&n);
3677: if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3679: if (call == MAT_REUSE_MATRIX) {
3680: PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);
3681: if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3682: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,allcolumns,&Mreuse);
3683: } else {
3684: MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,allcolumns,&Mreuse);
3685: }
3687: /*
3688: m - number of local rows
3689: n - number of columns (same on all processors)
3690: rstart - first row in new global matrix generated
3691: */
3692: MatGetSize(Mreuse,&m,&n);
3693: MatGetBlockSizes(Mreuse,&bs,&cbs);
3694: if (call == MAT_INITIAL_MATRIX) {
3695: aij = (Mat_SeqAIJ*)(Mreuse)->data;
3696: ii = aij->i;
3697: jj = aij->j;
3699: /*
3700: Determine the number of non-zeros in the diagonal and off-diagonal
3701: portions of the matrix in order to do correct preallocation
3702: */
3704: /* first get start and end of "diagonal" columns */
3705: if (csize == PETSC_DECIDE) {
3706: ISGetSize(isrow,&mglobal);
3707: if (mglobal == n) { /* square matrix */
3708: nlocal = m;
3709: } else {
3710: nlocal = n/size + ((n % size) > rank);
3711: }
3712: } else {
3713: nlocal = csize;
3714: }
3715: MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);
3716: rstart = rend - nlocal;
3717: if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
3719: /* next, compute all the lengths */
3720: PetscMalloc1(2*m+1,&dlens);
3721: olens = dlens + m;
3722: for (i=0; i<m; i++) {
3723: jend = ii[i+1] - ii[i];
3724: olen = 0;
3725: dlen = 0;
3726: for (j=0; j<jend; j++) {
3727: if (*jj < rstart || *jj >= rend) olen++;
3728: else dlen++;
3729: jj++;
3730: }
3731: olens[i] = olen;
3732: dlens[i] = dlen;
3733: }
3734: MatCreate(comm,&M);
3735: MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);
3736: MatSetBlockSizes(M,bs,cbs);
3737: MatSetType(M,((PetscObject)mat)->type_name);
3738: MatMPIAIJSetPreallocation(M,0,dlens,0,olens);
3739: PetscFree(dlens);
3740: } else {
3741: PetscInt ml,nl;
3743: M = *newmat;
3744: MatGetLocalSize(M,&ml,&nl);
3745: if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3746: MatZeroEntries(M);
3747: /*
3748: The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3749: rather than the slower MatSetValues().
3750: */
3751: M->was_assembled = PETSC_TRUE;
3752: M->assembled = PETSC_FALSE;
3753: }
3754: MatGetOwnershipRange(M,&rstart,&rend);
3755: aij = (Mat_SeqAIJ*)(Mreuse)->data;
3756: ii = aij->i;
3757: jj = aij->j;
3758: aa = aij->a;
3759: for (i=0; i<m; i++) {
3760: row = rstart + i;
3761: nz = ii[i+1] - ii[i];
3762: cwork = jj; jj += nz;
3763: vwork = aa; aa += nz;
3764: MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);
3765: }
3767: MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);
3768: MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);
3769: *newmat = M;
3771: /* save submatrix used in processor for next request */
3772: if (call == MAT_INITIAL_MATRIX) {
3773: PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);
3774: MatDestroy(&Mreuse);
3775: }
3776: return(0);
3777: }
3779: PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3780: {
3781: PetscInt m,cstart, cend,j,nnz,i,d;
3782: PetscInt *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3783: const PetscInt *JJ;
3785: PetscBool nooffprocentries;
3788: if (Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0 it is %D",Ii[0]);
3790: PetscLayoutSetUp(B->rmap);
3791: PetscLayoutSetUp(B->cmap);
3792: m = B->rmap->n;
3793: cstart = B->cmap->rstart;
3794: cend = B->cmap->rend;
3795: rstart = B->rmap->rstart;
3797: PetscCalloc2(m,&d_nnz,m,&o_nnz);
3799: #if defined(PETSC_USE_DEBUG)
3800: for (i=0; i<m; i++) {
3801: nnz = Ii[i+1]- Ii[i];
3802: JJ = J + Ii[i];
3803: if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nnz);
3804: if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3805: if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3806: }
3807: #endif
3809: for (i=0; i<m; i++) {
3810: nnz = Ii[i+1]- Ii[i];
3811: JJ = J + Ii[i];
3812: nnz_max = PetscMax(nnz_max,nnz);
3813: d = 0;
3814: for (j=0; j<nnz; j++) {
3815: if (cstart <= JJ[j] && JJ[j] < cend) d++;
3816: }
3817: d_nnz[i] = d;
3818: o_nnz[i] = nnz - d;
3819: }
3820: MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
3821: PetscFree2(d_nnz,o_nnz);
3823: for (i=0; i<m; i++) {
3824: ii = i + rstart;
3825: MatSetValues_MPIAIJ(B,1,&ii,Ii[i+1] - Ii[i],J+Ii[i], v ? v + Ii[i] : NULL,INSERT_VALUES);
3826: }
3827: nooffprocentries = B->nooffprocentries;
3828: B->nooffprocentries = PETSC_TRUE;
3829: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3830: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3831: B->nooffprocentries = nooffprocentries;
3833: MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
3834: return(0);
3835: }
3837: /*@
3838: MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3839: (the default parallel PETSc format).
3841: Collective
3843: Input Parameters:
3844: + B - the matrix
3845: . i - the indices into j for the start of each local row (starts with zero)
3846: . j - the column indices for each local row (starts with zero)
3847: - v - optional values in the matrix
3849: Level: developer
3851: Notes:
3852: The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3853: thus you CANNOT change the matrix entries by changing the values of v[] after you have
3854: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3856: The i and j indices are 0 based, and the i indices are offsets into the local j array.
3858: The format used for the sparse matrix input is equivalent to a
3859: row-major ordering, i.e., for the following matrix, the input data expected is
3860: as shown:
3862: $ 1 0 0
3863: $ 2 0 3 P0
3864: $ -------
3865: $ 4 5 6 P1
3866: $
3867: $ Process0 [P0]: rows_owned=[0,1]
3868: $ i = {0,1,3} [size = nrow+1 = 2+1]
3869: $ j = {0,0,2} [size = 3]
3870: $ v = {1,2,3} [size = 3]
3871: $
3872: $ Process1 [P1]: rows_owned=[2]
3873: $ i = {0,3} [size = nrow+1 = 1+1]
3874: $ j = {0,1,2} [size = 3]
3875: $ v = {4,5,6} [size = 3]
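A sketch of assembling the two-process example above (B is the Mat being
preallocated and rank the MPI rank; both are illustrative names):
.vb
   PetscInt    i0[] = {0,1,3}, j0[] = {0,0,2};
   PetscScalar v0[] = {1,2,3};
   PetscInt    i1[] = {0,3},   j1[] = {0,1,2};
   PetscScalar v1[] = {4,5,6};
   MatCreate(PETSC_COMM_WORLD,&B);
   MatSetSizes(B,rank ? 1 : 2,PETSC_DECIDE,3,3);
   MatSetType(B,MATMPIAIJ);
   if (rank) {MatMPIAIJSetPreallocationCSR(B,i1,j1,v1);}
   else      {MatMPIAIJSetPreallocationCSR(B,i0,j0,v0);}
.ve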
3877: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
3878: MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3879: @*/
3880: PetscErrorCode MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3881: {
3885: PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));
3886: return(0);
3887: }
3889: /*@C
3890: MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3891: (the default parallel PETSc format). For good matrix assembly performance
3892: the user should preallocate the matrix storage by setting the parameters
3893: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3894: performance can be increased by more than a factor of 50.
3896: Collective
3898: Input Parameters:
3899: + B - the matrix
3900: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
3901: (same value is used for all local rows)
3902: . d_nnz - array containing the number of nonzeros in the various rows of the
3903: DIAGONAL portion of the local submatrix (possibly different for each row)
3904: or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
3905: The size of this array is equal to the number of local rows, i.e., 'm'.
3906: For matrices that will be factored, you must leave room for (and set)
3907: the diagonal entry even if it is zero.
3908: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
3909: submatrix (same value is used for all local rows).
3910: - o_nnz - array containing the number of nonzeros in the various rows of the
3911: OFF-DIAGONAL portion of the local submatrix (possibly different for
3912: each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
3913: structure. The size of this array is equal to the number
3914: of local rows, i.e., 'm'.
3916: If the *_nnz parameter is given then the *_nz parameter is ignored.
3918: The AIJ format (also called the Yale sparse matrix format or
3919: compressed row storage (CSR)), is fully compatible with standard Fortran 77
3920: storage. The stored row and column indices begin with zero.
3921: See Users-Manual: ch_mat for details.
3923: The parallel matrix is partitioned such that the first m0 rows belong to
3924: process 0, the next m1 rows belong to process 1, the next m2 rows belong
3925: to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.
3927: The DIAGONAL portion of the local submatrix of a processor can be defined
3928: as the submatrix obtained by extracting the part corresponding to
3929: the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
3930: first row that belongs to the processor, r2 is the last row belonging to
3931: this processor, and c1-c2 is the range of indices of the local part of a
3932: vector suitable for applying the matrix to. This is an m x n matrix. In the
3933: common case of a square matrix, the row and column ranges are the same and
3934: the DIAGONAL part is also square. The remaining portion of the local
3935: submatrix, of size m x (N-n), constitutes the OFF-DIAGONAL portion.
3937: If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3939: You can call MatGetInfo() to get information on how effective the preallocation was;
3940: for example the fields mallocs, nz_allocated, nz_used, and nz_unneeded.
3941: You can also run with the option -info and look for messages with the string
3942: malloc in them to see if additional memory allocation was needed.
3944: Example usage:
3946: Consider the following 8x8 matrix with 34 non-zero values that is
3947: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3948: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3949: as follows:
3951: .vb
3952: 1 2 0 | 0 3 0 | 0 4
3953: Proc0 0 5 6 | 7 0 0 | 8 0
3954: 9 0 10 | 11 0 0 | 12 0
3955: -------------------------------------
3956: 13 0 14 | 15 16 17 | 0 0
3957: Proc1 0 18 0 | 19 20 21 | 0 0
3958: 0 0 0 | 22 23 0 | 24 0
3959: -------------------------------------
3960: Proc2 25 26 27 | 0 0 28 | 29 0
3961: 30 0 0 | 31 32 33 | 0 34
3962: .ve
3964: This can be represented as a collection of submatrices as:
3966: .vb
3967: A B C
3968: D E F
3969: G H I
3970: .ve
3972: Where the submatrices A,B,C are owned by proc0, D,E,F are
3973: owned by proc1, G,H,I are owned by proc2.
3975: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3976: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3977: The 'M','N' parameters are 8,8, and have the same values on all procs.
3979: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3980: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3981: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3982: Internally, each processor stores the DIAGONAL part and the OFF-DIAGONAL
3983: part as SeqAIJ matrices, e.g., proc1 will store [E] as a SeqAIJ
3984: matrix, and [DF] as another SeqAIJ matrix.
3986: When d_nz, o_nz parameters are specified, d_nz storage elements are
3987: allocated for every row of the local diagonal submatrix, and o_nz
3988: storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
3989: One way to choose d_nz and o_nz is to use the maximum number of nonzeros
3990: per row of the local DIAGONAL and OFF-DIAGONAL submatrices.
3991: In this case, the values of d_nz,o_nz are:
3992: .vb
3993: proc0 : dnz = 2, o_nz = 2
3994: proc1 : dnz = 3, o_nz = 2
3995: proc2 : dnz = 1, o_nz = 4
3996: .ve
3997: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3998: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3999: for proc2, i.e., we are using 12+15+10=37 storage locations to store
4000: 34 values.
4002: When d_nnz, o_nnz parameters are specified, the storage is specified
4003: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4004: In the above case the values for d_nnz,o_nnz are:
4005: .vb
4006: proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4007: proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4008: proc2: d_nnz = [1,1] and o_nnz = [4,4]
4009: .ve
4010: Here the space allocated is the sum of all the above values, i.e., 34, and
4011: hence the preallocation is perfect.
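For instance, the per-row preallocation for proc1 in the example above could be
set as follows (a sketch; B is the Mat being preallocated):
.vb
   PetscInt d_nnz[] = {3,3,2}, o_nnz[] = {2,1,1};  /* proc1 */
   MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);
.ve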
4013: Level: intermediate
4015: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
4016: MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
4017: @*/
4018: PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
4019: {
4025: PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));
4026: return(0);
4027: }
4029: /*@
4030: MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain, in standard
4031: CSR format, the local rows.
4033: Collective
4035: Input Parameters:
4036: + comm - MPI communicator
4037: . m - number of local rows (Cannot be PETSC_DECIDE)
4038: . n - This value should be the same as the local size used in creating the
4039: x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4040: calculated if N is given) For square matrices n is almost always m.
4041: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4042: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4043: . i - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
4044: . j - column indices
4045: - a - matrix values
4047: Output Parameter:
4048: . mat - the matrix
4050: Level: intermediate
4052: Notes:
4053: The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
4054: thus you CANNOT change the matrix entries by changing the values of a[] after you have
4055: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
4057: The i and j indices are 0 based, and the i indices are offsets into the local j array.
4059: The format used for the sparse matrix input is equivalent to a
4060: row-major ordering, i.e., for the following matrix, the input data expected is
4061: as shown below.
4063: Once you have created the matrix you can update it with new numerical values using MatUpdateMPIAIJWithArrays().
4065: $ 1 0 0
4066: $ 2 0 3 P0
4067: $ -------
4068: $ 4 5 6 P1
4069: $
4070: $ Process0 [P0]: rows_owned=[0,1]
4071: $ i = {0,1,3} [size = nrow+1 = 2+1]
4072: $ j = {0,0,2} [size = 3]
4073: $ v = {1,2,3} [size = 3]
4074: $
4075: $ Process1 [P1]: rows_owned=[2]
4076: $ i = {0,3} [size = nrow+1 = 1+1]
4077: $ j = {0,1,2} [size = 3]
4078: $ v = {4,5,6} [size = 3]
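A sketch of the corresponding call on process 0 (process 1 passes its own
arrays analogously; all names are illustrative):
.vb
   PetscInt    i[] = {0,1,3}, j[] = {0,0,2};
   PetscScalar v[] = {1,2,3};
   Mat         A;
   MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,2,PETSC_DECIDE,3,3,i,j,v,&A);
.ve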
4080: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4081: MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4082: @*/
4083: PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
4084: {
4088: if (i && i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4089: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4090: MatCreate(comm,mat);
4091: MatSetSizes(*mat,m,n,M,N);
4092: /* MatSetBlockSizes(M,bs,cbs); */
4093: MatSetType(*mat,MATMPIAIJ);
4094: MatMPIAIJSetPreallocationCSR(*mat,i,j,a);
4095: return(0);
4096: }
4098: /*@
4099: MatUpdateMPIAIJWithArrays - updates an MPI AIJ matrix using arrays that contain, in standard
4100: CSR format, the local rows. Only the numerical values are updated; the other arrays must be identical.
4102: Collective
4104: Input Parameters:
4105: + mat - the matrix
4106: . m - number of local rows (Cannot be PETSC_DECIDE)
4107: . n - This value should be the same as the local size used in creating the
4108: x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4109: calculated if N is given) For square matrices n is almost always m.
4110: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4111: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4112: . Ii - row indices; that is Ii[0] = 0, Ii[row] = Ii[row-1] + number of elements in that row of the matrix
4113: . J - column indices
4114: - v - matrix values
4116: Level: intermediate
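Notes:
A sketch of the intended usage, assuming the matrix was built with
MatCreateMPIAIJWithArrays() and the sparsity pattern Ii,J never changes:
.vb
   MatCreateMPIAIJWithArrays(comm,m,n,M,N,Ii,J,v,&A);
   /* ... recompute the entries of v, keeping Ii and J identical ... */
   MatUpdateMPIAIJWithArrays(A,m,n,M,N,Ii,J,v);
.ve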
4118: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4119: MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays(), MatUpdateMPIAIJWithArrays()
4120: @*/
4121: PetscErrorCode MatUpdateMPIAIJWithArrays(Mat mat,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
4122: {
4124: PetscInt cstart,nnz,i,j;
4125: PetscInt *ld;
4126: PetscBool nooffprocentries;
4127: Mat_MPIAIJ *Aij = (Mat_MPIAIJ*)mat->data;
4128: Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)Aij->A->data, *Ao = (Mat_SeqAIJ*)Aij->B->data;
4129: PetscScalar *ad = Ad->a, *ao = Ao->a;
4130: const PetscInt *Adi = Ad->i;
4131: PetscInt ldi,Iii,md;
4134: if (Ii[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
4135: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
4136: if (m != mat->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of rows cannot change from call to MatUpdateMPIAIJWithArrays()");
4137: if (n != mat->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Local number of columns cannot change from call to MatUpdateMPIAIJWithArrays()");
4139: cstart = mat->cmap->rstart;
4140: if (!Aij->ld) {
4141: /* count number of entries below block diagonal */
4142: PetscCalloc1(m,&ld);
4143: Aij->ld = ld;
4144: for (i=0; i<m; i++) {
4145: nnz = Ii[i+1]- Ii[i];
4146: j = 0;
4147: while (j < nnz && J[j] < cstart) {j++;} /* test j < nnz before reading J[j] */
4148: J += nnz;
4149: ld[i] = j;
4150: }
4151: } else {
4152: ld = Aij->ld;
4153: }
4155: for (i=0; i<m; i++) {
4156: nnz = Ii[i+1]- Ii[i];
4157: Iii = Ii[i];
4158: ldi = ld[i];
4159: md = Adi[i+1]-Adi[i];
4160: PetscArraycpy(ao,v + Iii,ldi);
4161: PetscArraycpy(ad,v + Iii + ldi,md);
4162: PetscArraycpy(ao + ldi,v + Iii + ldi + md,nnz - ldi - md);
4163: ad += md;
4164: ao += nnz - md;
4165: }
4166: nooffprocentries = mat->nooffprocentries;
4167: mat->nooffprocentries = PETSC_TRUE;
4168: PetscObjectStateIncrease((PetscObject)Aij->A);
4169: PetscObjectStateIncrease((PetscObject)Aij->B);
4170: PetscObjectStateIncrease((PetscObject)mat);
4171: MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
4172: MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
4173: mat->nooffprocentries = nooffprocentries;
4174: return(0);
4175: }
4177: /*@C
4178: MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
4179: (the default parallel PETSc format). For good matrix assembly performance
4180: the user should preallocate the matrix storage by setting the parameters
4181: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
4182: performance can be increased by more than a factor of 50.
4184: Collective
4186: Input Parameters:
4187: + comm - MPI communicator
4188: . m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
4189: This value should be the same as the local size used in creating the
4190: y vector for the matrix-vector product y = Ax.
4191: . n - This value should be the same as the local size used in creating the
4192: x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
4193: calculated if N is given) For square matrices n is almost always m.
4194: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
4195: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
4196: . d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
4197: (same value is used for all local rows)
4198: . d_nnz - array containing the number of nonzeros in the various rows of the
4199: DIAGONAL portion of the local submatrix (possibly different for each row)
4200: or NULL, if d_nz is used to specify the nonzero structure.
4201: The size of this array is equal to the number of local rows, i.e., 'm'.
4202: . o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
4203: submatrix (same value is used for all local rows).
4204: - o_nnz - array containing the number of nonzeros in the various rows of the
4205: OFF-DIAGONAL portion of the local submatrix (possibly different for
4206: each row) or NULL, if o_nz is used to specify the nonzero
4207: structure. The size of this array is equal to the number
4208: of local rows, i.e., 'm'.
4210: Output Parameter:
4211: . A - the matrix
4213: It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
4214: MatXXXXSetPreallocation() paradigm instead of this routine directly.
4215: [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]
4217: Notes:
4218: If the *_nnz parameter is given then the *_nz parameter is ignored
4220: m,n,M,N parameters specify the size of the matrix, and its partitioning across
4221: processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
4222: storage requirements for this matrix.
4224: If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
4225: processor then it must be used on all processors that share the object for
4226: that argument.
4228: The user MUST specify either the local or global matrix dimensions
4229: (possibly both).
4231: The parallel matrix is partitioned across processors such that the
4232: first m0 rows belong to process 0, the next m1 rows belong to
4233: process 1, the next m2 rows belong to process 2, etc., where
4234: m0,m1,m2,... are the input parameter 'm', i.e., each processor stores
4235: values corresponding to an [m x N] submatrix.
4237: The columns are logically partitioned with the n0 columns belonging
4238: to the 0th partition, the next n1 columns belonging to the next
4239: partition, etc., where n0,n1,n2,... are the input parameter 'n'.
4241: The DIAGONAL portion of the local submatrix on any given processor
4242: is the submatrix corresponding to the rows and columns m,n
4243: corresponding to the given processor, i.e., the diagonal matrix on
4244: process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1],
4245: etc. The remaining portion of the local submatrix [m x (N-n)]
4246: constitutes the OFF-DIAGONAL portion. The example below better
4247: illustrates this concept.
4249: For a square global matrix we define each processor's diagonal portion
4250: to be its local rows and the corresponding columns (a square submatrix);
4251: each processor's off-diagonal portion encompasses the remainder of the
4252: local matrix (a rectangular submatrix).
4254: If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
4256: When calling this routine with a single process communicator, a matrix of
4257: type SEQAIJ is returned. If a matrix of type MPIAIJ is desired for this
4258: type of communicator, use the construction mechanism
4259: .vb
4260: MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
4261: .ve
4268: By default, this format uses inodes (identical nodes) when possible.
4269: We search for consecutive rows with the same nonzero structure, thereby
4270: reusing matrix information to achieve increased efficiency.
4272: Options Database Keys:
4273: + -mat_no_inode - Do not use inodes
4274: - -mat_inode_limit <limit> - Sets inode limit (max limit=5)
4278: Example usage:
4280: Consider the following 8x8 matrix with 34 non-zero values that is
4281: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4282: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4283: as follows
4285: .vb
4286: 1 2 0 | 0 3 0 | 0 4
4287: Proc0 0 5 6 | 7 0 0 | 8 0
4288: 9 0 10 | 11 0 0 | 12 0
4289: -------------------------------------
4290: 13 0 14 | 15 16 17 | 0 0
4291: Proc1 0 18 0 | 19 20 21 | 0 0
4292: 0 0 0 | 22 23 0 | 24 0
4293: -------------------------------------
4294: Proc2 25 26 27 | 0 0 28 | 29 0
4295: 30 0 0 | 31 32 33 | 0 34
4296: .ve
4298: This can be represented as a collection of submatrices as
4300: .vb
4301: A B C
4302: D E F
4303: G H I
4304: .ve
4306: Where the submatrices A,B,C are owned by proc0, D,E,F are
4307: owned by proc1, G,H,I are owned by proc2.
4309: The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4310: The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4311: The 'M','N' parameters are 8,8, and have the same values on all procs.
4313: The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4314: submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4315: corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4316: Internally, each processor stores the DIAGONAL part and the OFF-DIAGONAL
4317: part as SeqAIJ matrices, e.g., proc1 will store [E] as a SeqAIJ
4318: matrix, and [DF] as another SeqAIJ matrix.
4320: When d_nz, o_nz parameters are specified, d_nz storage elements are
4321: allocated for every row of the local diagonal submatrix, and o_nz
4322: storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4323: One way to choose d_nz and o_nz is to use the maximum number of nonzeros
4324: per row of the local DIAGONAL and OFF-DIAGONAL submatrices.
4325: In this case, the values of d_nz,o_nz are
4326: .vb
4327: proc0 : dnz = 2, o_nz = 2
4328: proc1 : dnz = 3, o_nz = 2
4329: proc2 : dnz = 1, o_nz = 4
4330: .ve
4331: We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4332: translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4333: for proc2, i.e., we are using 12+15+10=37 storage locations to store
4334: 34 values.
4336: When d_nnz, o_nnz parameters are specified, the storage is specified
4337: for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4338: In the above case the values for d_nnz,o_nnz are
4339: .vb
4340: proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4341: proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4342: proc2: d_nnz = [1,1] and o_nnz = [4,4]
4343: .ve
4344: Here the space allocated is the sum of all the above values, i.e., 34, and
4345: hence the preallocation is perfect.
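For instance, with the d_nnz/o_nnz values above, proc1 could create its share
of the matrix as follows (a sketch; error checking omitted):
.vb
   PetscInt d_nnz[] = {3,3,2}, o_nnz[] = {2,1,1};  /* proc1 */
   MatCreateAIJ(PETSC_COMM_WORLD,3,3,8,8,0,d_nnz,0,o_nnz,&A);
.ve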
4347: Level: intermediate
4349: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4350: MATMPIAIJ, MatCreateMPIAIJWithArrays()
4351: @*/
4352: PetscErrorCode MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4353: {
4355: PetscMPIInt size;
4358: MatCreate(comm,A);
4359: MatSetSizes(*A,m,n,M,N);
4360: MPI_Comm_size(comm,&size);
4361: if (size > 1) {
4362: MatSetType(*A,MATMPIAIJ);
4363: MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
4364: } else {
4365: MatSetType(*A,MATSEQAIJ);
4366: MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
4367: }
4368: return(0);
4369: }
4371: /*@C
4372: MatMPIAIJGetSeqAIJ - Returns the local piece of this distributed matrix
4373:
4374: Not collective
4375:
4376: Input Parameter:
4377: . A - The MPIAIJ matrix
4379: Output Parameters:
4380: + Ad - The local diagonal block as a SeqAIJ matrix
4381: . Ao - The local off-diagonal block as a SeqAIJ matrix
4382: - colmap - An array mapping local column numbers of Ao to global column numbers of the parallel matrix
4384: Note: The rows in Ad and Ao are in [0, Nr), where Nr is the number of local rows on this process. The columns
4385: in Ad are in [0, Nc) where Nc is the number of local columns. The columns in Ao are in [0, Nco), where Nco is
4386: the number of nonzero columns in the local off-diagonal piece of the matrix A. The array colmap maps these
4387: local column numbers to global column numbers in the original matrix.
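A small usage sketch (the blocks returned are the matrix's internal data
structures; they must not be destroyed by the caller):
.vb
   Mat            Ad,Ao;
   const PetscInt *colmap;
   MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&colmap);
   /* colmap[c] is the global column of local column c of Ao */
.ve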
4389: Level: intermediate
4391: .seealso: MatMPIAIJGetLocalMat(), MatMPIAIJGetLocalMatCondensed(), MatCreateAIJ(), MATMPIAIJ, MATSEQAIJ
4392: @*/
4393: PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4394: {
4395: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
4396: PetscBool flg;
4400: PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&flg);
4401: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input");
4402: if (Ad) *Ad = a->A;
4403: if (Ao) *Ao = a->B;
4404: if (colmap) *colmap = a->garray;
4405: return(0);
4406: }
4408: PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4409: {
4411: PetscInt m,N,i,rstart,nnz,Ii;
4412: PetscInt *indx;
4413: PetscScalar *values;
4416: MatGetSize(inmat,&m,&N);
4417: if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4418: PetscInt *dnz,*onz,sum,bs,cbs;
4420: if (n == PETSC_DECIDE) {
4421: PetscSplitOwnership(comm,&n,&N);
4422: }
4423: /* Check sum(n) = N */
4424: MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);
4425: if (sum != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns %D != global columns %D",sum,N);
4427: MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);
4428: rstart -= m;
4430: MatPreallocateInitialize(comm,m,n,dnz,onz);
4431: for (i=0; i<m; i++) {
4432: MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4433: MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);
4434: MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);
4435: }
4437: MatCreate(comm,outmat);
4438: MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4439: MatGetBlockSizes(inmat,&bs,&cbs);
4440: MatSetBlockSizes(*outmat,bs,cbs);
4441: MatSetType(*outmat,MATAIJ);
4442: MatSeqAIJSetPreallocation(*outmat,0,dnz);
4443: MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);
4444: MatPreallocateFinalize(dnz,onz);
4445: }
4447: /* numeric phase */
4448: MatGetOwnershipRange(*outmat,&rstart,NULL);
4449: for (i=0; i<m; i++) {
4450: MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4451: Ii = i + rstart;
4452: MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);
4453: MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);
4454: }
4455: MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);
4456: MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);
4457: return(0);
4458: }
4460: PetscErrorCode MatFileSplit(Mat A,char *outfile)
4461: {
4462: PetscErrorCode ierr;
4463: PetscMPIInt rank;
4464: PetscInt m,N,i,rstart,nnz;
4465: size_t len;
4466: const PetscInt *indx;
4467: PetscViewer out;
4468: char *name;
4469: Mat B;
4470: const PetscScalar *values;
4473: MatGetLocalSize(A,&m,0);
4474: MatGetSize(A,0,&N);
4475: /* Should this be the type of the diagonal block of A? */
4476: MatCreate(PETSC_COMM_SELF,&B);
4477: MatSetSizes(B,m,N,m,N);
4478: MatSetBlockSizesFromMats(B,A,A);
4479: MatSetType(B,MATSEQAIJ);
4480: MatSeqAIJSetPreallocation(B,0,NULL);
4481: MatGetOwnershipRange(A,&rstart,0);
4482: for (i=0; i<m; i++) {
4483: MatGetRow(A,i+rstart,&nnz,&indx,&values);
4484: MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);
4485: MatRestoreRow(A,i+rstart,&nnz,&indx,&values);
4486: }
4487: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
4488: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
4490: MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);
4491: PetscStrlen(outfile,&len);
4492: PetscMalloc1(len+5,&name);
4493: sprintf(name,"%s.%d",outfile,rank);
4494: PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);
4495: PetscFree(name);
4496: MatView(B,out);
4497: PetscViewerDestroy(&out);
4498: MatDestroy(&B);
4499: return(0);
4500: }
4502: PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
4503: {
4504: PetscErrorCode ierr;
4505: Mat_Merge_SeqsToMPI *merge;
4506: PetscContainer container;
4509: PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject*)&container);
4510: if (container) {
4511: PetscContainerGetPointer(container,(void**)&merge);
4512: PetscFree(merge->id_r);
4513: PetscFree(merge->len_s);
4514: PetscFree(merge->len_r);
4515: PetscFree(merge->bi);
4516: PetscFree(merge->bj);
4517: PetscFree(merge->buf_ri[0]);
4518: PetscFree(merge->buf_ri);
4519: PetscFree(merge->buf_rj[0]);
4520: PetscFree(merge->buf_rj);
4521: PetscFree(merge->coi);
4522: PetscFree(merge->coj);
4523: PetscFree(merge->owners_co);
4524: PetscLayoutDestroy(&merge->rowmap);
4525: PetscFree(merge);
4526: PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);
4527: }
4528: MatDestroy_MPIAIJ(A);
4529: return(0);
4530: }
4532: #include <../src/mat/utils/freespace.h>
4533: #include <petscbt.h>
4535: PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4536: {
4537: PetscErrorCode ierr;
4538: MPI_Comm comm;
4539: Mat_SeqAIJ *a =(Mat_SeqAIJ*)seqmat->data;
4540: PetscMPIInt size,rank,taga,*len_s;
4541: PetscInt N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4542: PetscInt proc,m;
4543: PetscInt **buf_ri,**buf_rj;
4544: PetscInt k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4545: PetscInt nrows,**buf_ri_k,**nextrow,**nextai;
4546: MPI_Request *s_waits,*r_waits;
4547: MPI_Status *status;
4548: MatScalar *aa=a->a;
4549: MatScalar **abuf_r,*ba_i;
4550: Mat_Merge_SeqsToMPI *merge;
4551: PetscContainer container;
4554: PetscObjectGetComm((PetscObject)mpimat,&comm);
4555: PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);
4557: MPI_Comm_size(comm,&size);
4558: MPI_Comm_rank(comm,&rank);
4560: PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);
4561: PetscContainerGetPointer(container,(void**)&merge);
4563: bi = merge->bi;
4564: bj = merge->bj;
4565: buf_ri = merge->buf_ri;
4566: buf_rj = merge->buf_rj;
4568: PetscMalloc1(size,&status);
4569: owners = merge->rowmap->range;
4570: len_s = merge->len_s;
4572: /* send and recv matrix values */
4573: /*-----------------------------*/
4574: PetscObjectGetNewTag((PetscObject)mpimat,&taga);
4575: PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);
4577: PetscMalloc1(merge->nsend+1,&s_waits);
4578: for (proc=0,k=0; proc<size; proc++) {
4579: if (!len_s[proc]) continue;
4580: i = owners[proc];
4581: MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
4582: k++;
4583: }
4585: if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
4586: if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}
4587: PetscFree(status);
4589: PetscFree(s_waits);
4590: PetscFree(r_waits);
4592: /* insert mat values of mpimat */
4593: /*----------------------------*/
4594: PetscMalloc1(N,&ba_i);
4595: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);
4597: for (k=0; k<merge->nrecv; k++) {
4598: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4599: nrows = *(buf_ri_k[k]);
4600: nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */
4601: nextai[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
4602: }
4604: /* set values of ba */
4605: m = merge->rowmap->n;
4606: for (i=0; i<m; i++) {
4607: arow = owners[rank] + i;
4608: bj_i = bj+bi[i]; /* col indices of the i-th row of mpimat */
4609: bnzi = bi[i+1] - bi[i];
4610: PetscArrayzero(ba_i,bnzi);
4612: /* add local non-zero vals of this proc's seqmat into ba */
4613: anzi = ai[arow+1] - ai[arow];
4614: aj = a->j + ai[arow];
4615: aa = a->a + ai[arow];
4616: nextaj = 0;
4617: for (j=0; nextaj<anzi; j++) {
4618: if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4619: ba_i[j] += aa[nextaj++];
4620: }
4621: }
4623: /* add received vals into ba */
4624: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4625: /* i-th row */
4626: if (i == *nextrow[k]) {
4627: anzi = *(nextai[k]+1) - *nextai[k];
4628: aj = buf_rj[k] + *(nextai[k]);
4629: aa = abuf_r[k] + *(nextai[k]);
4630: nextaj = 0;
4631: for (j=0; nextaj<anzi; j++) {
4632: if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4633: ba_i[j] += aa[nextaj++];
4634: }
4635: }
4636: nextrow[k]++; nextai[k]++;
4637: }
4638: }
4639: MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);
4640: }
4641: MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);
4642: MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);
4644: PetscFree(abuf_r[0]);
4645: PetscFree(abuf_r);
4646: PetscFree(ba_i);
4647: PetscFree3(buf_ri_k,nextrow,nextai);
4648: PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);
4649: return(0);
4650: }
4652: PetscErrorCode MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4653: {
4654: PetscErrorCode ierr;
4655: Mat B_mpi;
4656: Mat_SeqAIJ *a=(Mat_SeqAIJ*)seqmat->data;
4657: PetscMPIInt size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4658: PetscInt **buf_rj,**buf_ri,**buf_ri_k;
4659: PetscInt M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4660: PetscInt len,proc,*dnz,*onz,bs,cbs;
4661: PetscInt k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4662: PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4663: MPI_Request *si_waits,*sj_waits,*ri_waits,*rj_waits;
4664: MPI_Status *status;
4665: PetscFreeSpaceList free_space=NULL,current_space=NULL;
4666: PetscBT lnkbt;
4667: Mat_Merge_SeqsToMPI *merge;
4668: PetscContainer container;
4671: PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);
4673: /* make sure it is a PETSc comm */
4674: PetscCommDuplicate(comm,&comm,NULL);
4675: MPI_Comm_size(comm,&size);
4676: MPI_Comm_rank(comm,&rank);
4678: PetscNew(&merge);
4679: PetscMalloc1(size,&status);
4681: /* determine row ownership */
4682: /*---------------------------------------------------------*/
4683: PetscLayoutCreate(comm,&merge->rowmap);
4684: PetscLayoutSetLocalSize(merge->rowmap,m);
4685: PetscLayoutSetSize(merge->rowmap,M);
4686: PetscLayoutSetBlockSize(merge->rowmap,1);
4687: PetscLayoutSetUp(merge->rowmap);
4688: PetscMalloc1(size,&len_si);
4689: PetscMalloc1(size,&merge->len_s);
4691: m = merge->rowmap->n;
4692: owners = merge->rowmap->range;
4694: /* determine the number of messages to send, their lengths */
4695: /*---------------------------------------------------------*/
4696: len_s = merge->len_s;
4698: len = 0; /* length of buf_si[] */
4699: merge->nsend = 0;
4700: for (proc=0; proc<size; proc++) {
4701: len_si[proc] = 0;
4702: if (proc == rank) {
4703: len_s[proc] = 0;
4704: } else {
4705: len_si[proc] = owners[proc+1] - owners[proc] + 1;
4706: len_s[proc] = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4707: }
4708: if (len_s[proc]) {
4709: merge->nsend++;
4710: nrows = 0;
4711: for (i=owners[proc]; i<owners[proc+1]; i++) {
4712: if (ai[i+1] > ai[i]) nrows++;
4713: }
4714: len_si[proc] = 2*(nrows+1);
4715: len += len_si[proc];
4716: }
4717: }
4719: /* determine the number and length of messages to receive for ij-structure */
4720: /*-------------------------------------------------------------------------*/
4721: PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
4722: PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);
4724: /* post the Irecv of j-structure */
4725: /*-------------------------------*/
4726: PetscCommGetNewTag(comm,&tagj);
4727: PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);
4729: /* post the Isend of j-structure */
4730: /*--------------------------------*/
4731: PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);
4733: for (proc=0, k=0; proc<size; proc++) {
4734: if (!len_s[proc]) continue;
4735: i = owners[proc];
4736: MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);
4737: k++;
4738: }
4740: /* receives and sends of j-structure are complete */
4741: /*------------------------------------------------*/
4742: if (merge->nrecv) {MPI_Waitall(merge->nrecv,rj_waits,status);}
4743: if (merge->nsend) {MPI_Waitall(merge->nsend,sj_waits,status);}
4745: /* send and recv i-structure */
4746: /*---------------------------*/
4747: PetscCommGetNewTag(comm,&tagi);
4748: PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);
4750: PetscMalloc1(len+1,&buf_s);
4751: buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
4752: for (proc=0,k=0; proc<size; proc++) {
4753: if (!len_s[proc]) continue;
4754: /* form outgoing message for i-structure:
4755: buf_si[0]: nrows to be sent
4756: [1:nrows]: row index (global)
4757: [nrows+1:2*nrows+1]: i-structure index
4758: */
4759: /*-------------------------------------------*/
4760: nrows = len_si[proc]/2 - 1;
4761: buf_si_i = buf_si + nrows+1;
4762: buf_si[0] = nrows;
4763: buf_si_i[0] = 0;
4764: nrows = 0;
4765: for (i=owners[proc]; i<owners[proc+1]; i++) {
4766: anzi = ai[i+1] - ai[i];
4767: if (anzi) {
4768: buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4769: buf_si[nrows+1] = i-owners[proc]; /* local row index */
4770: nrows++;
4771: }
4772: }
4773: MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);
4774: k++;
4775: buf_si += len_si[proc];
4776: }
4778: if (merge->nrecv) {MPI_Waitall(merge->nrecv,ri_waits,status);}
4779: if (merge->nsend) {MPI_Waitall(merge->nsend,si_waits,status);}
4781: PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);
4782: for (i=0; i<merge->nrecv; i++) {
4783: PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);
4784: }
4786: PetscFree(len_si);
4787: PetscFree(len_ri);
4788: PetscFree(rj_waits);
4789: PetscFree2(si_waits,sj_waits);
4790: PetscFree(ri_waits);
4791: PetscFree(buf_s);
4792: PetscFree(status);
4794: /* compute a local seq matrix in each processor */
4795: /*----------------------------------------------*/
4796: /* allocate bi array and free space for accumulating nonzero column info */
4797: PetscMalloc1(m+1,&bi);
4798: bi[0] = 0;
4800: /* create and initialize a linked list */
4801: nlnk = N+1;
4802: PetscLLCreate(N,N,nlnk,lnk,lnkbt);
4804: /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4805: len = ai[owners[rank+1]] - ai[owners[rank]];
4806: PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);
4808: current_space = free_space;
4810: /* determine symbolic info for each local row */
4811: PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);
4813: for (k=0; k<merge->nrecv; k++) {
4814: buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4815: nrows = *buf_ri_k[k];
4816: nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */
4817: nextai[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
4818: }
4820: MatPreallocateInitialize(comm,m,n,dnz,onz);
4821: len = 0;
4822: for (i=0; i<m; i++) {
4823: bnzi = 0;
4824: /* add local non-zero cols of this proc's seqmat into lnk */
4825: arow = owners[rank] + i;
4826: anzi = ai[arow+1] - ai[arow];
4827: aj = a->j + ai[arow];
4828: PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4829: bnzi += nlnk;
4830: /* add received col data into lnk */
4831: for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4832: if (i == *nextrow[k]) { /* i-th row */
4833: anzi = *(nextai[k]+1) - *nextai[k];
4834: aj = buf_rj[k] + *nextai[k];
4835: PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);
4836: bnzi += nlnk;
4837: nextrow[k]++; nextai[k]++;
4838: }
4839: }
4840: if (len < bnzi) len = bnzi; /* =max(bnzi) */
4842: /* if free space is not available, make more free space */
4843: if (current_space->local_remaining<bnzi) {
4844: PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);
4845: nspacedouble++;
4846: }
4847: /* copy data into free space, then initialize lnk */
4848: PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);
4849: MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);
4851: current_space->array += bnzi;
4852: current_space->local_used += bnzi;
4853: current_space->local_remaining -= bnzi;
4855: bi[i+1] = bi[i] + bnzi;
4856: }
4858: PetscFree3(buf_ri_k,nextrow,nextai);
4860: PetscMalloc1(bi[m]+1,&bj);
4861: PetscFreeSpaceContiguous(&free_space,bj);
4862: PetscLLDestroy(lnk,lnkbt);
4864: /* create symbolic parallel matrix B_mpi */
4865: /*---------------------------------------*/
4866: MatGetBlockSizes(seqmat,&bs,&cbs);
4867: MatCreate(comm,&B_mpi);
4868: if (n==PETSC_DECIDE) {
4869: MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);
4870: } else {
4871: MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
4872: }
4873: MatSetBlockSizes(B_mpi,bs,cbs);
4874: MatSetType(B_mpi,MATMPIAIJ);
4875: MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);
4876: MatPreallocateFinalize(dnz,onz);
4877: MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);
4879: /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4880: B_mpi->assembled = PETSC_FALSE;
4881: B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
4882: merge->bi = bi;
4883: merge->bj = bj;
4884: merge->buf_ri = buf_ri;
4885: merge->buf_rj = buf_rj;
4886: merge->coi = NULL;
4887: merge->coj = NULL;
4888: merge->owners_co = NULL;
4890: PetscCommDestroy(&comm);
4892: /* attach the supporting struct to B_mpi for reuse */
4893: PetscContainerCreate(PETSC_COMM_SELF,&container);
4894: PetscContainerSetPointer(container,merge);
4895: PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);
4896: PetscContainerDestroy(&container);
4897: *mpimat = B_mpi;
4899: PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);
4900: return(0);
4901: }
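/* Editor's note: an illustrative sketch (not part of the PETSc source) of how the
   container composed above is recovered later; the numeric phase queries it back:

     PetscContainer       cont;
     Mat_Merge_SeqsToMPI *m;
     PetscObjectQuery((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject*)&cont);
     if (!cont) SETERRQ(PetscObjectComm((PetscObject)B_mpi),PETSC_ERR_ARG_WRONGSTATE,"Matrix was not created by the symbolic phase");
     PetscContainerGetPointer(cont,(void**)&m);   (m->bi, m->bj, ... hold the symbolic data)
*/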
4903: /*@C
4904: MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
4905: matrices from each processor
4907: Collective
4909: Input Parameters:
4910: + comm - the communicator the parallel matrix will live on
4911: . seqmat - the input sequential matrix on each processor
4912: . m - number of local rows (or PETSC_DECIDE)
4913: . n - number of local columns (or PETSC_DECIDE)
4914: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4916: Output Parameter:
4917: . mpimat - the parallel matrix generated
4919: Level: advanced
4921: Notes:
4922: The dimensions of the sequential matrix on each processor MUST be the same.
4923: The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4924: destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
4925: @*/
4926: PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4927: {
4929: PetscMPIInt size;
4932: MPI_Comm_size(comm,&size);
4933: if (size == 1) {
4934: PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4935: if (scall == MAT_INITIAL_MATRIX) {
4936: MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
4937: } else {
4938: MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
4939: }
4940: PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4941: return(0);
4942: }
4943: PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);
4944: if (scall == MAT_INITIAL_MATRIX) {
4945: MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);
4946: }
4947: MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);
4948: PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);
4949: return(0);
4950: }
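/* Editor's note: a minimal usage sketch (illustrative only; global sizes M,N are
   placeholders). Every rank assembles a SeqAIJ matrix of identical dimensions and
   the per-rank contributions are summed into one MATMPIAIJ matrix:

     Mat seqmat,C;
     MatCreateSeqAIJ(PETSC_COMM_SELF,M,N,0,NULL,&seqmat);
     ... each rank sets its contribution with MatSetValues() ...
     MatAssemblyBegin(seqmat,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(seqmat,MAT_FINAL_ASSEMBLY);
     MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&C);
     ... later, with new values but the same nonzero pattern ...
     MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&C);
*/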
4952: /*@
4953: MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
4954: mlocal rows and n columns, where mlocal is the row count obtained with MatGetLocalSize() and n is the global column count obtained
4955: with MatGetSize().
4957: Not Collective
4959: Input Parameters:
4960: + A - the matrix
4961: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4963: Output Parameter:
4964: . A_loc - the local sequential matrix generated
4966: Level: developer
4968: Notes:
4969: When the communicator associated with A has size 1 and MAT_INITIAL_MATRIX is requested, the matrix returned is the diagonal part of A.
4970: If MAT_REUSE_MATRIX is requested with comm size 1, MatCopy(Adiag,*A_loc,SAME_NONZERO_PATTERN) is called.
4971: This means that one can preallocate the proper sequential matrix first and then call this routine with MAT_REUSE_MATRIX to safely
4972: modify the values of the returned A_loc.
4974: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed()
4976: @*/
4977: PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4978: {
4980: Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data;
4981: Mat_SeqAIJ *mat,*a,*b;
4982: PetscInt *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
4983: MatScalar *aa,*ba,*cam;
4984: PetscScalar *ca;
4985: PetscMPIInt size;
4986: PetscInt am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4987: PetscInt *ci,*cj,col,ncols_d,ncols_o,jo;
4988: PetscBool match;
4991: PetscStrbeginswith(((PetscObject)A)->type_name,MATMPIAIJ,&match);
4992: if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
4993: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
4994: if (size == 1) {
4995: if (scall == MAT_INITIAL_MATRIX) {
4996: PetscObjectReference((PetscObject)mpimat->A);
4997: *A_loc = mpimat->A;
4998: } else if (scall == MAT_REUSE_MATRIX) {
4999: MatCopy(mpimat->A,*A_loc,SAME_NONZERO_PATTERN);
5000: }
5001: return(0);
5002: }
5004: PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);
5005: a = (Mat_SeqAIJ*)(mpimat->A)->data;
5006: b = (Mat_SeqAIJ*)(mpimat->B)->data;
5007: ai = a->i; aj = a->j; bi = b->i; bj = b->j;
5008: aa = a->a; ba = b->a;
5009: if (scall == MAT_INITIAL_MATRIX) {
5010: PetscMalloc1(1+am,&ci);
5011: ci[0] = 0;
5012: for (i=0; i<am; i++) {
5013: ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
5014: }
5015: PetscMalloc1(1+ci[am],&cj);
5016: PetscMalloc1(1+ci[am],&ca);
5017: k = 0;
5018: for (i=0; i<am; i++) {
5019: ncols_o = bi[i+1] - bi[i];
5020: ncols_d = ai[i+1] - ai[i];
5021: /* off-diagonal portion of A */
5022: for (jo=0; jo<ncols_o; jo++) {
5023: col = cmap[*bj];
5024: if (col >= cstart) break;
5025: cj[k] = col; bj++;
5026: ca[k++] = *ba++;
5027: }
5028: /* diagonal portion of A */
5029: for (j=0; j<ncols_d; j++) {
5030: cj[k] = cstart + *aj++;
5031: ca[k++] = *aa++;
5032: }
5033: /* off-diagonal portion of A */
5034: for (j=jo; j<ncols_o; j++) {
5035: cj[k] = cmap[*bj++];
5036: ca[k++] = *ba++;
5037: }
5038: }
5039: /* put together the new matrix */
5040: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);
5041: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5042: /* Since these are PETSc arrays, change flags to free them as necessary. */
5043: mat = (Mat_SeqAIJ*)(*A_loc)->data;
5044: mat->free_a = PETSC_TRUE;
5045: mat->free_ij = PETSC_TRUE;
5046: mat->nonew = 0;
5047: } else if (scall == MAT_REUSE_MATRIX) {
5048: mat=(Mat_SeqAIJ*)(*A_loc)->data;
5049: ci = mat->i; cj = mat->j; cam = mat->a;
5050: for (i=0; i<am; i++) {
5051: /* off-diagonal portion of A */
5052: ncols_o = bi[i+1] - bi[i];
5053: for (jo=0; jo<ncols_o; jo++) {
5054: col = cmap[*bj];
5055: if (col >= cstart) break;
5056: *cam++ = *ba++; bj++;
5057: }
5058: /* diagonal portion of A */
5059: ncols_d = ai[i+1] - ai[i];
5060: for (j=0; j<ncols_d; j++) *cam++ = *aa++;
5061: /* off-diagonal portion of A */
5062: for (j=jo; j<ncols_o; j++) {
5063: *cam++ = *ba++; bj++;
5064: }
5065: }
5066: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
5067: PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);
5068: return(0);
5069: }
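/* Editor's note: illustrative usage of the routine above:

     Mat A_loc;
     MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);   (A_loc has mlocal rows and N columns)
     ... use A_loc ...
     MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);     (refresh values after A changed)
     MatDestroy(&A_loc);
*/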
5071: /*@C
5072: MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns
5074: Not Collective
5076: Input Parameters:
5077: + A - the matrix
5078: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5079: - row, col - index sets of rows and columns to extract (or NULL)
5081: Output Parameter:
5082: . A_loc - the local sequential matrix generated
5084: Level: developer
5086: .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()
5088: @*/
5089: PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
5090: {
5091: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
5093: PetscInt i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
5094: IS isrowa,iscola;
5095: Mat *aloc;
5096: PetscBool match;
5099: PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);
5100: if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
5101: PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);
5102: if (!row) {
5103: start = A->rmap->rstart; end = A->rmap->rend;
5104: ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);
5105: } else {
5106: isrowa = *row;
5107: }
5108: if (!col) {
5109: start = A->cmap->rstart;
5110: cmap = a->garray;
5111: nzA = a->A->cmap->n;
5112: nzB = a->B->cmap->n;
5113: PetscMalloc1(nzA+nzB, &idx);
5114: ncols = 0;
5115: for (i=0; i<nzB; i++) {
5116: if (cmap[i] < start) idx[ncols++] = cmap[i];
5117: else break;
5118: }
5119: imark = i;
5120: for (i=0; i<nzA; i++) idx[ncols++] = start + i;
5121: for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
5122: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);
5123: } else {
5124: iscola = *col;
5125: }
5126: if (scall != MAT_INITIAL_MATRIX) {
5127: PetscMalloc1(1,&aloc);
5128: aloc[0] = *A_loc;
5129: }
5130: MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);
5131: if (!col) { /* attach global id of condensed columns */
5132: PetscObjectCompose((PetscObject)aloc[0],"_petsc_GetLocalMatCondensed_iscol",(PetscObject)iscola);
5133: }
5134: *A_loc = aloc[0];
5135: PetscFree(aloc);
5136: if (!row) {
5137: ISDestroy(&isrowa);
5138: }
5139: if (!col) {
5140: ISDestroy(&iscola);
5141: }
5142: PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);
5143: return(0);
5144: }
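/* Editor's note: illustrative usage; passing NULL for row and col lets the routine
   build the index sets itself (all local rows, all nonzero columns):

     Mat A_loc;
     MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_loc);

   the global ids of the condensed columns are then composed on A_loc under the
   key "_petsc_GetLocalMatCondensed_iscol", as done above. */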
5146: /*
5147: * Destroy a mat that may be composed with PetscSF communication objects.
5148: * The SF objects were created in MatCreateSeqSubMatrixWithRows_Private.
5149: * */
5150: PetscErrorCode MatDestroy_SeqAIJ_PetscSF(Mat mat)
5151: {
5152: PetscSF sf,osf;
5153: IS map;
5154: PetscErrorCode ierr;
5157: PetscObjectQuery((PetscObject)mat,"diagsf",(PetscObject*)&sf);
5158: PetscObjectQuery((PetscObject)mat,"offdiagsf",(PetscObject*)&osf);
5159: PetscSFDestroy(&sf);
5160: PetscSFDestroy(&osf);
5161: PetscObjectQuery((PetscObject)mat,"aoffdiagtopothmapping",(PetscObject*)&map);
5162: ISDestroy(&map);
5163: MatDestroy_SeqAIJ(mat);
5164: return(0);
5165: }
5167: /*
5168: * Create a sequential AIJ matrix based on row indices; the entire row is extracted once a row index is matched.
5169: * Rows can be local or remote. The routine is designed to be memory scalable, so that nothing is allocated
5170: * proportional to a global size.
5171: * */
5172: PetscErrorCode MatCreateSeqSubMatrixWithRows_Private(Mat P,IS rows,Mat *P_oth)
5173: {
5174: Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data;
5175: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data,*p_oth;
5176: PetscInt plocalsize,nrows,*ilocal,*oilocal,i,lidx,*nrcols,*nlcols,ncol;
5177: PetscMPIInt owner;
5178: PetscSFNode *iremote,*oiremote;
5179: const PetscInt *lrowindices;
5180: PetscErrorCode ierr;
5181: PetscSF sf,osf;
5182: PetscInt pcstart,*roffsets,*loffsets,*pnnz,j;
5183: PetscInt ontotalcols,dntotalcols,ntotalcols,nout;
5184: MPI_Comm comm;
5185: ISLocalToGlobalMapping mapping;
5188: PetscObjectGetComm((PetscObject)P,&comm);
5189: /* plocalsize is the number of roots
5190: * nrows is the number of leaves
5191: * */
5192: MatGetLocalSize(P,&plocalsize,NULL);
5193: ISGetLocalSize(rows,&nrows);
5194: PetscCalloc1(nrows,&iremote);
5195: ISGetIndices(rows,&lrowindices);
5196: for (i=0;i<nrows;i++) {
5197: /* Find a remote index and an owner for a row
5198: * The row could be local or remote
5199: * */
5200: owner = 0;
5201: lidx = 0;
5202: PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,&lidx);
5203: iremote[i].index = lidx;
5204: iremote[i].rank = owner;
5205: }
5206: /* Create SF to communicate how many nonzero columns for each row */
5207: PetscSFCreate(comm,&sf);
5208: /* SF will figure out the number of nonzero columns for each row, and their
5209: * offsets
5210: * */
5211: PetscSFSetGraph(sf,plocalsize,nrows,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5212: PetscSFSetFromOptions(sf);
5213: PetscSFSetUp(sf);
5215: PetscCalloc1(2*(plocalsize+1),&roffsets);
5216: PetscCalloc1(2*plocalsize,&nrcols);
5217: PetscCalloc1(nrows,&pnnz);
5218: roffsets[0] = 0;
5219: roffsets[1] = 0;
5220: for (i=0;i<plocalsize;i++) {
5221: /* diag */
5222: nrcols[i*2+0] = pd->i[i+1] - pd->i[i];
5223: /* off diag */
5224: nrcols[i*2+1] = po->i[i+1] - po->i[i];
5225: /* compute offsets so that we know the relative location of each row */
5226: roffsets[(i+1)*2+0] = roffsets[i*2+0] + nrcols[i*2+0];
5227: roffsets[(i+1)*2+1] = roffsets[i*2+1] + nrcols[i*2+1];
5228: }
5229: PetscCalloc1(2*nrows,&nlcols);
5230: PetscCalloc1(2*nrows,&loffsets);
5231: /* 'r' means root, and 'l' means leaf */
5232: PetscSFBcastBegin(sf,MPIU_2INT,nrcols,nlcols);
5233: PetscSFBcastBegin(sf,MPIU_2INT,roffsets,loffsets);
5234: PetscSFBcastEnd(sf,MPIU_2INT,nrcols,nlcols);
5235: PetscSFBcastEnd(sf,MPIU_2INT,roffsets,loffsets);
5236: PetscSFDestroy(&sf);
5237: PetscFree(roffsets);
5238: PetscFree(nrcols);
5239: dntotalcols = 0;
5240: ontotalcols = 0;
5241: ncol = 0;
5242: for (i=0;i<nrows;i++) {
5243: pnnz[i] = nlcols[i*2+0] + nlcols[i*2+1];
5244: ncol = PetscMax(pnnz[i],ncol);
5245: /* diag */
5246: dntotalcols += nlcols[i*2+0];
5247: /* off diag */
5248: ontotalcols += nlcols[i*2+1];
5249: }
5250: /* We do not need to figure out the exact number of columns
5251: * since all the calculations will be done by going through the raw data
5252: * */
5253: MatCreateSeqAIJ(PETSC_COMM_SELF,nrows,ncol,0,pnnz,P_oth);
5254: MatSetUp(*P_oth);
5255: PetscFree(pnnz);
5256: p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5257: /* diag */
5258: PetscCalloc1(dntotalcols,&iremote);
5259: /* off diag */
5260: PetscCalloc1(ontotalcols,&oiremote);
5261: /* diag */
5262: PetscCalloc1(dntotalcols,&ilocal);
5263: /* off diag */
5264: PetscCalloc1(ontotalcols,&oilocal);
5265: dntotalcols = 0;
5266: ontotalcols = 0;
5267: ntotalcols = 0;
5268: for (i=0;i<nrows;i++) {
5269: owner = 0;
5270: PetscLayoutFindOwnerIndex(P->rmap,lrowindices[i],&owner,NULL);
5271: /* Set iremote for diag matrix */
5272: for (j=0;j<nlcols[i*2+0];j++) {
5273: iremote[dntotalcols].index = loffsets[i*2+0] + j;
5274: iremote[dntotalcols].rank = owner;
5275: /* P_oth is SeqAIJ, so ilocal needs to point to the first part of memory */
5276: ilocal[dntotalcols++] = ntotalcols++;
5277: }
5278: /* off diag */
5279: for (j=0;j<nlcols[i*2+1];j++) {
5280: oiremote[ontotalcols].index = loffsets[i*2+1] + j;
5281: oiremote[ontotalcols].rank = owner;
5282: oilocal[ontotalcols++] = ntotalcols++;
5283: }
5284: }
5285: ISRestoreIndices(rows,&lrowindices);
5286: PetscFree(loffsets);
5287: PetscFree(nlcols);
5288: PetscSFCreate(comm,&sf);
5289: /* P serves as the roots and P_oth as the leaves
5290: * Diag matrix
5291: * */
5292: PetscSFSetGraph(sf,pd->i[plocalsize],dntotalcols,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
5293: PetscSFSetFromOptions(sf);
5294: PetscSFSetUp(sf);
5296: PetscSFCreate(comm,&osf);
5297: /* Off diag */
5298: PetscSFSetGraph(osf,po->i[plocalsize],ontotalcols,oilocal,PETSC_OWN_POINTER,oiremote,PETSC_OWN_POINTER);
5299: PetscSFSetFromOptions(osf);
5300: PetscSFSetUp(osf);
5301: /* We operate on the matrix internal data to save memory */
5302: PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a);
5303: PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a);
5304: MatGetOwnershipRangeColumn(P,&pcstart,NULL);
5305: /* Convert to global indices for diag matrix */
5306: for (i=0;i<pd->i[plocalsize];i++) pd->j[i] += pcstart;
5307: PetscSFBcastBegin(sf,MPIU_INT,pd->j,p_oth->j);
5308: /* We want P_oth to store global indices */
5309: ISLocalToGlobalMappingCreate(comm,1,p->B->cmap->n,p->garray,PETSC_COPY_VALUES,&mapping);
5310: /* Use memory scalable approach */
5311: ISLocalToGlobalMappingSetType(mapping,ISLOCALTOGLOBALMAPPINGHASH);
5312: ISLocalToGlobalMappingApply(mapping,po->i[plocalsize],po->j,po->j);
5313: PetscSFBcastBegin(osf,MPIU_INT,po->j,p_oth->j);
5314: PetscSFBcastEnd(sf,MPIU_INT,pd->j,p_oth->j);
5315: /* Convert back to local indices */
5316: for (i=0;i<pd->i[plocalsize];i++) pd->j[i] -= pcstart;
5317: PetscSFBcastEnd(osf,MPIU_INT,po->j,p_oth->j);
5318: nout = 0;
5319: ISGlobalToLocalMappingApply(mapping,IS_GTOLM_DROP,po->i[plocalsize],po->j,&nout,po->j);
5320: if (nout != po->i[plocalsize]) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP,"n %D does not equal nout %D",po->i[plocalsize],nout);
5321: ISLocalToGlobalMappingDestroy(&mapping);
5322: /* Exchange values */
5323: PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a);
5324: PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a);
5325: /* Stop PETSc from shrinking memory */
5326: for (i=0;i<nrows;i++) p_oth->ilen[i] = p_oth->imax[i];
5327: MatAssemblyBegin(*P_oth,MAT_FINAL_ASSEMBLY);
5328: MatAssemblyEnd(*P_oth,MAT_FINAL_ASSEMBLY);
5329: /* Attach PetscSF objects to P_oth so that we can reuse it later */
5330: PetscObjectCompose((PetscObject)*P_oth,"diagsf",(PetscObject)sf);
5331: PetscObjectCompose((PetscObject)*P_oth,"offdiagsf",(PetscObject)osf);
5332: /* The "new" MatDestroy takes care of the PetscSF objects as well */
5333: (*P_oth)->ops->destroy = MatDestroy_SeqAIJ_PetscSF;
5334: return(0);
5335: }
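/* Editor's note: the root/leaf broadcast pattern used above, in miniature
   (illustrative sketch; names are placeholders):

     PetscSF      sf;
     PetscSFNode *remote;                       (leaf i pulls root (rank,index) remote[i])
     PetscSFCreate(comm,&sf);
     PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);
     PetscSFSetUp(sf);
     PetscSFBcastBegin(sf,MPIU_SCALAR,rootdata,leafdata);
     PetscSFBcastEnd(sf,MPIU_SCALAR,rootdata,leafdata);
     PetscSFDestroy(&sf);

   MatCreateSeqSubMatrixWithRows_Private() applies this twice, once for the diagonal
   block and once for the off-diagonal block, overlapping the two broadcasts. */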
5337: /*
5338: * Creates a SeqAIJ matrix by taking the rows of B that correspond to nonzero columns of local A.
5339: * This supports MPIAIJ and MAIJ matrices.
5340: * */
5341: PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat A,Mat P,PetscInt dof,MatReuse reuse,Mat *P_oth)
5342: {
5343: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
5344: Mat_SeqAIJ *p_oth;
5345: Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
5346: IS rows,map;
5347: PetscHMapI hamp;
5348: PetscInt i,htsize,*rowindices,off,*mapping,key,count;
5349: MPI_Comm comm;
5350: PetscSF sf,osf;
5351: PetscBool has;
5352: PetscErrorCode ierr;
5355: PetscObjectGetComm((PetscObject)A,&comm);
5356: PetscLogEventBegin(MAT_GetBrowsOfAocols,A,P,0,0);
5357: /* If it is the first time, create an index set of off-diag nonzero columns of A,
5358: * and then create a submatrix (that often is an overlapping matrix)
5359: * */
5360: if (reuse==MAT_INITIAL_MATRIX) {
5361: /* Use a hash table to figure out unique keys */
5362: PetscHMapICreate(&hamp);
5363: PetscHMapIResize(hamp,a->B->cmap->n);
5364: PetscCalloc1(a->B->cmap->n,&mapping);
5365: count = 0;
5366: /* Assume that a->garray is sorted, otherwise the following does not make sense */
5367: for (i=0;i<a->B->cmap->n;i++) {
5368: key = a->garray[i]/dof;
5369: PetscHMapIHas(hamp,key,&has);
5370: if (!has) {
5371: mapping[i] = count;
5372: PetscHMapISet(hamp,key,count++);
5373: } else {
5374: /* Current 'i' has the same key as the previous entry */
5375: mapping[i] = count-1;
5376: }
5377: }
5378: ISCreateGeneral(comm,a->B->cmap->n,mapping,PETSC_OWN_POINTER,&map);
5379: PetscHMapIGetSize(hamp,&htsize);
5380: if (htsize!=count) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP,"Size of hash map %D is inconsistent with count %D",htsize,count);
5381: PetscCalloc1(htsize,&rowindices);
5382: off = 0;
5383: PetscHMapIGetKeys(hamp,&off,rowindices);
5384: PetscHMapIDestroy(&hamp);
5385: PetscSortInt(htsize,rowindices);
5386: ISCreateGeneral(comm,htsize,rowindices,PETSC_OWN_POINTER,&rows);
5387: /* In case the matrix was already created and the user wants to recreate it */
5388: MatDestroy(P_oth);
5389: MatCreateSeqSubMatrixWithRows_Private(P,rows,P_oth);
5390: PetscObjectCompose((PetscObject)*P_oth,"aoffdiagtopothmapping",(PetscObject)map);
5391: ISDestroy(&rows);
5392: } else if (reuse==MAT_REUSE_MATRIX) {
5393: /* If the matrix was already created, we simply update the values using the SF objects
5394: * that were attached to the matrix earlier.
5395: * */
5396: PetscObjectQuery((PetscObject)*P_oth,"diagsf",(PetscObject*)&sf);
5397: PetscObjectQuery((PetscObject)*P_oth,"offdiagsf",(PetscObject*)&osf);
5398: if (!sf || !osf) {
5399: SETERRQ(comm,PETSC_ERR_ARG_NULL,"Matrix is not initialized yet");
5400: }
5401: p_oth = (Mat_SeqAIJ*) (*P_oth)->data;
5402: /* Update values in place */
5403: PetscSFBcastBegin(sf,MPIU_SCALAR,pd->a,p_oth->a);
5404: PetscSFBcastBegin(osf,MPIU_SCALAR,po->a,p_oth->a);
5405: PetscSFBcastEnd(sf,MPIU_SCALAR,pd->a,p_oth->a);
5406: PetscSFBcastEnd(osf,MPIU_SCALAR,po->a,p_oth->a);
5407: } else {
5408: SETERRQ(comm,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown reuse type");
5409: }
5410: PetscLogEventEnd(MAT_GetBrowsOfAocols,A,P,0,0);
5411: return(0);
5412: }
5414: /*@C
5415: MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to nonzero columns of local A
5417: Collective on Mat
5419: Input Parameters:
5420: + A,B - the matrices in mpiaij format
5421: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5422: - rowb, colb - index sets of rows and columns of B to extract (or NULL)
5424: Output Parameter:
5425: + rowb, colb - index sets of rows and columns of B to extract
5426: - B_seq - the sequential matrix generated
5428: Level: developer
5430: @*/
5431: PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
5432: {
5433: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
5435: PetscInt *idx,i,start,ncols,nzA,nzB,*cmap,imark;
5436: IS isrowb,iscolb;
5437: Mat *bseq=NULL;
5440: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5441: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5442: }
5443: PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);
5445: if (scall == MAT_INITIAL_MATRIX) {
5446: start = A->cmap->rstart;
5447: cmap = a->garray;
5448: nzA = a->A->cmap->n;
5449: nzB = a->B->cmap->n;
5450: PetscMalloc1(nzA+nzB, &idx);
5451: ncols = 0;
5452: for (i=0; i<nzB; i++) { /* row < local row index */
5453: if (cmap[i] < start) idx[ncols++] = cmap[i];
5454: else break;
5455: }
5456: imark = i;
5457: for (i=0; i<nzA; i++) idx[ncols++] = start + i; /* local rows */
5458: for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5459: ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);
5460: ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);
5461: } else {
5462: if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5463: isrowb = *rowb; iscolb = *colb;
5464: PetscMalloc1(1,&bseq);
5465: bseq[0] = *B_seq;
5466: }
5467: MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);
5468: *B_seq = bseq[0];
5469: PetscFree(bseq);
5470: if (!rowb) {
5471: ISDestroy(&isrowb);
5472: } else {
5473: *rowb = isrowb;
5474: }
5475: if (!colb) {
5476: ISDestroy(&iscolb);
5477: } else {
5478: *colb = iscolb;
5479: }
5480: PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);
5481: return(0);
5482: }
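/* Editor's note: illustrative usage of the routine above:

     IS  rowb = NULL,colb = NULL;
     Mat B_seq;
     MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&B_seq);
     ... B's values change, same nonzero pattern ...
     MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&B_seq);
     ISDestroy(&rowb); ISDestroy(&colb); MatDestroy(&B_seq);
*/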
5484: /*
5485: MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to nonzero columns
5486: of the OFF-DIAGONAL portion of local A
5488: Collective on Mat
5490: Input Parameters:
5491: + A,B - the matrices in mpiaij format
5492: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
5494: Output Parameter:
5495: + startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5496: . startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5497: . bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5498: - B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N
5500: Developer Notes: This directly accesses information inside the VecScatter associated with the matrix-vector product
5501: for this matrix. This is not desirable.
5503: Level: developer
5505: */
5506: PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
5507: {
5508: PetscErrorCode ierr;
5509: Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data;
5510: Mat_SeqAIJ *b_oth;
5511: VecScatter ctx;
5512: MPI_Comm comm;
5513: const PetscMPIInt *rprocs,*sprocs;
5514: const PetscInt *srow,*rstarts,*sstarts;
5515: PetscInt *rowlen,*bufj,*bufJ,ncols = 0,aBn=a->B->cmap->n,row,*b_othi,*b_othj,*rvalues=NULL,*svalues=NULL,*cols,sbs,rbs;
5516: PetscInt i,j,k=0,l,ll,nrecvs,nsends,nrows,*rstartsj = 0,*sstartsj,len;
5517: PetscScalar *b_otha,*bufa,*bufA,*vals = NULL;
5518: MPI_Request *rwaits = NULL,*swaits = NULL;
5519: MPI_Status rstatus;
5520: PetscMPIInt jj,size,tag,rank,nsends_mpi,nrecvs_mpi;
5523: PetscObjectGetComm((PetscObject)A,&comm);
5524: MPI_Comm_size(comm,&size);
5526: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
5527: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%d, %d) != (%d,%d)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5528: }
5529: PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);
5530: MPI_Comm_rank(comm,&rank);
5532: if (size == 1) {
5533: startsj_s = NULL;
5534: bufa_ptr = NULL;
5535: *B_oth = NULL;
5536: return(0);
5537: }
5539: ctx = a->Mvctx;
5540: tag = ((PetscObject)ctx)->tag;
5542: if (ctx->inuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Scatter ctx already in use");
5543: VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&srow,&sprocs,&sbs);
5544: /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5545: VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL/*indices not needed*/,&rprocs,&rbs);
5546: PetscMPIIntCast(nsends,&nsends_mpi);
5547: PetscMPIIntCast(nrecvs,&nrecvs_mpi);
5548: PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);
5550: if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5551: if (scall == MAT_INITIAL_MATRIX) {
5552: /* i-array */
5553: /*---------*/
5554: /* post receives */
5555: if (nrecvs) {PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);} /* rstarts can be NULL when nrecvs=0 */
5556: for (i=0; i<nrecvs; i++) {
5557: rowlen = rvalues + rstarts[i]*rbs;
5558: nrows = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
5559: MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5560: }
5562: /* pack the outgoing message */
5563: PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);
5565: sstartsj[0] = 0;
5566: rstartsj[0] = 0;
5567: len = 0; /* total length of j or a array to be sent */
5568: if (nsends) {
5569: k = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */
5570: PetscMalloc1(sbs*(sstarts[nsends]-sstarts[0]),&svalues);
5571: }
5572: for (i=0; i<nsends; i++) {
5573: rowlen = svalues + (sstarts[i]-sstarts[0])*sbs;
5574: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5575: for (j=0; j<nrows; j++) {
5576: row = srow[k] + B->rmap->range[rank]; /* global row idx */
5577: for (l=0; l<sbs; l++) {
5578: MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL); /* rowlength */
5580: rowlen[j*sbs+l] = ncols;
5582: len += ncols;
5583: MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);
5584: }
5585: k++;
5586: }
5587: MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);
5589: sstartsj[i+1] = len; /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5590: }
5591: /* recvs and sends of i-array are completed */
5592: i = nrecvs;
5593: while (i--) {
5594: MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5595: }
5596: if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5597: PetscFree(svalues);
5599: /* allocate buffers for sending j and a arrays */
5600: PetscMalloc1(len+1,&bufj);
5601: PetscMalloc1(len+1,&bufa);
5603: /* create i-array of B_oth */
5604: PetscMalloc1(aBn+2,&b_othi);
5606: b_othi[0] = 0;
5607: len = 0; /* total length of j or a array to be received */
5608: k = 0;
5609: for (i=0; i<nrecvs; i++) {
5610: rowlen = rvalues + (rstarts[i]-rstarts[0])*rbs;
5611: nrows = (rstarts[i+1]-rstarts[i])*rbs; /* num of rows to be received */
5612: for (j=0; j<nrows; j++) {
5613: b_othi[k+1] = b_othi[k] + rowlen[j];
5614: PetscIntSumError(rowlen[j],len,&len);
5615: k++;
5616: }
5617: rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5618: }
5619: PetscFree(rvalues);
5621: /* allocate space for j and a arrays of B_oth */
5622: PetscMalloc1(b_othi[aBn]+1,&b_othj);
5623: PetscMalloc1(b_othi[aBn]+1,&b_otha);
5625: /* j-array */
5626: /*---------*/
5627: /* post receives of j-array */
5628: for (i=0; i<nrecvs; i++) {
5629: nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5630: MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);
5631: }
5633: /* pack the outgoing message j-array */
5634: if (nsends) k = sstarts[0];
5635: for (i=0; i<nsends; i++) {
5636: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5637: bufJ = bufj+sstartsj[i];
5638: for (j=0; j<nrows; j++) {
5639: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
5640: for (ll=0; ll<sbs; ll++) {
5641: MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5642: for (l=0; l<ncols; l++) {
5643: *bufJ++ = cols[l];
5644: }
5645: MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);
5646: }
5647: }
5648: MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);
5649: }
5651: /* recvs and sends of j-array are completed */
5652: i = nrecvs;
5653: while (i--) {
5654: MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5655: }
5656: if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5657: } else if (scall == MAT_REUSE_MATRIX) {
5658: sstartsj = *startsj_s;
5659: rstartsj = *startsj_r;
5660: bufa = *bufa_ptr;
5661: b_oth = (Mat_SeqAIJ*)(*B_oth)->data;
5662: b_otha = b_oth->a;
5663: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix P does not possess an object container");
5665: /* a-array */
5666: /*---------*/
5667: /* post receives of a-array */
5668: for (i=0; i<nrecvs; i++) {
5669: nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5670: MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
5671: }
5673: /* pack the outgoing message a-array */
5674: if (nsends) k = sstarts[0];
5675: for (i=0; i<nsends; i++) {
5676: nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5677: bufA = bufa+sstartsj[i];
5678: for (j=0; j<nrows; j++) {
5679: row = srow[k++] + B->rmap->range[rank]; /* global row idx */
5680: for (ll=0; ll<sbs; ll++) {
5681: MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5682: for (l=0; l<ncols; l++) {
5683: *bufA++ = vals[l];
5684: }
5685: MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);
5686: }
5687: }
5688: MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
5689: }
5690: /* recvs and sends of a-array are completed */
5691: i = nrecvs;
5692: while (i--) {
5693: MPI_Waitany(nrecvs_mpi,rwaits,&jj,&rstatus);
5694: }
5695: if (nsends) {MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);}
5696: PetscFree2(rwaits,swaits);
5698: if (scall == MAT_INITIAL_MATRIX) {
5699: /* put together the new matrix */
5700: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);
5702: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5703: /* Since these are PETSc arrays, change flags to free them as necessary. */
5704: b_oth = (Mat_SeqAIJ*)(*B_oth)->data;
5705: b_oth->free_a = PETSC_TRUE;
5706: b_oth->free_ij = PETSC_TRUE;
5707: b_oth->nonew = 0;
5709: PetscFree(bufj);
5710: if (!startsj_s || !bufa_ptr) {
5711: PetscFree2(sstartsj,rstartsj);
5712: PetscFree(bufa_ptr);
5713: } else {
5714: *startsj_s = sstartsj;
5715: *startsj_r = rstartsj;
5716: *bufa_ptr = bufa;
5717: }
5718: }
5720: VecScatterRestoreRemote_Private(ctx,PETSC_TRUE,&nsends,&sstarts,&srow,&sprocs,&sbs);
5721: VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE,&nrecvs,&rstarts,NULL,&rprocs,&rbs);
5722: PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);
5723: return(0);
5724: }
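/* Editor's note: illustrative reuse protocol for the routine above; the three
   buffers returned by the first call carry the message layout for later calls:

     PetscInt  *startsj_s = NULL,*startsj_r = NULL;
     MatScalar *bufa = NULL;
     Mat        B_oth;
     MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_INITIAL_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);
     ... values of B change, pattern unchanged ...
     MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_REUSE_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);
*/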
5726: /*@C
5727: MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.
5729: Not Collective
5731: Input Parameters:
5732: . A - The matrix in mpiaij format
5734: Output Parameter:
5735: + lvec - The local vector holding off-process values from the argument to a matrix-vector product
5736: . colmap - A map from global column index to local index into lvec
5737: - multScatter - A scatter from the argument of a matrix-vector product to lvec
5739: Level: developer
5741: @*/
5742: #if defined(PETSC_USE_CTABLE)
5743: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
5744: #else
5745: PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
5746: #endif
5747: {
5748: Mat_MPIAIJ *a;
5755: a = (Mat_MPIAIJ*) A->data;
5756: if (lvec) *lvec = a->lvec;
5757: if (colmap) *colmap = a->colmap;
5758: if (multScatter) *multScatter = a->Mvctx;
5759: return(0);
5760: }
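/* Editor's note: illustrative usage; any output argument may be NULL:

     Vec        lvec;
     VecScatter mvctx;
     MatGetCommunicationStructs(A,&lvec,NULL,&mvctx);
*/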
5762: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
5763: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
5764: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat,MatType,MatReuse,Mat*);
5765: #if defined(PETSC_HAVE_MKL_SPARSE)
5766: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat,MatType,MatReuse,Mat*);
5767: #endif
5768: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIBAIJ(Mat,MatType,MatReuse,Mat*);
5769: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
5770: #if defined(PETSC_HAVE_ELEMENTAL)
5771: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
5772: #endif
5773: #if defined(PETSC_HAVE_HYPRE)
5774: PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
5775: #endif
5776: PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat,MatType,MatReuse,Mat*);
5777: PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat,MatType,MatReuse,Mat*);
5778: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_IS_XAIJ(Mat);
5780: /*
5781: Computes (B'*A')' since computing B*A directly is untenable
5783: n p p
5784: ( ) ( ) ( )
5785: m ( A ) * n ( B ) = m ( C )
5786: ( ) ( ) ( )
5788: */
5789: PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
5790: {
5792: Mat At,Bt,Ct;
5795: MatTranspose(A,MAT_INITIAL_MATRIX,&At);
5796: MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);
5797: MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);
5798: MatDestroy(&At);
5799: MatDestroy(&Bt);
5800: MatTranspose(Ct,MAT_REUSE_MATRIX,&C);
5801: MatDestroy(&Ct);
5802: return(0);
5803: }
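/* Editor's note: the identity used above is C = A*B = (B^T * A^T)^T, so the
   unsupported dense*sparse product is reduced to the supported sparse*dense
   kernel at the cost of three transposes. */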
5805: PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat C)
5806: {
5808: PetscInt m=A->rmap->n,n=B->cmap->n;
5811: if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %D != B->rmap->n %D",A->cmap->n,B->rmap->n);
5812: MatSetSizes(C,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
5813: MatSetBlockSizesFromMats(C,A,B);
5814: MatSetType(C,MATMPIDENSE);
5815: MatMPIDenseSetPreallocation(C,NULL);
5816: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
5817: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
5819: C->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
5820: return(0);
5821: }
5823: /* ----------------------------------------------------------------*/
5824: static PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ_AB(Mat C)
5825: {
5826: Mat_Product *product = C->product;
5827: Mat A = product->A,B=product->B;
5830: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
5831: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
5833: C->ops->matmultsymbolic = MatMatMultSymbolic_MPIDense_MPIAIJ;
5834: C->ops->productsymbolic = MatProductSymbolic_AB;
5835: return(0);
5836: }
5838: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ(Mat C)
5839: {
5841: Mat_Product *product = C->product;
5844: MatSetType(C,MATMPIDENSE);
5845: if (product->type == MATPRODUCT_AB) {
5846: MatProductSetFromOptions_MPIDense_MPIAIJ_AB(C);
5847: } else SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_SUP,"MatProduct type %s is not supported for MPIDense and MPIAIJ matrices",MatProductTypes[product->type]);
5848: return(0);
5849: }
5850: /* ----------------------------------------------------------------*/
5852: /*MC
5853: MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.
5855: Options Database Keys:
5856: . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()
5858: Level: beginner
5860: Notes:
5861: MatSetValues() may be called for this matrix type with a NULL argument for the numerical values;
5862: in this case the values associated with the rows and columns one passes in are set to zero
5863: in the matrix.
5865: MatSetOption(A,MAT_STRUCTURE_ONLY,PETSC_TRUE) may be called for this matrix type. In this case no
5866: space is allocated for the nonzero entries and any entries passed with MatSetValues() are ignored.
5868: .seealso: MatCreateAIJ()
5869: M*/
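/* Editor's note: illustrative creation through the options database (run with
   -mat_type mpiaij); the global sizes M,N and the preallocation counts are
   placeholders:

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
     MatSetFromOptions(A);
     MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);
*/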
5871: PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
5872: {
5873: Mat_MPIAIJ *b;
5875: PetscMPIInt size;
5878: MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);
5880: PetscNewLog(B,&b);
5881: B->data = (void*)b;
5882: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
5883: B->assembled = PETSC_FALSE;
5884: B->insertmode = NOT_SET_VALUES;
5885: b->size = size;
5887: MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);
5889: /* build cache for off array entries formed */
5890: MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);
5892: b->donotstash = PETSC_FALSE;
5893: b->colmap = 0;
5894: b->garray = 0;
5895: b->roworiented = PETSC_TRUE;
5897: /* stuff used for matrix vector multiply */
5898: b->lvec = NULL;
5899: b->Mvctx = NULL;
5901: /* stuff for MatGetRow() */
5902: b->rowindices = 0;
5903: b->rowvalues = 0;
5904: b->getrowactive = PETSC_FALSE;
5906: /* flexible pointer used in CUSP/CUSPARSE classes */
5907: b->spptr = NULL;
5909: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);
5910: PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);
5911: PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);
5912: PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);
5913: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);
5914: PetscObjectComposeFunction((PetscObject)B,"MatResetPreallocation_C",MatResetPreallocation_MPIAIJ);
5915: PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);
5916: PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);
5917: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);
5918: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijsell_C",MatConvert_MPIAIJ_MPIAIJSELL);
5919: #if defined(PETSC_HAVE_MKL_SPARSE)
5920: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijmkl_C",MatConvert_MPIAIJ_MPIAIJMKL);
5921: #endif
5922: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);
5923: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpibaij_C",MatConvert_MPIAIJ_MPIBAIJ);
5924: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);
5925: #if defined(PETSC_HAVE_ELEMENTAL)
5926: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);
5927: #endif
5928: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_XAIJ_IS);
5929: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisell_C",MatConvert_MPIAIJ_MPISELL);
5930: PetscObjectComposeFunction((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",MatMatMultSymbolic_MPIDense_MPIAIJ);
5931: PetscObjectComposeFunction((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",MatMatMultNumeric_MPIDense_MPIAIJ);
5932: #if defined(PETSC_HAVE_HYPRE)
5933: PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);
5934: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_transpose_mpiaij_mpiaij_C",MatProductSetFromOptions_Transpose_AIJ_AIJ);
5935: #endif
5936: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_is_mpiaij_C",MatProductSetFromOptions_IS_XAIJ);
5937: PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_mpiaij_mpiaij_C",MatProductSetFromOptions_MPIAIJ);
5938: PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);
5939: return(0);
5940: }
5942: /*@C
5943: MatCreateMPIAIJWithSplitArrays - creates a MPI AIJ matrix using arrays that contain the "diagonal"
5944: and "off-diagonal" part of the matrix in CSR format.
5946: Collective
5948: Input Parameters:
5949: + comm - MPI communicator
5950: . m - number of local rows (Cannot be PETSC_DECIDE)
5951: . n - This value should be the same as the local size used in creating the
5952: x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
5953: calculated if N is given). For square matrices n is almost always m.
5954: . M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
5955: . N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
5956: . i - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
5957: . j - column indices
5958: . a - matrix values
5959: . oi - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix
5960: . oj - column indices
5961: - oa - matrix values
5963: Output Parameter:
5964: . mat - the matrix
5966: Level: advanced
5968: Notes:
5969: The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
5970: must free the arrays once the matrix has been destroyed and not before.
5972: The i and j indices are 0-based.
5974: See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix
5976: This sets local rows and cannot be used to set off-processor values.
5978: Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
5979: legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
5980: not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
5981: the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
5982: keep track of the underlying array. Use MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
5983: communication if it is known that only local entries will be set.
5985: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
5986: MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
5987: @*/
5988: PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
5989: {
5991: Mat_MPIAIJ *maij;
5994: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
5995: if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
5996: if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
5997: MatCreate(comm,mat);
5998: MatSetSizes(*mat,m,n,M,N);
5999: MatSetType(*mat,MATMPIAIJ);
6000: maij = (Mat_MPIAIJ*) (*mat)->data;
6002: (*mat)->preallocated = PETSC_TRUE;
6004: PetscLayoutSetUp((*mat)->rmap);
6005: PetscLayoutSetUp((*mat)->cmap);
6007: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);
6008: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);
6010: MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);
6011: MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);
6012: MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);
6013: MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);
6015: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);
6016: MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);
6017: MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);
6018: MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);
6019: MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);
6020: return(0);
6021: }
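/* Editor's note: illustrative call (the six CSR arrays are placeholders); as the
   code above shows, j indexes the n local columns of the diagonal block while oj
   holds global column indices for the off-diagonal block, and none of the arrays
   are copied:

     Mat A;
     MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD,m,n,PETSC_DETERMINE,PETSC_DETERMINE,
                                    i,j,a,oi,oj,oa,&A);
*/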
6023: /*
6024: Special version for direct calls from Fortran
6025: */
6026: #include <petsc/private/fortranimpl.h>
6028: /* Change these macros so they can be used in a void function */
6029: #undef CHKERRQ
6030: #define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
6031: #undef SETERRQ2
6032: #define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
6033: #undef SETERRQ3
6034: #define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
6035: #undef SETERRQ
6036: #define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)
6038: #if defined(PETSC_HAVE_FORTRAN_CAPS)
6039: #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
6040: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
6041: #define matsetvaluesmpiaij_ matsetvaluesmpiaij
6042: #else
6043: #endif
6044: PETSC_EXTERN void matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
6045: {
6046: Mat mat = *mmat;
6047: PetscInt m = *mm, n = *mn;
6048: InsertMode addv = *maddv;
6049: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
6050: PetscScalar value;
6053: MatCheckPreallocated(mat,1);
6054: if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
6056: #if defined(PETSC_USE_DEBUG)
6057: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
6058: #endif
6059: {
6060: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
6061: PetscInt cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
6062: PetscBool roworiented = aij->roworiented;
6064: /* Some Variables required in the macro */
6065: Mat A = aij->A;
6066: Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
6067: PetscInt *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
6068: MatScalar *aa = a->a;
6069: PetscBool ignorezeroentries = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
6070: Mat B = aij->B;
6071: Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
6072: PetscInt *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
6073: MatScalar *ba = b->a;
6074: /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
6075: * cannot use "#if defined" inside a macro. */
6076: PETSC_UNUSED PetscBool inserted = PETSC_FALSE;
6078: PetscInt *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
6079: PetscInt nonew = a->nonew;
6080: MatScalar *ap1,*ap2;
6083: for (i=0; i<m; i++) {
6084: if (im[i] < 0) continue;
6085: #if defined(PETSC_USE_DEBUG)
6086: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
6087: #endif
6088: if (im[i] >= rstart && im[i] < rend) {
6089: row = im[i] - rstart;
6090: lastcol1 = -1;
6091: rp1 = aj + ai[row];
6092: ap1 = aa + ai[row];
6093: rmax1 = aimax[row];
6094: nrow1 = ailen[row];
6095: low1 = 0;
6096: high1 = nrow1;
6097: lastcol2 = -1;
6098: rp2 = bj + bi[row];
6099: ap2 = ba + bi[row];
6100: rmax2 = bimax[row];
6101: nrow2 = bilen[row];
6102: low2 = 0;
6103: high2 = nrow2;
6105: for (j=0; j<n; j++) {
6106: if (roworiented) value = v[i*n+j];
6107: else value = v[i+j*m];
6108: if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
6109: if (in[j] >= cstart && in[j] < cend) {
6110: col = in[j] - cstart;
6111: MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
6112: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
6113: if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) A->offloadmask = PETSC_OFFLOAD_CPU;
6114: #endif
6115: } else if (in[j] < 0) continue;
6116: #if defined(PETSC_USE_DEBUG)
6117: /* extra brace on SETERRQ2() is required for --with-errorchecking=0 - due to the next 'else' clause */
6118: else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
6119: #endif
6120: else {
6121: if (mat->was_assembled) {
6122: if (!aij->colmap) {
6123: MatCreateColmap_MPIAIJ_Private(mat);
6124: }
6125: #if defined(PETSC_USE_CTABLE)
6126: PetscTableFind(aij->colmap,in[j]+1,&col);
6127: col--;
6128: #else
6129: col = aij->colmap[in[j]] - 1;
6130: #endif
6131: if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
6132: MatDisAssemble_MPIAIJ(mat);
6133: col = in[j];
6134: /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
6135: B = aij->B;
6136: b = (Mat_SeqAIJ*)B->data;
6137: bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
6138: rp2 = bj + bi[row];
6139: ap2 = ba + bi[row];
6140: rmax2 = bimax[row];
6141: nrow2 = bilen[row];
6142: low2 = 0;
6143: high2 = nrow2;
6144: bm = aij->B->rmap->n;
6145: ba = b->a;
6146: inserted = PETSC_FALSE;
6147: }
6148: } else col = in[j];
6149: MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
6150: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
6151: if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && inserted) B->offloadmask = PETSC_OFFLOAD_CPU;
6152: #endif
6153: }
6154: }
6155: } else if (!aij->donotstash) {
6156: if (roworiented) {
6157: MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6158: } else {
6159: MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));
6160: }
6161: }
6162: }
6163: }
6164: PetscFunctionReturnVoid();
6165: }