X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=libgfortran%2Fgenerated%2Fmatmul_c16.c;h=f7301114b377e6d1914275b54c130ce9335fcdf0;hp=451ea82f6e84263a078db48a4a1061550135d307;hb=4e8e57b0ce67551ca61b7883e73586ba805f0a61;hpb=02013060b6833482f7f0f191caea397cbbae8758

diff --git a/libgfortran/generated/matmul_c16.c b/libgfortran/generated/matmul_c16.c
index 451ea82f6e8..f7301114b37 100644
--- a/libgfortran/generated/matmul_c16.c
+++ b/libgfortran/generated/matmul_c16.c
@@ -1,5 +1,5 @@
 /* Implementation of the MATMUL intrinsic
-   Copyright 2002, 2005 Free Software Foundation, Inc.
+   Copyright 2002, 2005, 2006 Free Software Foundation, Inc.
    Contributed by Paul Brook
 
 This file is part of the GNU Fortran 95 runtime library (libgfortran).
@@ -36,25 +36,54 @@ Boston, MA 02110-1301, USA. */
 
 #if defined (HAVE_GFC_COMPLEX_16)
 
-/* This is a C version of the following fortran pseudo-code. The key
-   point is the loop order -- we access all arrays column-first, which
-   improves the performance enough to boost galgel spec score by 50%.
+/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
+   passed to us by the front-end, in which case we'll call it for large
+   matrices.  */
+
+typedef void (*blas_call)(const char *, const char *, const int *, const int *,
+                          const int *, const GFC_COMPLEX_16 *, const GFC_COMPLEX_16 *,
+                          const int *, const GFC_COMPLEX_16 *, const int *,
+                          const GFC_COMPLEX_16 *, GFC_COMPLEX_16 *, const int *,
+                          int, int);
+
+/* The order of loops is different in the case of plain matrix
+   multiplication C=MATMUL(A,B), and in the frequent special case where
+   the argument A is the temporary result of a TRANSPOSE intrinsic:
+   C=MATMUL(TRANSPOSE(A),B).  Transposed temporaries are detected by
+   looking at their strides.
+
+   The equivalent Fortran pseudo-code is:
 
    DIMENSION A(M,COUNT), B(COUNT,N), C(M,N)
-   C = 0
-   DO J=1,N
-     DO K=1,COUNT
+   IF (.NOT.IS_TRANSPOSED(A)) THEN
+     C = 0
+     DO J=1,N
+       DO K=1,COUNT
+         DO I=1,M
+           C(I,J) = C(I,J)+A(I,K)*B(K,J)
+   ELSE
+     DO J=1,N
       DO I=1,M
-         C(I,J) = C(I,J)+A(I,K)*B(K,J)
+         S = 0
+         DO K=1,COUNT
+           S = S+A(I,K)*B(K,J)
+         C(I,J) = S
+   ENDIF
 */
 
+/* If try_blas is set to a nonzero value, then the matmul function will
+   see if there is a way to perform the matrix multiplication by a call
+   to the BLAS gemm function.  */
+
 extern void matmul_c16 (gfc_array_c16 * const restrict retarray,
-        gfc_array_c16 * const restrict a, gfc_array_c16 * const restrict b);
+        gfc_array_c16 * const restrict a, gfc_array_c16 * const restrict b, int try_blas,
+        int blas_limit, blas_call gemm);
 export_proto(matmul_c16);
 
 void
 matmul_c16 (gfc_array_c16 * const restrict retarray,
-        gfc_array_c16 * const restrict a, gfc_array_c16 * const restrict b)
+        gfc_array_c16 * const restrict a, gfc_array_c16 * const restrict b, int try_blas,
+        int blas_limit, blas_call gemm)
 {
   const GFC_COMPLEX_16 * restrict abase;
   const GFC_COMPLEX_16 * restrict bbase;
@@ -107,15 +136,6 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
       retarray->offset = 0;
     }
 
-  if (retarray->dim[0].stride == 0)
-    retarray->dim[0].stride = 1;
-
-  /* This prevents constifying the input arguments.  */
-  if (a->dim[0].stride == 0)
-    a->dim[0].stride = 1;
-  if (b->dim[0].stride == 0)
-    b->dim[0].stride = 1;
-
   if (GFC_DESCRIPTOR_RANK (retarray) == 1)
     {
@@ -173,6 +193,31 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
   bbase = b->data;
   dest = retarray->data;
 
+
+  /* Now that everything is set up, we're performing the multiplication
+     itself.  */
+
+#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+
+  if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
+      && (bxstride == 1 || bystride == 1)
+      && (((float) xcount) * ((float) ycount) * ((float) count)
+          > POW3(blas_limit)))
+    {
+      const int m = xcount, n = ycount, k = count, ldc = rystride;
+      const GFC_COMPLEX_16 one = 1, zero = 0;
+      const int lda = (axstride == 1) ? aystride : axstride,
+                ldb = (bxstride == 1) ? bystride : bxstride;
+
+      if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
+        {
+          assert (gemm != NULL);
+          gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
+                &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
+          return;
+        }
+    }
+
   if (rxstride == 1 && axstride == 1 && bxstride == 1)
     {
       const GFC_COMPLEX_16 * restrict bbase_y;
@@ -180,8 +225,8 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
       const GFC_COMPLEX_16 * restrict abase_n;
       GFC_COMPLEX_16 bbase_yn;
 
-      if (rystride == ycount)
-        memset (dest, 0, (sizeof (GFC_COMPLEX_16) * size0((array_t *) retarray)));
+      if (rystride == xcount)
+        memset (dest, 0, (sizeof (GFC_COMPLEX_16) * xcount * ycount));
       else
         {
           for (y = 0; y < ycount; y++)
@@ -204,7 +249,45 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
             }
         }
     }
-  else
+  else if (rxstride == 1 && aystride == 1 && bxstride == 1)
+    {
+      if (GFC_DESCRIPTOR_RANK (a) != 1)
+        {
+          const GFC_COMPLEX_16 *restrict abase_x;
+          const GFC_COMPLEX_16 *restrict bbase_y;
+          GFC_COMPLEX_16 *restrict dest_y;
+          GFC_COMPLEX_16 s;
+
+          for (y = 0; y < ycount; y++)
+            {
+              bbase_y = &bbase[y*bystride];
+              dest_y = &dest[y*rystride];
+              for (x = 0; x < xcount; x++)
+                {
+                  abase_x = &abase[x*axstride];
+                  s = (GFC_COMPLEX_16) 0;
+                  for (n = 0; n < count; n++)
+                    s += abase_x[n] * bbase_y[n];
+                  dest_y[x] = s;
+                }
+            }
+        }
+      else
+        {
+          const GFC_COMPLEX_16 *restrict bbase_y;
+          GFC_COMPLEX_16 s;
+
+          for (y = 0; y < ycount; y++)
+            {
+              bbase_y = &bbase[y*bystride];
+              s = (GFC_COMPLEX_16) 0;
+              for (n = 0; n < count; n++)
+                s += abase[n*axstride] * bbase_y[n];
+              dest[y*rystride] = s;
+            }
+        }
+    }
+  else if (axstride < aystride)
     {
       for (y = 0; y < ycount; y++)
         for (x = 0; x < xcount; x++)
@@ -216,6 +299,41 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
             /* dest[x,y] += a[x,n] * b[n,y] */
             dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
     }
+  else if (GFC_DESCRIPTOR_RANK (a) == 1)
+    {
+      const GFC_COMPLEX_16 *restrict bbase_y;
+      GFC_COMPLEX_16 s;
+
+      for (y = 0; y < ycount; y++)
+        {
+          bbase_y = &bbase[y*bystride];
+          s = (GFC_COMPLEX_16) 0;
+          for (n = 0; n < count; n++)
+            s += abase[n*axstride] * bbase_y[n*bxstride];
+          dest[y*rxstride] = s;
+        }
+    }
+  else
+    {
+      const GFC_COMPLEX_16 *restrict abase_x;
+      const GFC_COMPLEX_16 *restrict bbase_y;
+      GFC_COMPLEX_16 *restrict dest_y;
+      GFC_COMPLEX_16 s;
+
+      for (y = 0; y < ycount; y++)
+        {
+          bbase_y = &bbase[y*bystride];
+          dest_y = &dest[y*rystride];
+          for (x = 0; x < xcount; x++)
+            {
+              abase_x = &abase[x*axstride];
+              s = (GFC_COMPLEX_16) 0;
+              for (n = 0; n < count; n++)
+                s += abase_x[n*aystride] * bbase_y[n*bxstride];
+              dest_y[x*rxstride] = s;
+            }
+        }
+    }
 }
 
 #endif
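For reference, the blas_call typedef introduced by this patch follows the Fortran-77 calling convention of the BLAS ?gemm routines: TRANSA and TRANSB are character arguments, every scalar is passed by reference, and the two trailing int parameters are the hidden lengths of those character arguments. The sketch below is not part of the patch; it is a minimal stand-in with the same signature, limited to the no-transpose case, and it fakes GFC_COMPLEX_16 with long double _Complex purely for illustration (the real type comes from libgfortran.h). The names reference_gemm, transa_len and transb_len are invented here.

#include <complex.h>

/* Illustrative stand-in only; libgfortran defines the real GFC_COMPLEX_16.  */
typedef long double _Complex GFC_COMPLEX_16;

/* Reference routine with the blas_call shape: column-major
   C(m,n) = alpha*A(m,k)*B(k,n) + beta*C(m,n), "N","N" case only.  */
void
reference_gemm (const char *transa, const char *transb,
                const int *m, const int *n, const int *k,
                const GFC_COMPLEX_16 *alpha, const GFC_COMPLEX_16 *a,
                const int *lda, const GFC_COMPLEX_16 *b, const int *ldb,
                const GFC_COMPLEX_16 *beta, GFC_COMPLEX_16 *c, const int *ldc,
                int transa_len, int transb_len)
{
  /* Transpose flags and their hidden lengths are ignored in this sketch.  */
  (void) transa; (void) transb; (void) transa_len; (void) transb_len;

  for (int j = 0; j < *n; j++)
    for (int i = 0; i < *m; i++)
      {
        GFC_COMPLEX_16 s = 0;
        for (int l = 0; l < *k; l++)
          s += a[i + l * *lda] * b[l + j * *ldb];
        /* Like a real gemm, do not read C when beta is zero.  */
        c[i + j * *ldc] = (*beta == 0) ? *alpha * s
                                       : *alpha * s + *beta * c[i + j * *ldc];
      }
}

A routine of this shape is what the front-end hands to matmul_c16 through the gemm parameter; as the new block in the patch shows, the library only ever calls it with alpha = 1 and beta = 0.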
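The dispatch condition guarding that gemm call is worth restating: the external BLAS path is taken only when the result has unit stride in its first dimension, each operand is contiguous along one of its dimensions (plain or transposed layout), and the amount of work xcount*ycount*count exceeds blas_limit cubed. The self-contained sketch below mirrors that predicate outside libgfortran; the function name would_use_blas and the sample sizes are made up for illustration, while try_blas and blas_limit are the knobs gfortran exposes as -fexternal-blas and -fblas-matmul-limit=n.

#include <stdio.h>

/* Mirrors the test added by the patch: result contiguous in its first
   dimension, each operand contiguous in one dimension, and enough work
   (xcount*ycount*count > blas_limit**3) to amortize the library call.  */
int
would_use_blas (int try_blas, int blas_limit,
                int rxstride, int axstride, int aystride,
                int bxstride, int bystride,
                int xcount, int ycount, int count)
{
  float work = (float) xcount * (float) ycount * (float) count;
  float limit = (float) blas_limit * (float) blas_limit * (float) blas_limit;

  return try_blas
         && rxstride == 1
         && (axstride == 1 || aystride == 1)
         && (bxstride == 1 || bystride == 1)
         && work > limit;
}

int
main (void)
{
  /* 200x200 operands with unit strides: 200**3 = 8e6 exceeds 30**3 = 27000,
     so the BLAS path would be taken (prints 1).  */
  printf ("%d\n", would_use_blas (1, 30, 1, 1, 200, 1, 200, 200, 200, 200));

  /* A small 4x4 product stays on the open-coded loops (prints 0).  */
  printf ("%d\n", would_use_blas (1, 30, 1, 1, 4, 1, 4, 4, 4, 4));
  return 0;
}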