libflame 12600

Functions
void bl1_sinvscalm (conj1_t conj, int m, int n, float *alpha, float *a, int a_rs, int a_cs)
void bl1_dinvscalm (conj1_t conj, int m, int n, double *alpha, double *a, int a_rs, int a_cs)
void bl1_csinvscalm (conj1_t conj, int m, int n, float *alpha, scomplex *a, int a_rs, int a_cs)
void bl1_cinvscalm (conj1_t conj, int m, int n, scomplex *alpha, scomplex *a, int a_rs, int a_cs)
void bl1_zdinvscalm (conj1_t conj, int m, int n, double *alpha, dcomplex *a, int a_rs, int a_cs)
void bl1_zinvscalm (conj1_t conj, int m, int n, dcomplex *alpha, dcomplex *a, int a_rs, int a_cs)
void bl1_cinvscalm ( conj1_t conj, int m, int n, scomplex *alpha, scomplex *a, int a_rs, int a_cs )
References bl1_cinvert2s(), bl1_cscal(), bl1_is_row_storage(), bl1_is_vector(), bl1_vector_dim(), bl1_vector_inc(), bl1_zero_dim2(), and BLIS1_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
	scomplex  alpha_inv;
	scomplex* a_begin;
	int       lda, inca;
	int       n_iter;
	int       n_elem;
	int       j;

	// Return early if possible.
	if ( bl1_zero_dim2( m, n ) ) return;
	if ( bl1_ceq1( alpha ) ) return;

	// Handle cases where A is a vector to ensure that the underlying scal
	// gets invoked only once.
	if ( bl1_is_vector( m, n ) )
	{
		// Initialize with values appropriate for a vector.
		n_iter = 1;
		n_elem = bl1_vector_dim( m, n );
		lda    = 1; // multiplied by zero when n_iter == 1; not needed.
		inca   = bl1_vector_inc( BLIS1_NO_TRANSPOSE, m, n, a_rs, a_cs );
	}
	else // matrix case
	{
		// Initialize with optimal values for column-major storage.
		n_iter = n;
		n_elem = m;
		lda    = a_cs;
		inca   = a_rs;

		// An optimization: if A is row-major, then let's access the matrix
		// by rows instead of by columns to increase spatial locality.
		if ( bl1_is_row_storage( a_rs, a_cs ) )
		{
			bl1_swap_ints( n_iter, n_elem );
			bl1_swap_ints( lda, inca );
		}
	}

	bl1_cinvert2s( conj, alpha, &alpha_inv );

	for ( j = 0; j < n_iter; j++ )
	{
		a_begin = a + j*lda;

		bl1_cscal( n_elem,
		           &alpha_inv,
		           a_begin, inca );
	}
}
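A minimal usage sketch for bl1_cinvscalm follows. It assumes the BLIS1 header is named "blis1.h", that scomplex exposes .real/.imag fields, and that BLIS1_CONJUGATE is a valid conj1_t value; none of these details are stated on this page, so adjust them to your build.

#include "blis1.h"

int main( void )
{
	// A 2x3 single-precision complex matrix stored in column-major order:
	// unit row stride, column stride equal to the number of rows.
	scomplex A[6];
	scomplex alpha;
	int      m = 2, n = 3;
	int      a_rs = 1, a_cs = m;
	int      i;

	for ( i = 0; i < m * n; i++ ) { A[i].real = ( float ) i; A[i].imag = 1.0F; }
	alpha.real = 0.0F;
	alpha.imag = 2.0F;

	// Overwrite A with ( 1 / conj(alpha) ) * A; each of the n columns is
	// handled by one bl1_cscal() call.
	bl1_cinvscalm( BLIS1_CONJUGATE, m, n, &alpha, A, a_rs, a_cs );

	return 0;
}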
void bl1_csinvscalm ( conj1_t conj, int m, int n, float *alpha, scomplex *a, int a_rs, int a_cs )
References bl1_csscal(), bl1_is_row_storage(), bl1_is_vector(), bl1_sinvert2s(), bl1_vector_dim(), bl1_vector_inc(), bl1_zero_dim2(), and BLIS1_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
	float     alpha_inv;
	scomplex* a_begin;
	int       lda, inca;
	int       n_iter;
	int       n_elem;
	int       j;

	// Return early if possible.
	if ( bl1_zero_dim2( m, n ) ) return;
	if ( bl1_seq1( alpha ) ) return;

	// Handle cases where A is a vector to ensure that the underlying scal
	// gets invoked only once.
	if ( bl1_is_vector( m, n ) )
	{
		// Initialize with values appropriate for a vector.
		n_iter = 1;
		n_elem = bl1_vector_dim( m, n );
		lda    = 1; // multiplied by zero when n_iter == 1; not needed.
		inca   = bl1_vector_inc( BLIS1_NO_TRANSPOSE, m, n, a_rs, a_cs );
	}
	else // matrix case
	{
		// Initialize with optimal values for column-major storage.
		n_iter = n;
		n_elem = m;
		lda    = a_cs;
		inca   = a_rs;

		// An optimization: if A is row-major, then let's access the matrix
		// by rows instead of by columns to increase spatial locality.
		if ( bl1_is_row_storage( a_rs, a_cs ) )
		{
			bl1_swap_ints( n_iter, n_elem );
			bl1_swap_ints( lda, inca );
		}
	}

	bl1_sinvert2s( conj, alpha, &alpha_inv );

	for ( j = 0; j < n_iter; j++ )
	{
		a_begin = a + j*lda;

		bl1_csscal( n_elem,
		            &alpha_inv,
		            a_begin, inca );
	}
}
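The mixed-precision variants take a real scalar and a complex matrix. The sketch below illustrates bl1_csinvscalm; the header name "blis1.h" and the conj1_t value BLIS1_NO_CONJUGATE are assumptions, not facts stated on this page (conjugation has no effect on a real alpha).

#include "blis1.h"

int main( void )
{
	// Divide a 2x2 single-precision complex matrix by a real scalar.
	scomplex A[4];
	float    alpha = 2.0F;
	int      m = 2, n = 2;
	int      a_rs = 1, a_cs = m;   // column-major storage
	int      i;

	for ( i = 0; i < m * n; i++ ) { A[i].real = 2.0F * i; A[i].imag = 4.0F; }

	// Each column of A is scaled by 1/alpha via one bl1_csscal() call.
	bl1_csinvscalm( BLIS1_NO_CONJUGATE, m, n, &alpha, A, a_rs, a_cs );

	return 0;
}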
void bl1_dinvscalm ( conj1_t conj, int m, int n, double *alpha, double *a, int a_rs, int a_cs )
References bl1_dinvert2s(), bl1_dscal(), bl1_is_row_storage(), bl1_is_vector(), bl1_vector_dim(), bl1_vector_inc(), bl1_zero_dim2(), and BLIS1_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
	double  alpha_inv;
	double* a_begin;
	int     lda, inca;
	int     n_iter;
	int     n_elem;
	int     j;

	// Return early if possible.
	if ( bl1_zero_dim2( m, n ) ) return;
	if ( bl1_deq1( alpha ) ) return;

	// Handle cases where A is a vector to ensure that the underlying scal
	// gets invoked only once.
	if ( bl1_is_vector( m, n ) )
	{
		// Initialize with values appropriate for a vector.
		n_iter = 1;
		n_elem = bl1_vector_dim( m, n );
		lda    = 1; // multiplied by zero when n_iter == 1; not needed.
		inca   = bl1_vector_inc( BLIS1_NO_TRANSPOSE, m, n, a_rs, a_cs );
	}
	else // matrix case
	{
		// Initialize with optimal values for column-major storage.
		n_iter = n;
		n_elem = m;
		lda    = a_cs;
		inca   = a_rs;

		// An optimization: if A is row-major, then let's access the matrix
		// by rows instead of by columns to increase spatial locality.
		if ( bl1_is_row_storage( a_rs, a_cs ) )
		{
			bl1_swap_ints( n_iter, n_elem );
			bl1_swap_ints( lda, inca );
		}
	}

	bl1_dinvert2s( conj, alpha, &alpha_inv );

	for ( j = 0; j < n_iter; j++ )
	{
		a_begin = a + j*lda;

		bl1_dscal( n_elem,
		           &alpha_inv,
		           a_begin, inca );
	}
}
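The a_rs/a_cs arguments describe general (row, column) strides, so row-major matrices are supported directly. Below is a sketch under the assumption that the BLIS1 header is named "blis1.h" and that BLIS1_NO_CONJUGATE is the no-op conjugation value.

#include "blis1.h"

int main( void )
{
	// A 3x4 double matrix stored row-major: element (i,j) lives at
	// A[ i*a_rs + j*a_cs ] with a_rs = n and a_cs = 1.
	double A[12];
	double alpha = 4.0;
	int    m = 3, n = 4;
	int    a_rs = n, a_cs = 1;
	int    i;

	for ( i = 0; i < m * n; i++ ) A[i] = ( double ) i;

	// Because a_cs == 1, bl1_dinvscalm() detects row-major storage and
	// walks the matrix by rows (unit stride) rather than by columns.
	bl1_dinvscalm( BLIS1_NO_CONJUGATE, m, n, &alpha, A, a_rs, a_cs );

	return 0;
}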
void bl1_sinvscalm ( conj1_t conj, int m, int n, float *alpha, float *a, int a_rs, int a_cs )
References bl1_is_row_storage(), bl1_is_vector(), bl1_sinvert2s(), bl1_sscal(), bl1_vector_dim(), bl1_vector_inc(), bl1_zero_dim2(), and BLIS1_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
	float  alpha_inv;
	float* a_begin;
	int    lda, inca;
	int    n_iter;
	int    n_elem;
	int    j;

	// Return early if possible.
	if ( bl1_zero_dim2( m, n ) ) return;
	if ( bl1_seq1( alpha ) ) return;

	// Handle cases where A is a vector to ensure that the underlying scal
	// gets invoked only once.
	if ( bl1_is_vector( m, n ) )
	{
		// Initialize with values appropriate for a vector.
		n_iter = 1;
		n_elem = bl1_vector_dim( m, n );
		lda    = 1; // multiplied by zero when n_iter == 1; not needed.
		inca   = bl1_vector_inc( BLIS1_NO_TRANSPOSE, m, n, a_rs, a_cs );
	}
	else // matrix case
	{
		// Initialize with optimal values for column-major storage.
		n_iter = n;
		n_elem = m;
		lda    = a_cs;
		inca   = a_rs;

		// An optimization: if A is row-major, then let's access the matrix
		// by rows instead of by columns to increase spatial locality.
		if ( bl1_is_row_storage( a_rs, a_cs ) )
		{
			bl1_swap_ints( n_iter, n_elem );
			bl1_swap_ints( lda, inca );
		}
	}

	bl1_sinvert2s( conj, alpha, &alpha_inv );

	for ( j = 0; j < n_iter; j++ )
	{
		a_begin = a + j*lda;

		bl1_sscal( n_elem,
		           &alpha_inv,
		           a_begin, inca );
	}
}
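When either dimension is 1, the routine takes the vector path and issues a single scal call over all elements. A short sketch (same header-name and enum-value assumptions as the examples above):

#include "blis1.h"

int main( void )
{
	// An m x 1 matrix is treated as a vector of m elements.
	float x[5] = { 2.0F, 4.0F, 6.0F, 8.0F, 10.0F };
	float alpha = 2.0F;
	int   m = 5, n = 1;
	int   a_rs = 1, a_cs = m;

	// The loop collapses to one bl1_sscal() call; x becomes { 1, 2, 3, 4, 5 }.
	bl1_sinvscalm( BLIS1_NO_CONJUGATE, m, n, &alpha, x, a_rs, a_cs );

	return 0;
}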
void bl1_zdinvscalm ( conj1_t conj, int m, int n, double *alpha, dcomplex *a, int a_rs, int a_cs )
References bl1_dinvert2s(), bl1_is_row_storage(), bl1_is_vector(), bl1_vector_dim(), bl1_vector_inc(), bl1_zdscal(), bl1_zero_dim2(), and BLIS1_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
	double    alpha_inv;
	dcomplex* a_begin;
	int       lda, inca;
	int       n_iter;
	int       n_elem;
	int       j;

	// Return early if possible.
	if ( bl1_zero_dim2( m, n ) ) return;
	if ( bl1_deq1( alpha ) ) return;

	// Handle cases where A is a vector to ensure that the underlying scal
	// gets invoked only once.
	if ( bl1_is_vector( m, n ) )
	{
		// Initialize with values appropriate for a vector.
		n_iter = 1;
		n_elem = bl1_vector_dim( m, n );
		lda    = 1; // multiplied by zero when n_iter == 1; not needed.
		inca   = bl1_vector_inc( BLIS1_NO_TRANSPOSE, m, n, a_rs, a_cs );
	}
	else // matrix case
	{
		// Initialize with optimal values for column-major storage.
		n_iter = n;
		n_elem = m;
		lda    = a_cs;
		inca   = a_rs;

		// An optimization: if A is row-major, then let's access the matrix
		// by rows instead of by columns to increase spatial locality.
		if ( bl1_is_row_storage( a_rs, a_cs ) )
		{
			bl1_swap_ints( n_iter, n_elem );
			bl1_swap_ints( lda, inca );
		}
	}

	bl1_dinvert2s( conj, alpha, &alpha_inv );

	for ( j = 0; j < n_iter; j++ )
	{
		a_begin = a + j*lda;

		bl1_zdscal( n_elem,
		            &alpha_inv,
		            a_begin, inca );
	}
}
void bl1_zinvscalm ( conj1_t conj, int m, int n, dcomplex *alpha, dcomplex *a, int a_rs, int a_cs )
References bl1_is_row_storage(), bl1_is_vector(), bl1_vector_dim(), bl1_vector_inc(), bl1_zero_dim2(), bl1_zinvert2s(), bl1_zscal(), and BLIS1_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
	dcomplex  alpha_inv;
	dcomplex* a_begin;
	int       lda, inca;
	int       n_iter;
	int       n_elem;
	int       j;

	// Return early if possible.
	if ( bl1_zero_dim2( m, n ) ) return;
	if ( bl1_zeq1( alpha ) ) return;

	// Handle cases where A is a vector to ensure that the underlying scal
	// gets invoked only once.
	if ( bl1_is_vector( m, n ) )
	{
		// Initialize with values appropriate for a vector.
		n_iter = 1;
		n_elem = bl1_vector_dim( m, n );
		lda    = 1; // multiplied by zero when n_iter == 1; not needed.
		inca   = bl1_vector_inc( BLIS1_NO_TRANSPOSE, m, n, a_rs, a_cs );
	}
	else // matrix case
	{
		// Initialize with optimal values for column-major storage.
		n_iter = n;
		n_elem = m;
		lda    = a_cs;
		inca   = a_rs;

		// An optimization: if A is row-major, then let's access the matrix
		// by rows instead of by columns to increase spatial locality.
		if ( bl1_is_row_storage( a_rs, a_cs ) )
		{
			bl1_swap_ints( n_iter, n_elem );
			bl1_swap_ints( lda, inca );
		}
	}

	bl1_zinvert2s( conj, alpha, &alpha_inv );

	for ( j = 0; j < n_iter; j++ )
	{
		a_begin = a + j*lda;

		bl1_zscal( n_elem,
		           &alpha_inv,
		           a_begin, inca );
	}
}