libflame

Functions
void bl1_ssymmize( conj1_t conj, uplo1_t uplo, int m, float*    a, int a_rs, int a_cs )
void bl1_dsymmize( conj1_t conj, uplo1_t uplo, int m, double*   a, int a_rs, int a_cs )
void bl1_csymmize( conj1_t conj, uplo1_t uplo, int m, scomplex* a, int a_rs, int a_cs )
void bl1_zsymmize( conj1_t conj, uplo1_t uplo, int m, dcomplex* a, int a_rs, int a_cs )
void bl1_csymmize( conj1_t conj, uplo1_t uplo, int m, scomplex* a, int a_rs, int a_cs )
References bl1_ccopyv(), bl1_is_col_storage(), bl1_is_conj(), bl1_is_gen_storage(), bl1_is_lower(), bl1_is_row_storage(), bl1_is_upper(), bl1_s0(), bl1_zero_dim1(), and scomplex::imag.
Referenced by FLA_Hermitianize(), and FLA_Symmetrize().
{
    scomplex* a_src;
    scomplex* a_dst;
    scomplex* a_jj;
    int       rs_src, cs_src, inc_src;
    int       rs_dst, cs_dst, inc_dst;
    int       n_iter;
    int       j;

    // Return early if possible.
    if ( bl1_zero_dim1( m ) ) return;

    // Assume A is square.
    n_iter = m;

    // Initialize with appropriate values based on storage.
    if ( bl1_is_col_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        cs_src  = 1;
        rs_src  = 0;
        inc_src = a_cs;
        cs_dst  = a_cs;
        rs_dst  = 0;
        inc_dst = 1;
    }
    else if ( bl1_is_col_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        cs_src  = a_cs;
        rs_src  = 0;
        inc_src = 1;
        cs_dst  = 1;
        rs_dst  = 0;
        inc_dst = a_cs;
    }
    else if ( bl1_is_row_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        cs_src  = 0;
        rs_src  = a_rs;
        inc_src = 1;
        cs_dst  = 0;
        rs_dst  = 1;
        inc_dst = a_rs;
    }
    else if ( bl1_is_row_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        cs_src  = 0;
        rs_src  = 1;
        inc_src = a_rs;
        cs_dst  = 0;
        rs_dst  = a_rs;
        inc_dst = 1;
    }
    else if ( bl1_is_gen_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        // General stride with column-major tilt looks similar to column-major.
        // General stride with row-major tilt looks similar to row-major.
        if ( a_rs < a_cs )
        {
            cs_src  = 1 * a_rs;
            rs_src  = 0;
            inc_src = a_cs;
            cs_dst  = a_cs;
            rs_dst  = 0;
            inc_dst = 1 * a_rs;
        }
        else // if ( a_rs > a_cs )
        {
            cs_src  = 0;
            rs_src  = a_rs;
            inc_src = 1 * a_cs;
            cs_dst  = 0;
            rs_dst  = 1 * a_cs;
            inc_dst = a_rs;
        }
    }
    else // if ( bl1_is_gen_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        // General stride with column-major tilt looks similar to column-major.
        // General stride with row-major tilt looks similar to row-major.
        if ( a_rs < a_cs )
        {
            cs_src  = a_cs;
            rs_src  = 0;
            inc_src = 1 * a_rs;
            cs_dst  = 1 * a_rs;
            rs_dst  = 0;
            inc_dst = a_cs;
        }
        else // if ( a_rs > a_cs )
        {
            cs_src  = 0;
            rs_src  = 1 * a_cs;
            inc_src = a_rs;
            cs_dst  = 0;
            rs_dst  = a_rs;
            inc_dst = 1 * a_cs;
        }
    }

    for ( j = 0; j < n_iter; j++ )
    {
        a_src = a + j*cs_src + j*rs_src;
        a_dst = a + j*cs_dst + j*rs_dst;

        bl1_ccopyv( conj,
                    j,
                    a_src, inc_src,
                    a_dst, inc_dst );

        if ( bl1_is_conj( conj ) )
        {
            a_jj = a + j*a_rs + j*a_cs;

            a_jj->imag = bl1_s0();
        }
    }
}
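A minimal usage sketch for bl1_csymmize(), mirroring a stored lower triangle into the upper triangle with conjugation so that the result is Hermitian. The header name "blis1.h", the enum values BLIS1_CONJUGATE and BLIS1_LOWER_TRIANGULAR, and the scomplex .real member are assumptions not shown on this page; only the function signature and the behavior (copy plus zeroing of diagonal imaginary parts) come from the documentation above.

#include <stdio.h>
#include "blis1.h"  // assumed header providing bl1_csymmize(), scomplex, conj1_t, uplo1_t

int main( void )
{
    // 3x3 complex matrix stored in column-major order: a_rs = 1, a_cs = 3.
    scomplex A[9] = { {0} };
    int m = 3, a_rs = 1, a_cs = 3;

    // Fill only the lower triangle; diagonal entries get nonzero imaginary parts
    // so that the Hermitianizing effect of the conjugated copy is visible.
    for ( int j = 0; j < m; j++ )
        for ( int i = j; i < m; i++ )
        {
            A[ i*a_rs + j*a_cs ].real = ( float )( 10*i + j );
            A[ i*a_rs + j*a_cs ].imag = ( float )( i - j + 1 );
        }

    // Copy the (conjugated) lower triangle into the upper triangle and zero the
    // diagonal imaginary parts, producing a Hermitian matrix.
    bl1_csymmize( BLIS1_CONJUGATE, BLIS1_LOWER_TRIANGULAR, m, A, a_rs, a_cs );

    for ( int i = 0; i < m; i++ )
    {
        for ( int j = 0; j < m; j++ )
            printf( "(%5.1f,%5.1f) ", A[ i*a_rs + j*a_cs ].real, A[ i*a_rs + j*a_cs ].imag );
        printf( "\n" );
    }

    return 0;
}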
void bl1_dsymmize( conj1_t conj, uplo1_t uplo, int m, double* a, int a_rs, int a_cs )
References bl1_dcopyv(), bl1_is_col_storage(), bl1_is_gen_storage(), bl1_is_lower(), bl1_is_row_storage(), bl1_is_upper(), and bl1_zero_dim1().
Referenced by FLA_Hermitianize(), and FLA_Symmetrize().
{
    double* a_src;
    double* a_dst;
    int     rs_src, cs_src, inc_src;
    int     rs_dst, cs_dst, inc_dst;
    int     n_iter;
    int     j;

    // Return early if possible.
    if ( bl1_zero_dim1( m ) ) return;

    // Assume A is square.
    n_iter = m;

    // Initialize with appropriate values based on storage.
    if ( bl1_is_col_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        cs_src  = 1;
        rs_src  = 0;
        inc_src = a_cs;
        cs_dst  = a_cs;
        rs_dst  = 0;
        inc_dst = 1;
    }
    else if ( bl1_is_col_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        cs_src  = a_cs;
        rs_src  = 0;
        inc_src = 1;
        cs_dst  = 1;
        rs_dst  = 0;
        inc_dst = a_cs;
    }
    else if ( bl1_is_row_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        cs_src  = 0;
        rs_src  = a_rs;
        inc_src = 1;
        cs_dst  = 0;
        rs_dst  = 1;
        inc_dst = a_rs;
    }
    else if ( bl1_is_row_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        cs_src  = 0;
        rs_src  = 1;
        inc_src = a_rs;
        cs_dst  = 0;
        rs_dst  = a_rs;
        inc_dst = 1;
    }
    else if ( bl1_is_gen_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        // General stride with column-major tilt looks similar to column-major.
        // General stride with row-major tilt looks similar to row-major.
        if ( a_rs < a_cs )
        {
            cs_src  = 1 * a_rs;
            rs_src  = 0;
            inc_src = a_cs;
            cs_dst  = a_cs;
            rs_dst  = 0;
            inc_dst = 1 * a_rs;
        }
        else // if ( a_rs > a_cs )
        {
            cs_src  = 0;
            rs_src  = a_rs;
            inc_src = 1 * a_cs;
            cs_dst  = 0;
            rs_dst  = 1 * a_cs;
            inc_dst = a_rs;
        }
    }
    else // if ( bl1_is_gen_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        // General stride with column-major tilt looks similar to column-major.
        // General stride with row-major tilt looks similar to row-major.
        if ( a_rs < a_cs )
        {
            cs_src  = a_cs;
            rs_src  = 0;
            inc_src = 1 * a_rs;
            cs_dst  = 1 * a_rs;
            rs_dst  = 0;
            inc_dst = a_cs;
        }
        else // if ( a_rs > a_cs )
        {
            cs_src  = 0;
            rs_src  = 1 * a_cs;
            inc_src = a_rs;
            cs_dst  = 0;
            rs_dst  = a_rs;
            inc_dst = 1 * a_cs;
        }
    }

    for ( j = 0; j < n_iter; j++ )
    {
        a_src = a + j*cs_src + j*rs_src;
        a_dst = a + j*cs_dst + j*rs_dst;

        bl1_dcopyv( conj,
                    j,
                    a_src, inc_src,
                    a_dst, inc_dst );
    }
}
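A minimal usage sketch for bl1_dsymmize() on a real, column-major matrix whose upper triangle is stored; the copy fills in the lower triangle so the result equals its transpose. The header name "blis1.h" and the enum values BLIS1_NO_CONJUGATE and BLIS1_UPPER_TRIANGULAR are assumptions not shown on this page.

#include <stdio.h>
#include "blis1.h"  // assumed header for bl1_dsymmize() and the conj1_t/uplo1_t enums

int main( void )
{
    // 3x3 matrix in column-major order (a_rs = 1, a_cs = 3) with only the
    // upper triangle filled in; entries below the diagonal are stale zeros.
    double A[9] =
    {
        1.0, 0.0, 0.0,   // column 0
        2.0, 4.0, 0.0,   // column 1
        3.0, 5.0, 6.0    // column 2
    };
    int m = 3, a_rs = 1, a_cs = 3;

    // Copy the upper triangle into the lower triangle. For real matrices the
    // conjugation argument has no effect on the values copied.
    bl1_dsymmize( BLIS1_NO_CONJUGATE, BLIS1_UPPER_TRIANGULAR, m, A, a_rs, a_cs );

    for ( int i = 0; i < m; i++ )
    {
        for ( int j = 0; j < m; j++ )
            printf( "%5.1f ", A[ i*a_rs + j*a_cs ] );
        printf( "\n" );
    }

    return 0;
}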
void bl1_ssymmize( conj1_t conj, uplo1_t uplo, int m, float* a, int a_rs, int a_cs )
References bl1_is_col_storage(), bl1_is_gen_storage(), bl1_is_lower(), bl1_is_row_storage(), bl1_is_upper(), bl1_scopyv(), and bl1_zero_dim1().
Referenced by FLA_Hermitianize(), and FLA_Symmetrize().
{
    float* a_src;
    float* a_dst;
    int    rs_src, cs_src, inc_src;
    int    rs_dst, cs_dst, inc_dst;
    int    n_iter;
    int    j;

    // Return early if possible.
    if ( bl1_zero_dim1( m ) ) return;

    // Assume A is square.
    n_iter = m;

    // Initialize with appropriate values based on storage.
    if ( bl1_is_col_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        cs_src  = 1;
        rs_src  = 0;
        inc_src = a_cs;
        cs_dst  = a_cs;
        rs_dst  = 0;
        inc_dst = 1;
    }
    else if ( bl1_is_col_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        cs_src  = a_cs;
        rs_src  = 0;
        inc_src = 1;
        cs_dst  = 1;
        rs_dst  = 0;
        inc_dst = a_cs;
    }
    else if ( bl1_is_row_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        cs_src  = 0;
        rs_src  = a_rs;
        inc_src = 1;
        cs_dst  = 0;
        rs_dst  = 1;
        inc_dst = a_rs;
    }
    else if ( bl1_is_row_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        cs_src  = 0;
        rs_src  = 1;
        inc_src = a_rs;
        cs_dst  = 0;
        rs_dst  = a_rs;
        inc_dst = 1;
    }
    else if ( bl1_is_gen_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        // General stride with column-major tilt looks similar to column-major.
        // General stride with row-major tilt looks similar to row-major.
        if ( a_rs < a_cs )
        {
            cs_src  = 1 * a_rs;
            rs_src  = 0;
            inc_src = a_cs;
            cs_dst  = a_cs;
            rs_dst  = 0;
            inc_dst = 1 * a_rs;
        }
        else // if ( a_rs > a_cs )
        {
            cs_src  = 0;
            rs_src  = a_rs;
            inc_src = 1 * a_cs;
            cs_dst  = 0;
            rs_dst  = 1 * a_cs;
            inc_dst = a_rs;
        }
    }
    else // if ( bl1_is_gen_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        // General stride with column-major tilt looks similar to column-major.
        // General stride with row-major tilt looks similar to row-major.
        if ( a_rs < a_cs )
        {
            cs_src  = a_cs;
            rs_src  = 0;
            inc_src = 1 * a_rs;
            cs_dst  = 1 * a_rs;
            rs_dst  = 0;
            inc_dst = a_cs;
        }
        else // if ( a_rs > a_cs )
        {
            cs_src  = 0;
            rs_src  = 1 * a_cs;
            inc_src = a_rs;
            cs_dst  = 0;
            rs_dst  = a_rs;
            inc_dst = 1 * a_cs;
        }
    }

    for ( j = 0; j < n_iter; j++ )
    {
        a_src = a + j*cs_src + j*rs_src;
        a_dst = a + j*cs_dst + j*rs_dst;

        bl1_scopyv( conj,
                    j,
                    a_src, inc_src,
                    a_dst, inc_dst );
    }
}
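A minimal usage sketch for bl1_ssymmize() showing that the routine also works on a submatrix of a larger array: here a 3x3 leading submatrix of a 5x5 column-major buffer, so the column stride a_cs is the parent leading dimension rather than m. The header name "blis1.h" and the enum values BLIS1_NO_CONJUGATE and BLIS1_LOWER_TRIANGULAR are assumptions not shown on this page.

#include <stdio.h>
#include "blis1.h"  // assumed header for bl1_ssymmize() and the blis1 enums

int main( void )
{
    // 3x3 leading submatrix of a 5x5 column-major array: row stride 1,
    // column stride equal to the parent leading dimension of 5.
    float buf[25] = { 0.0f };
    int   m = 3, a_rs = 1, a_cs = 5;

    // Initialize only the lower triangle of the 3x3 submatrix.
    for ( int j = 0; j < m; j++ )
        for ( int i = j; i < m; i++ )
            buf[ i*a_rs + j*a_cs ] = ( float )( 10*i + j );

    // Mirror the lower triangle of the submatrix into its upper triangle.
    bl1_ssymmize( BLIS1_NO_CONJUGATE, BLIS1_LOWER_TRIANGULAR, m, buf, a_rs, a_cs );

    for ( int i = 0; i < m; i++ )
    {
        for ( int j = 0; j < m; j++ )
            printf( "%5.1f ", buf[ i*a_rs + j*a_cs ] );
        printf( "\n" );
    }

    return 0;
}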
void bl1_zsymmize( conj1_t conj, uplo1_t uplo, int m, dcomplex* a, int a_rs, int a_cs )
References bl1_d0(), bl1_is_col_storage(), bl1_is_conj(), bl1_is_gen_storage(), bl1_is_lower(), bl1_is_row_storage(), bl1_is_upper(), bl1_zcopyv(), bl1_zero_dim1(), and dcomplex::imag.
Referenced by FLA_Hermitianize(), and FLA_Symmetrize().
{
    dcomplex* a_src;
    dcomplex* a_dst;
    dcomplex* a_jj;
    int       rs_src, cs_src, inc_src;
    int       rs_dst, cs_dst, inc_dst;
    int       n_iter;
    int       j;

    // Return early if possible.
    if ( bl1_zero_dim1( m ) ) return;

    // Assume A is square.
    n_iter = m;

    // Initialize with appropriate values based on storage.
    if ( bl1_is_col_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        cs_src  = 1;
        rs_src  = 0;
        inc_src = a_cs;
        cs_dst  = a_cs;
        rs_dst  = 0;
        inc_dst = 1;
    }
    else if ( bl1_is_col_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        cs_src  = a_cs;
        rs_src  = 0;
        inc_src = 1;
        cs_dst  = 1;
        rs_dst  = 0;
        inc_dst = a_cs;
    }
    else if ( bl1_is_row_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        cs_src  = 0;
        rs_src  = a_rs;
        inc_src = 1;
        cs_dst  = 0;
        rs_dst  = 1;
        inc_dst = a_rs;
    }
    else if ( bl1_is_row_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        cs_src  = 0;
        rs_src  = 1;
        inc_src = a_rs;
        cs_dst  = 0;
        rs_dst  = a_rs;
        inc_dst = 1;
    }
    else if ( bl1_is_gen_storage( a_rs, a_cs ) && bl1_is_lower( uplo ) )
    {
        // General stride with column-major tilt looks similar to column-major.
        // General stride with row-major tilt looks similar to row-major.
        if ( a_rs < a_cs )
        {
            cs_src  = 1 * a_rs;
            rs_src  = 0;
            inc_src = a_cs;
            cs_dst  = a_cs;
            rs_dst  = 0;
            inc_dst = 1 * a_rs;
        }
        else // if ( a_rs > a_cs )
        {
            cs_src  = 0;
            rs_src  = a_rs;
            inc_src = 1 * a_cs;
            cs_dst  = 0;
            rs_dst  = 1 * a_cs;
            inc_dst = a_rs;
        }
    }
    else // if ( bl1_is_gen_storage( a_rs, a_cs ) && bl1_is_upper( uplo ) )
    {
        // General stride with column-major tilt looks similar to column-major.
        // General stride with row-major tilt looks similar to row-major.
        if ( a_rs < a_cs )
        {
            cs_src  = a_cs;
            rs_src  = 0;
            inc_src = 1 * a_rs;
            cs_dst  = 1 * a_rs;
            rs_dst  = 0;
            inc_dst = a_cs;
        }
        else // if ( a_rs > a_cs )
        {
            cs_src  = 0;
            rs_src  = 1 * a_cs;
            inc_src = a_rs;
            cs_dst  = 0;
            rs_dst  = a_rs;
            inc_dst = 1 * a_cs;
        }
    }

    for ( j = 0; j < n_iter; j++ )
    {
        a_src = a + j*cs_src + j*rs_src;
        a_dst = a + j*cs_dst + j*rs_dst;

        bl1_zcopyv( conj,
                    j,
                    a_src, inc_src,
                    a_dst, inc_dst );

        if ( bl1_is_conj( conj ) )
        {
            a_jj = a + j*a_rs + j*a_cs;

            a_jj->imag = bl1_d0();
        }
    }
}
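A minimal usage sketch for bl1_zsymmize() on a row-major matrix (a_cs = 1) whose upper triangle is stored: with conjugation requested, the lower triangle receives the conjugate transpose of the upper triangle and the diagonal imaginary parts are zeroed, giving a Hermitian matrix. The header name "blis1.h", the enum values BLIS1_CONJUGATE and BLIS1_UPPER_TRIANGULAR, and the dcomplex .real member are assumptions not shown on this page.

#include <stdio.h>
#include "blis1.h"  // assumed header for bl1_zsymmize(), dcomplex, and the blis1 enums

int main( void )
{
    // 3x3 double-complex matrix in row-major order: a_rs = 3, a_cs = 1.
    dcomplex A[9] = { {0} };
    int m = 3, a_rs = 3, a_cs = 1;

    // Store only the upper triangle, with nonzero imaginary parts everywhere
    // (including the diagonal) so the Hermitianizing effect is visible.
    for ( int i = 0; i < m; i++ )
        for ( int j = i; j < m; j++ )
        {
            A[ i*a_rs + j*a_cs ].real = ( double )( 10*i + j );
            A[ i*a_rs + j*a_cs ].imag = ( double )( j - i + 1 );
        }

    // Conjugate-copy the upper triangle into the lower triangle and zero the
    // diagonal imaginary parts.
    bl1_zsymmize( BLIS1_CONJUGATE, BLIS1_UPPER_TRIANGULAR, m, A, a_rs, a_cs );

    for ( int i = 0; i < m; i++ )
    {
        for ( int j = 0; j < m; j++ )
            printf( "(%5.1f,%5.1f) ", A[ i*a_rs + j*a_cs ].real, A[ i*a_rs + j*a_cs ].imag );
        printf( "\n" );
    }

    return 0;
}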