+ gsl_permutation_reverse (perm);
+}
+
+
+static void
+drot_go (double phi, double *l0, double *l1)
+{
+ double r0 = cos (phi) * *l0 + sin (phi) * *l1;
+ double r1 = - sin (phi) * *l0 + cos (phi) * *l1;
+
+ *l0 = r0;
+ *l1 = r1;
+}
+
+
+static gsl_matrix *
+clone_matrix (const gsl_matrix *m)
+{
+ int j, k;
+ gsl_matrix *c = gsl_matrix_calloc (m->size1, m->size2);
+
+ for (j = 0 ; j < c->size1; ++j)
+ {
+ for (k = 0 ; k < c->size2; ++k)
+ {
+ const double *v = gsl_matrix_const_ptr (m, j, k);
+ gsl_matrix_set (c, j, k, *v);
+ }
+ }
+
+ return c;
+}
+
+
/* Return the starting value of the rotation convergence criterion for
   the factor matrix FM.  For each factor (column) j it accumulates,
   over the below-diagonal entries lambda = FM[k][j] with k > j:

       ( n * sum(lambda^4) - (sum(lambda^2))^2 ) / n^2

   where n = FM->size1 (the number of variables).  The per-iteration
   criterion computed inside the rotation loop has the same form, so
   this provides the "previous" value for the first comparison.  */
static double
initial_sv (const gsl_matrix *fm)
{
  int j, k;

  double sv = 0.0;
  for (j = 0 ; j < fm->size2; ++j)
    {
      double l4s = 0;   /* running sum of lambda^4 for column j */
      double l2s = 0;   /* running sum of lambda^2 for column j */

      /* NOTE(review): K indexes ROWS of FM yet is bounded by
         FM->size2 (the COLUMN count), so for a non-square FM the
         rows k >= size2 are never visited.  This mirrors the
         convergence computation in the rotation loop, so it is kept
         as-is -- confirm the asymmetry is intentional.  */
      for (k = j + 1 ; k < fm->size2; ++k)
        {
          double lambda = gsl_matrix_get (fm, k, j);
          double lambda_sq = lambda * lambda;
          double lambda_4 = lambda_sq * lambda_sq;

          l4s += lambda_4;
          l2s += lambda_sq;
        }
      sv += ( fm->size1 * l4s - (l2s * l2s) ) / (fm->size1 * fm->size1 );
    }
  return sv;
}
+
+static void
+rotate (const struct cmd_factor *cf, const gsl_matrix *unrot,
+ const gsl_vector *communalities,
+ gsl_matrix *result,
+ gsl_vector *rotated_loadings,
+ gsl_matrix *pattern_matrix,
+ gsl_matrix *factor_correlation_matrix
+ )
+{
+ int j, k;
+ int i;
+ double prev_sv;
+
+ /* First get a normalised version of UNROT */
+ gsl_matrix *normalised = gsl_matrix_calloc (unrot->size1, unrot->size2);
+ gsl_matrix *h_sqrt = gsl_matrix_calloc (communalities->size, communalities->size);
+ gsl_matrix *h_sqrt_inv ;
+
+ /* H is the diagonal matrix containing the absolute values of the communalities */
+ for (i = 0 ; i < communalities->size ; ++i)
+ {
+ double *ptr = gsl_matrix_ptr (h_sqrt, i, i);
+ *ptr = fabs (gsl_vector_get (communalities, i));
+ }
+
+ /* Take the square root of the communalities */
+ gsl_linalg_cholesky_decomp (h_sqrt);
+
+
+ /* Save a copy of h_sqrt and invert it */
+ h_sqrt_inv = clone_matrix (h_sqrt);
+ gsl_linalg_cholesky_decomp (h_sqrt_inv);
+ gsl_linalg_cholesky_invert (h_sqrt_inv);
+
+ /* normalised vertion is H^{1/2} x UNROT */
+ gsl_blas_dgemm (CblasNoTrans, CblasNoTrans, 1.0, h_sqrt_inv, unrot, 0.0, normalised);
+
+ gsl_matrix_free (h_sqrt_inv);
+
+
+ /* Now perform the rotation iterations */
+
+ prev_sv = initial_sv (normalised);
+ for (i = 0 ; i < cf->rotation_iterations ; ++i)
+ {
+ double sv = 0.0;
+ for (j = 0 ; j < normalised->size2; ++j)
+ {
+ /* These variables relate to the convergence criterium */
+ double l4s = 0;
+ double l2s = 0;
+
+ for (k = j + 1 ; k < normalised->size2; ++k)
+ {
+ int p;
+ double a = 0.0;
+ double b = 0.0;
+ double c = 0.0;
+ double d = 0.0;
+ double x, y;
+ double phi;
+
+ for (p = 0; p < normalised->size1; ++p)
+ {
+ double jv = gsl_matrix_get (normalised, p, j);
+ double kv = gsl_matrix_get (normalised, p, k);
+
+ double u = jv * jv - kv * kv;
+ double v = 2 * jv * kv;
+ a += u;
+ b += v;
+ c += u * u - v * v;
+ d += 2 * u * v;
+ }
+
+ rotation_coeff [cf->rotation] (&x, &y, a, b, c, d, normalised);
+
+ phi = atan2 (x, y) / 4.0 ;
+
+ /* Don't bother rotating if the angle is small */
+ if ( fabs (sin (phi) ) <= pow (10.0, -15.0))
+ continue;
+
+ for (p = 0; p < normalised->size1; ++p)
+ {
+ double *lambda0 = gsl_matrix_ptr (normalised, p, j);
+ double *lambda1 = gsl_matrix_ptr (normalised, p, k);
+ drot_go (phi, lambda0, lambda1);
+ }
+
+ /* Calculate the convergence criterium */
+ {
+ double lambda = gsl_matrix_get (normalised, k, j);
+ double lambda_sq = lambda * lambda;
+ double lambda_4 = lambda_sq * lambda_sq;
+
+ l4s += lambda_4;
+ l2s += lambda_sq;
+ }
+ }
+ sv += ( normalised->size1 * l4s - (l2s * l2s) ) / (normalised->size1 * normalised->size1 );
+ }
+
+ if ( fabs (sv - prev_sv) <= cf->rconverge)
+ break;
+
+ prev_sv = sv;
+ }
+
+ gsl_blas_dgemm (CblasNoTrans, CblasNoTrans, 1.0,
+ h_sqrt, normalised, 0.0, result);
+
+ gsl_matrix_free (h_sqrt);
+ gsl_matrix_free (normalised);
+
+ if (cf->rotation == ROT_PROMAX)
+ {
+ /* general purpose m by m matrix, where m is the number of factors */
+ gsl_matrix *mm1 = gsl_matrix_calloc (unrot->size2, unrot->size2);
+ gsl_matrix *mm2 = gsl_matrix_calloc (unrot->size2, unrot->size2);
+
+ /* general purpose m by p matrix, where p is the number of variables */
+ gsl_matrix *mp1 = gsl_matrix_calloc (unrot->size2, unrot->size1);
+
+ gsl_matrix *pm1 = gsl_matrix_calloc (unrot->size1, unrot->size2);
+
+ gsl_permutation *perm = gsl_permutation_alloc (unrot->size2);
+
+ int signum;
+
+ int i, j;
+
+ /* The following variables follow the notation by SPSS Statistical Algorithms
+ page 342 */
+ gsl_matrix *L = gsl_matrix_calloc (unrot->size2, unrot->size2);
+ gsl_matrix *P = clone_matrix (result);
+ gsl_matrix *D ;
+ gsl_matrix *Q ;
+
+
+ /* Vector of length p containing (indexed by i)
+ \Sum^m_j {\lambda^2_{ij}} */
+ gsl_vector *rssq = gsl_vector_calloc (unrot->size1);
+
+ for (i = 0; i < P->size1; ++i)
+ {
+ double sum = 0;
+ for (j = 0; j < P->size2; ++j)
+ {
+ sum += gsl_matrix_get (result, i, j)
+ * gsl_matrix_get (result, i, j);
+
+ }
+
+ gsl_vector_set (rssq, i, sqrt (sum));
+ }
+
+ for (i = 0; i < P->size1; ++i)
+ {
+ for (j = 0; j < P->size2; ++j)
+ {
+ double l = gsl_matrix_get (result, i, j);
+ double r = gsl_vector_get (rssq, i);
+ gsl_matrix_set (P, i, j, pow (fabs (l / r), cf->promax_power + 1) * r / l);
+ }
+ }
+
+ gsl_vector_free (rssq);
+
+ gsl_linalg_matmult_mod (result,
+ GSL_LINALG_MOD_TRANSPOSE,
+ result,
+ GSL_LINALG_MOD_NONE,
+ mm1);
+
+ gsl_linalg_LU_decomp (mm1, perm, &signum);
+ gsl_linalg_LU_invert (mm1, perm, mm2);
+
+ gsl_linalg_matmult_mod (mm2, GSL_LINALG_MOD_NONE,
+ result, GSL_LINALG_MOD_TRANSPOSE,
+ mp1);
+
+ gsl_linalg_matmult_mod (mp1, GSL_LINALG_MOD_NONE,
+ P, GSL_LINALG_MOD_NONE,
+ L);
+
+ D = diag_rcp_sqrt (L);
+ Q = gsl_matrix_calloc (unrot->size2, unrot->size2);
+
+ gsl_linalg_matmult_mod (L, GSL_LINALG_MOD_NONE,
+ D, GSL_LINALG_MOD_NONE,
+ Q);
+
+ gsl_matrix *QQinv = gsl_matrix_calloc (unrot->size2, unrot->size2);
+
+ gsl_linalg_matmult_mod (Q, GSL_LINALG_MOD_TRANSPOSE,
+ Q, GSL_LINALG_MOD_NONE,
+ QQinv);
+
+ gsl_linalg_cholesky_decomp (QQinv);
+ gsl_linalg_cholesky_invert (QQinv);
+
+
+ gsl_matrix *C = diag_rcp_inv_sqrt (QQinv);
+ gsl_matrix *Cinv = clone_matrix (C);
+
+ gsl_linalg_cholesky_decomp (Cinv);
+ gsl_linalg_cholesky_invert (Cinv);
+
+
+ gsl_linalg_matmult_mod (result, GSL_LINALG_MOD_NONE,
+ Q, GSL_LINALG_MOD_NONE,
+ pm1);
+
+ gsl_linalg_matmult_mod (pm1, GSL_LINALG_MOD_NONE,
+ Cinv, GSL_LINALG_MOD_NONE,
+ pattern_matrix);
+
+
+ gsl_linalg_matmult_mod (C, GSL_LINALG_MOD_NONE,
+ QQinv, GSL_LINALG_MOD_NONE,
+ mm1);
+
+ gsl_linalg_matmult_mod (mm1, GSL_LINALG_MOD_NONE,
+ C, GSL_LINALG_MOD_TRANSPOSE,
+ factor_correlation_matrix);
+
+ gsl_linalg_matmult_mod (pattern_matrix, GSL_LINALG_MOD_NONE,
+ factor_correlation_matrix, GSL_LINALG_MOD_NONE,
+ pm1);
+
+ gsl_matrix_memcpy (result, pm1);
+
+
+ gsl_matrix_free (QQinv);
+ gsl_matrix_free (C);
+ gsl_matrix_free (Cinv);
+
+ gsl_matrix_free (D);
+ gsl_matrix_free (Q);
+ gsl_matrix_free (L);
+ gsl_matrix_free (P);
+
+ gsl_permutation_free (perm);
+
+ gsl_matrix_free (mm1);
+ gsl_matrix_free (mm2);
+ gsl_matrix_free (mp1);
+ gsl_matrix_free (pm1);
+ }
+
+
+ /* reflect negative sums and populate the rotated loadings vector*/
+ for (i = 0 ; i < result->size2; ++i)
+ {
+ double ssq = 0.0;
+ double sum = 0.0;
+ for (j = 0 ; j < result->size1; ++j)
+ {
+ double s = gsl_matrix_get (result, j, i);
+ ssq += s * s;
+ sum += s;
+ }
+
+ gsl_vector_set (rotated_loadings, i, ssq);
+
+ if ( sum < 0 )
+ for (j = 0 ; j < result->size1; ++j)
+ {
+ double *lambda = gsl_matrix_ptr (result, j, i);
+ *lambda = - *lambda;
+ }
+ }