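scipy.patch: port pygam to the current scipy.sparse and NumPy APIs.

Recent SciPy releases deprecated and then removed the .A shorthand on
sparse matrices; .toarray() is the supported equivalent and returns the
same dense ndarray. The test changes also replace np.alltrue, which was
removed in NumPy 2.0, with np.all. A minimal sketch of both
replacements, using an arbitrary csr_matrix (illustrative only, not
part of the patch below):

    import numpy as np
    import scipy.sparse as sp

    M = sp.csr_matrix(np.eye(3))

    # Old spelling, removed from recent scipy: dense = M.A
    dense = M.toarray()  # supported equivalent; returns a dense ndarray

    # np.alltrue was removed in NumPy 2.0; np.all is the replacement
    assert np.all(dense == np.eye(3))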
diff --git a/pygam/pygam.py b/pygam/pygam.py
index bebcef1..a5c9229 100644
--- a/pygam/pygam.py
+++ b/pygam/pygam.py
@@ -703,7 +703,7 @@ class GAM(Core, MetaTermMixin):
 
         # solve the linear problem
         return np.linalg.solve(
-            load_diagonal(modelmat.T.dot(modelmat).A), modelmat.T.dot(y_)
+            load_diagonal(modelmat.T.dot(modelmat).toarray()), modelmat.T.dot(y_)
         )
 
         # not sure if this is faster...
@@ -780,7 +780,7 @@ class GAM(Core, MetaTermMixin):
             self._on_loop_start(vars())
 
             WB = W.dot(modelmat[mask, :])  # common matrix product
-            Q, R = np.linalg.qr(WB.A)
+            Q, R = np.linalg.qr(WB.toarray())
 
             if not np.isfinite(Q).all() or not np.isfinite(R).all():
                 raise ValueError(
@@ -1401,7 +1401,7 @@ class GAM(Core, MetaTermMixin):
         idxs = self.terms.get_coef_indices(term)
         cov = self.statistics_['cov'][idxs][:, idxs]
 
-        var = (modelmat.dot(cov) * modelmat.A).sum(axis=1)
+        var = (modelmat.dot(cov) * modelmat.toarray()).sum(axis=1)
         if prediction:
             var += self.distribution.scale
 
diff --git a/pygam/terms.py b/pygam/terms.py
index c033e72..83fb22a 100644
--- a/pygam/terms.py
+++ b/pygam/terms.py
@@ -1500,7 +1500,7 @@ class TensorTerm(SplineTerm, MetaTermMixin):
             )
 
             # now enter it into the composite
-            composite_C[tuple(np.meshgrid(slice_, slice_))] = slice_C.A
+            composite_C[tuple(np.meshgrid(slice_, slice_))] = slice_C.toarray()
 
         return sp.sparse.csc_matrix(composite_C)
 
diff --git a/pygam/tests/test_penalties.py b/pygam/tests/test_penalties.py
index bf6dd68..ac32ff0 100644
--- a/pygam/tests/test_penalties.py
+++ b/pygam/tests/test_penalties.py
@@ -23,13 +23,13 @@ def test_single_spline_penalty():
     monotonic_ and convexity_ should be 0.
     """
     coef = np.array(1.0)
-    assert np.alltrue(derivative(1, coef).A == 0.0)
-    assert np.alltrue(l2(1, coef).A == 1.0)
-    assert np.alltrue(monotonic_inc(1, coef).A == 0.0)
-    assert np.alltrue(monotonic_dec(1, coef).A == 0.0)
-    assert np.alltrue(convex(1, coef).A == 0.0)
-    assert np.alltrue(concave(1, coef).A == 0.0)
-    assert np.alltrue(none(1, coef).A == 0.0)
+    assert np.all(derivative(1, coef).toarray() == 0.0)
+    assert np.all(l2(1, coef).toarray() == 1.0)
+    assert np.all(monotonic_inc(1, coef).toarray() == 0.0)
+    assert np.all(monotonic_dec(1, coef).toarray() == 0.0)
+    assert np.all(convex(1, coef).toarray() == 0.0)
+    assert np.all(concave(1, coef).toarray() == 0.0)
+    assert np.all(none(1, coef).toarray() == 0.0)
 
 
 def test_wrap_penalty():
@@ -43,12 +43,12 @@ def test_wrap_penalty():
 
     fit_linear = True
     p = wrap_penalty(none, fit_linear, linear_penalty=linear_penalty)
-    P = p(n, coef).A
+    P = p(n, coef).toarray()
     assert P.sum() == linear_penalty
 
     fit_linear = False
     p = wrap_penalty(none, fit_linear, linear_penalty=linear_penalty)
-    P = p(n, coef).A
+    P = p(n, coef).toarray()
     assert P.sum() == 0.0
 
 
diff --git a/pygam/tests/test_terms.py b/pygam/tests/test_terms.py
index 72c9ce0..a773ac9 100644
--- a/pygam/tests/test_terms.py
+++ b/pygam/tests/test_terms.py
@@ -315,10 +315,10 @@ def test_tensor_composite_constraints_equal_penalties():
 
     # check all the dimensions
     for i in range(3):
-        P = term._build_marginal_penalties(i).A
+        P = term._build_marginal_penalties(i).toarray()
         C = term._build_marginal_constraints(
             i, -np.arange(term.n_coefs), constraint_lam=1, constraint_l2=0
-        ).A
+        ).toarray()
 
         assert (P == C).all()
 
@@ -362,11 +362,11 @@ class TestRegressions(object):
         term = SplineTerm(feature=0, penalties=['auto', 'none'])
 
         # penalties should be equivalent
-        assert (term.build_penalties() == base_term.build_penalties()).A.all()
+        assert (term.build_penalties() == base_term.build_penalties()).toarray().all()
 
         # multitple penalties should be additive, not multiplicative,
         # so 'none' penalty should have no effect
-        assert np.abs(term.build_penalties().A).sum() > 0
+        assert np.abs(term.build_penalties().toarray()).sum() > 0
 
     def test_compose_constraints(self, hepatitis_X_y):
         """we should be able to compose penalties
diff --git a/pygam/utils.py b/pygam/utils.py
index bc65ad5..279989b 100644
--- a/pygam/utils.py
+++ b/pygam/utils.py
@@ -64,7 +64,7 @@ def cholesky(A, sparse=True, verbose=True):  # noqa: F811
 
         if sparse:
             return L.T  # upper triangular factorization
-        return L.T.A  # upper triangular factorization
+        return L.T.toarray()  # upper triangular factorization
 
     else:
         msg = (
@@ -78,7 +78,7 @@ def cholesky(A, sparse=True, verbose=True):  # noqa: F811
             warnings.warn(msg)
 
         if sp.sparse.issparse(A):
-            A = A.A
+            A = A.toarray()
 
         try:
             L = sp.linalg.cholesky(A, lower=False)
@@ -951,10 +951,10 @@ def tensor_product(a, b, reshape=True):
         raise ValueError('both arguments must have the same number of samples')
 
     if sp.sparse.issparse(a):
-        a = a.A
+        a = a.toarray()
 
     if sp.sparse.issparse(b):
-        b = b.A
+        b = b.toarray()
 
     tensor = a[..., :, None] * b[..., None, :]