MNT update Travis dependencies to latest available versions (scikit-l…
naoyak authored and jnothman committed Aug 13, 2018
1 parent 391209b commit c1738a3
Showing 7 changed files with 20 additions and 25 deletions.
10 changes: 5 additions & 5 deletions .travis.yml
@@ -38,13 +38,13 @@ matrix:
       NUMPY_VERSION="1.10.4" SCIPY_VERSION="0.16.1" CYTHON_VERSION="0.25.2"
       PILLOW_VERSION="4.0.0" COVERAGE=true
     if: type != cron
-    # This environment tests the newest supported Anaconda release.
+    # This environment tests the latest available dependencies.
     # It runs tests requiring pandas and PyAMG.
     # It also runs with the site joblib instead of the vendored copy of joblib.
-    - env: DISTRIB="conda" PYTHON_VERSION="3.6.2" INSTALL_MKL="true"
-      NUMPY_VERSION="1.14.2" SCIPY_VERSION="1.0.0" PANDAS_VERSION="0.20.3"
-      CYTHON_VERSION="0.26.1" PYAMG_VERSION="3.3.2" PILLOW_VERSION="4.3.0"
-      JOBLIB_VERSION="0.12" COVERAGE=true
+    - env: DISTRIB="conda" PYTHON_VERSION="*" INSTALL_MKL="true"
+      NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*"
+      CYTHON_VERSION="*" PYAMG_VERSION="*" PILLOW_VERSION="*"
+      JOBLIB_VERSION="*" COVERAGE=true
       CHECK_PYTEST_SOFT_DEPENDENCY="true" TEST_DOCSTRINGS="true"
       SKLEARN_SITE_JOBLIB=1
     if: type != cron
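In conda's match-specification syntax, "*" matches any version, so each CI run now resolves every pin to the newest release available on the channel. Because the resolved versions then vary between builds, a small helper along these lines (hypothetical, not part of this commit) could log what "*" actually resolved to:

# Hypothetical CI helper: print the versions the "*" pins resolved to,
# so a failing build can later be reproduced with explicit pins.
import numpy
import scipy
import pandas
import Cython

for mod in (numpy, scipy, pandas, Cython):
    print(mod.__name__, mod.__version__)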
9 changes: 4 additions & 5 deletions examples/applications/plot_prediction_latency.py
@@ -26,7 +26,6 @@
 
 from sklearn.preprocessing import StandardScaler
 from sklearn.model_selection import train_test_split
-from scipy.stats import scoreatpercentile
 from sklearn.datasets.samples_generator import make_regression
 from sklearn.ensemble.forest import RandomForestRegressor
 from sklearn.linear_model.ridge import Ridge
@@ -50,7 +49,7 @@ def atomic_benchmark_estimator(estimator, X_test, verbose=False):
             estimator.predict(instance)
         runtimes[i] = time.time() - start
     if verbose:
-        print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
+        print("atomic_benchmark runtimes:", min(runtimes), np.percentile(
             runtimes, 50), max(runtimes))
     return runtimes
 
@@ -65,7 +64,7 @@ def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
         runtimes[i] = time.time() - start
     runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
     if verbose:
-        print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
+        print("bulk_benchmark runtimes:", min(runtimes), np.percentile(
             runtimes, 50), max(runtimes))
     return runtimes
 
@@ -207,8 +206,8 @@ def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
             estimator.fit(X_train, y_train)
             gc.collect()
             runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
-            percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
-                                                               percentile)
+            percentiles[cls_name][n] = 1e6 * np.percentile(runtimes,
+                                                           percentile)
     return percentiles
 
 
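The changes in this file, like the rest of the commit, assume that np.percentile agrees with scipy.stats.scoreatpercentile under their default settings (both interpolate linearly between order statistics). A quick sanity check, illustrative rather than part of the commit:

import numpy as np
from scipy.stats import scoreatpercentile

# The median latency is identical under either function, so the printed
# benchmark numbers do not change.
rng = np.random.RandomState(0)
runtimes = rng.rand(1000)
assert np.isclose(np.percentile(runtimes, 50),
                  scoreatpercentile(runtimes, 50))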
3 changes: 1 addition & 2 deletions sklearn/covariance/elliptic_envelope.py
@@ -106,8 +106,7 @@ def fit(self, X, y=None):
         y : (ignored)
         """
         super(EllipticEnvelope, self).fit(X)
-        self.offset_ = sp.stats.scoreatpercentile(
-            -self.dist_, 100. * self.contamination)
+        self.offset_ = np.percentile(-self.dist_, 100. * self.contamination)
         return self
 
     def decision_function(self, X, raw_values=None):
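The offset_ above places the decision threshold at the contamination quantile of the negated Mahalanobis distances, so roughly that fraction of training points falls below it. A minimal standalone sketch of the same idea, using made-up scores instead of a fitted estimator:

import numpy as np

scores = -np.arange(100.)  # stand-in for -self.dist_
contamination = 0.1
offset = np.percentile(scores, 100. * contamination)
print((scores < offset).mean())  # ~0.1 of points fall below the offset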
7 changes: 3 additions & 4 deletions sklearn/ensemble/gradient_boosting.py
@@ -38,7 +38,6 @@
 import numbers
 import numpy as np
 
-from scipy import stats
 from scipy.sparse import csc_matrix
 from scipy.sparse import csr_matrix
 from scipy.sparse import issparse
@@ -91,7 +90,7 @@ def fit(self, X, y, sample_weight=None):
             Individual weights for each sample
         """
         if sample_weight is None:
-            self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
+            self.quantile = np.percentile(y, self.alpha * 100.0)
         else:
             self.quantile = _weighted_percentile(y, sample_weight,
                                                  self.alpha * 100.0)
@@ -608,7 +607,7 @@ def __call__(self, y, pred, sample_weight=None):
         gamma = self.gamma
         if gamma is None:
             if sample_weight is None:
-                gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
+                gamma = np.percentile(np.abs(diff), self.alpha * 100)
             else:
                 gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
 
@@ -641,7 +640,7 @@ def negative_gradient(self, y, pred, sample_weight=None, **kargs):
         pred = pred.ravel()
         diff = y - pred
         if sample_weight is None:
-            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
+            gamma = np.percentile(np.abs(diff), self.alpha * 100)
         else:
             gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
         gamma_mask = np.abs(diff) <= gamma
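In the unweighted branches above, gamma is the alpha-quantile of the absolute residuals, i.e. the cutoff between the quadratic and linear regions of the Huber loss. A worked toy example (numbers invented for illustration):

import numpy as np

y = np.array([3.0, -0.5, 2.0, 7.0])
pred = np.array([2.5, 0.0, 2.0, 8.0])
diff = y - pred  # |diff| sorted: [0.0, 0.5, 0.5, 1.0]
alpha = 0.9
# Linear interpolation at the 90th percentile: 0.5 + 0.7 * 0.5 = 0.85
gamma = np.percentile(np.abs(diff), alpha * 100)
print(gamma)  # 0.85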
8 changes: 4 additions & 4 deletions sklearn/ensemble/iforest.py
@@ -267,8 +267,8 @@ def fit(self, X, y=None, sample_weight=None):
                                  "'auto' when behaviour == 'old'.")
 
             self.offset_ = -0.5
-            self._threshold_ = sp.stats.scoreatpercentile(
-                self.decision_function(X), 100. * self._contamination)
+            self._threshold_ = np.percentile(self.decision_function(X),
+                                             100. * self._contamination)
 
             return self
 
@@ -281,8 +281,8 @@ def fit(self, X, y=None, sample_weight=None):
 
         # else, define offset_ wrt contamination parameter, so that the
         # threshold_ attribute is implicitly 0 and is not needed anymore:
-        self.offset_ = sp.stats.scoreatpercentile(
-            self.score_samples(X), 100. * self._contamination)
+        self.offset_ = np.percentile(self.score_samples(X),
+                                     100. * self._contamination)
 
         return self

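Since _threshold_ and offset_ are percentiles of the training scores themselves, fitting with a float contamination flags about that fraction of the training set. A usage sketch (the API shown matches scikit-learn around this commit; deprecation warnings may differ by version):

import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(42)
X = rng.randn(200, 2)
clf = IsolationForest(contamination=0.1, random_state=42).fit(X)
# The threshold is the 10th percentile of the training scores, so close
# to 10% of the training points are predicted as outliers (-1).
print((clf.predict(X) == -1).mean())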
3 changes: 1 addition & 2 deletions sklearn/feature_selection/univariate_selection.py
@@ -438,8 +438,7 @@ def _get_support_mask(self):
             return np.zeros(len(self.scores_), dtype=np.bool)
 
         scores = _clean_nans(self.scores_)
-        threshold = stats.scoreatpercentile(scores,
-                                            100 - self.percentile)
+        threshold = np.percentile(scores, 100 - self.percentile)
         mask = scores > threshold
         ties = np.where(scores == threshold)[0]
         if len(ties):
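_get_support_mask keeps the features whose score exceeds the (100 - percentile)-th percentile of all scores. The same logic in isolation, with toy numbers:

import numpy as np

scores = np.array([0.1, 0.5, 0.3, 0.9, 0.7])
percentile = 40  # keep the top 40% of features
# The 60th percentile of the scores is 0.58, so exactly the two
# highest-scoring features pass the threshold.
threshold = np.percentile(scores, 100 - percentile)
print(scores > threshold)  # [False False False  True  True]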
5 changes: 2 additions & 3 deletions sklearn/neighbors/lof.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import warnings
-from scipy.stats import scoreatpercentile
 
 from .base import NeighborsBase
 from .base import KNeighborsMixin
@@ -262,8 +261,8 @@ def fit(self, X, y=None):
             # inliers score around -1 (the higher, the less abnormal).
             self.offset_ = -1.5
         else:
-            self.offset_ = scoreatpercentile(
-                self.negative_outlier_factor_, 100. * self._contamination)
+            self.offset_ = np.percentile(self.negative_outlier_factor_,
+                                         100. * self._contamination)
 
         return self

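As in the other outlier detectors touched by this commit, offset_ becomes a plain percentile of the training scores (here negative_outlier_factor_). A usage sketch under the same assumptions:

import numpy as np
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.RandomState(0)
X = rng.randn(200, 2)
lof = LocalOutlierFactor(n_neighbors=20, contamination=0.1)
# offset_ sits at the 10th percentile of the training LOF scores, so
# about 10% of points are flagged as outliers.
print((lof.fit_predict(X) == -1).mean())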
