@lebigot
Created July 17, 2018 13:01
List of places in the scikit-learn code base with either a raise statement or a function call whose name contains "warn" or "Warn", *and* some possible __repr__ triggers (scikit-learn rev. a3f8e65de)

Scikit-learn code fragments

This document contains scikit-learn code fragments:

  • that contain a raise statement, or a call to a function whose name contains "warn"/"Warn",
  • and that also contain some mechanism that triggers a __repr__ call (repr, %r, {!r}, and variations thereof like %(name)r or {name!r}); some non-__repr__ mechanisms might be displayed too. A sketch of one way to locate such fragments follows this list.
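
For reference, all of the constructs above (repr(...), %r, {!r} and their named variants) end up calling the object's __repr__ when the message is built. As a rough illustration of how a listing like this can be produced, here is a minimal, hypothetical scanner sketch (not the actual script behind this gist; it assumes Python 3.8+ for ast.get_source_segment): it walks a module's AST and reports every raise statement or "warn"-named call whose source text contains one of these triggers.

    # Hypothetical helper (not the script used to build this gist): list raise
    # statements and "warn"-named calls whose source contains a likely
    # __repr__ trigger. Heuristic only; it can over-match (e.g. "%radius").
    import ast
    import re
    import sys

    # %r / %(name)r, {!r} / {name!r}, and explicit repr(...) calls.
    REPR_TRIGGER = re.compile(r"%\(?\w*\)?r|\{\w*!r\}|\brepr\(")


    def _is_warn_call(node):
        """True for calls whose function name contains 'warn' (e.g. warnings.warn)."""
        if not isinstance(node, ast.Call):
            return False
        name = getattr(node.func, "attr", None) or getattr(node.func, "id", None) or ""
        return "warn" in name.lower()


    def find_repr_triggers(path):
        """Yield (lineno, col, snippet) for matching nodes in one source file."""
        with open(path, encoding="utf-8") as f:
            source = f.read()
        for node in ast.walk(ast.parse(source)):
            if isinstance(node, ast.Raise) or _is_warn_call(node):
                snippet = ast.get_source_segment(source, node) or ""  # Python 3.8+
                if REPR_TRIGGER.search(snippet):
                    yield node.lineno, node.col_offset, snippet


    if __name__ == "__main__":
        for lineno, col, snippet in find_repr_triggers(sys.argv[1]):
            print("Line %d, col. %d:\n%s\n" % (lineno, col, snippet))

Run over a single file (e.g. python scan.py sklearn/base.py, where scan.py is whatever name the sketch is saved under), it prints locations in roughly the same "Line N, col. M" form used in the listing below; looping over a whole source tree is left out for brevity.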

__check_build/__init__.py

__check_build/setup.py

__init__.py

_build_utils/__init__.py

_config.py

base.py

  • Line 55, col. 12 in clone():

      raise TypeError(
          "Cannot clone object '%s' (type %s): it does not seem to be a scikit-learn estimator as it does not implement a 'get_params' methods."
           % (repr(estimator), type(estimator)))
    

calibration.py

cluster/bicluster.py

cluster/mean_shift_.py

cluster/hierarchical.py

cluster/tests/test_mean_shift.py

cluster/tests/test_k_means.py

cluster/tests/test_feature_agglomeration.py

cluster/tests/test_dbscan.py

cluster/tests/test_birch.py

cluster/tests/test_affinity_propagation.py

cluster/tests/__init__.py

cluster/tests/test_hierarchical.py

cluster/tests/common.py

cluster/tests/test_bicluster.py

  • Line 211, col. 4 in test_perfect_checkerboard():

      raise SkipTest(
          'This test is failing on the buildbot, but cannot reproduce. Temporarily disabling it until it can be reproduced and  fixed.'
          )
    

cluster/tests/test_spectral.py

cluster/_feature_agglomeration.py

cluster/__init__.py

cluster/k_means_.py

  • Line 329, col. 8 in k_means():

      raise ValueError(
          "precompute_distances should be 'auto' or True/False, but a value of %r was passed"
           % precompute_distances)
    

cluster/affinity_propagation_.py

  • Line 112, col. 8 in affinity_propagation():

      raise ValueError('S must be a square array (shape=%s)' % repr(S.shape))
    

cluster/setup.py

cluster/spectral.py

cluster/birch.py

cluster/dbscan_.py

compose/_target.py

compose/tests/test_target.py

compose/tests/__init__.py

compose/tests/test_column_transformer.py

compose/__init__.py

compose/_column_transformer.py

covariance/graph_lasso_.py

covariance/robust_covariance.py

covariance/tests/test_graph_lasso.py

covariance/tests/__init__.py

covariance/tests/test_covariance.py

covariance/tests/test_robust_covariance.py

covariance/tests/test_graphical_lasso.py

covariance/tests/test_elliptic_envelope.py

covariance/__init__.py

covariance/elliptic_envelope.py

covariance/empirical_covariance_.py

covariance/shrunk_covariance_.py

cross_decomposition/tests/test_pls.py

cross_decomposition/tests/__init__.py

cross_decomposition/__init__.py

cross_decomposition/cca_.py

cross_decomposition/pls_.py

datasets/kddcup99.py

datasets/olivetti_faces.py

datasets/svmlight_format.py

  • Line 440, col. 12 in dump_svmlight_file():

      raise ValueError('expected y of shape (n_samples, 1), got %r' % (yval.shape,))
    
  • Line 444, col. 12 in dump_svmlight_file():

      raise ValueError('expected y of shape (n_samples,), got %r' % (yval.shape,))
    
  • Line 449, col. 8 in dump_svmlight_file():

      raise ValueError(
          'X.shape[0] and y.shape[0] should be the same, got %r and %r instead.' %
          (Xval.shape[0], yval.shape[0]))
    
  • Line 472, col. 12 in dump_svmlight_file():

      raise ValueError('expected query_id of shape (n_samples,), got %r' % (
          query_id.shape,))
    

datasets/samples_generator.py

  • Line 792, col. 12 in make_blobs():

      raise ValueError('Parameter `centers` must be array-like. Got {!r} instead'
          .format(centers))
    

datasets/mlcomp.py

datasets/covtype.py

datasets/tests/test_california_housing.py

datasets/tests/test_common.py

datasets/tests/test_lfw.py

datasets/tests/test_mldata.py

datasets/tests/test_rcv1.py

datasets/tests/test_kddcup99.py

datasets/tests/__init__.py

datasets/tests/test_covtype.py

datasets/tests/test_svmlight_format.py

datasets/tests/test_20news.py

datasets/tests/test_samples_generator.py

datasets/tests/test_base.py

datasets/lfw.py

  • Line 385, col. 12 in _fetch_lfw_pairs():

      raise ValueError('invalid line %d: %r' % (i + 1, components))
    
  • Line 507, col. 8 in fetch_lfw_pairs():

      raise ValueError("subset='%s' is invalid: should be one of %r" % (subset,
          list(sorted(label_filenames.keys()))))
    

datasets/__init__.py

datasets/rcv1.py

datasets/twenty_newsgroups.py

  • Line 381, col. 8 in fetch_20newsgroups_vectorized():

      raise ValueError(
          "%r is not a valid subset: should be one of ['train', 'test', 'all']" %
          subset)
    

datasets/setup.py

datasets/mldata.py

datasets/species_distributions.py

datasets/california_housing.py

datasets/base.py

decomposition/dict_learning.py

  • Line 518, col. 8 in dict_learning():

      raise ValueError('Coding method %r not supported as a fit algorithm.' % method)
    

decomposition/factor_analysis.py

decomposition/kernel_pca.py

decomposition/online_lda.py

  • Line 301, col. 12 in LatentDirichletAllocation():

      raise ValueError("Invalid 'n_components' parameter: %r" % self._n_components)
    
  • Line 305, col. 12 in LatentDirichletAllocation():

      raise ValueError("Invalid 'total_samples' parameter: %r" % self.total_samples)
    
  • Line 309, col. 12 in LatentDirichletAllocation():

      raise ValueError("Invalid 'learning_offset' parameter: %r"
          % self.learning_offset)
    
  • Line 313, col. 12 in LatentDirichletAllocation():

      raise ValueError("Invalid 'learning_method' parameter: %r"
          % self.learning_method)
    

decomposition/fastica_.py

  • Line 291, col. 8 in fastica():

      raise exc(
          "Unknown function %r; should be one of 'logcosh', 'exp', 'cube' or callable"
           % fun)
    

decomposition/tests/test_dict_learning.py

decomposition/tests/test_fastica.py

decomposition/tests/test_truncated_svd.py

decomposition/tests/__init__.py

decomposition/tests/test_nmf.py

decomposition/tests/test_online_lda.py

decomposition/tests/test_kernel_pca.py

decomposition/tests/test_sparse_pca.py

decomposition/tests/test_factor_analysis.py

decomposition/tests/test_incremental_pca.py

decomposition/tests/test_pca.py

decomposition/incremental_pca.py

  • Line 220, col. 12 in IncrementalPCA():

      raise ValueError(
          'n_components=%r invalid for n_features=%d, need more rows than columns for IncrementalPCA processing'
           % (self.n_components, n_features))
    
  • Line 224, col. 12 in IncrementalPCA():

      raise ValueError(
          'n_components=%r must be less or equal to the batch number of samples %d.'
           % (self.n_components, n_samples))
    

decomposition/__init__.py

decomposition/sparse_pca.py

decomposition/truncated_svd.py

  • Line 179, col. 12 in TruncatedSVD():

      raise ValueError('unknown algorithm %r' % self.algorithm)
    

decomposition/setup.py

decomposition/pca.py

  • Line 422, col. 12 in PCA():

      raise ValueError(
          "n_components=%r must be between 0 and min(n_samples, n_features)=%r with svd_solver='full'"
           % (n_components, min(n_samples, n_features)))
    
  • Line 428, col. 16 in PCA():

      raise ValueError(
          'n_components=%r must be of type int when greater than or equal to 1, was of type=%r'
           % (n_components, type(n_components)))
    
  • Line 483, col. 12 in PCA():

      raise ValueError("n_components=%r cannot be a string with svd_solver='%s'" %
          (n_components, svd_solver))
    
  • Line 487, col. 12 in PCA():

      raise ValueError(
          "n_components=%r must be between 1 and min(n_samples, n_features)=%r with svd_solver='%s'"
           % (n_components, min(n_samples, n_features), svd_solver))
    
  • Line 493, col. 12 in PCA():

      raise ValueError(
          'n_components=%r must be of type int when greater than or equal to 1, was of type=%r'
           % (n_components, type(n_components)))
    
  • Line 498, col. 12 in PCA():

      raise ValueError(
          "n_components=%r must be strictly less than min(n_samples, n_features)=%r with svd_solver='%s'"
           % (n_components, min(n_samples, n_features), svd_solver))
    

decomposition/nmf.py

  • Line 205, col. 8 in _check_string_param():

      raise ValueError('Invalid solver parameter: got %r instead of one of %r' %
          (solver, allowed_solver))
    
  • Line 211, col. 8 in _check_string_param():

      raise ValueError(
          'Invalid regularization parameter: got %r instead of one of %r' % (
          regularization, allowed_regularization))
    
  • Line 217, col. 8 in _check_string_param():

      raise ValueError(
          'Invalid beta_loss parameter: solver %r does not handle beta_loss = %r' %
          (solver, beta_loss))
    
  • Line 241, col. 8 in _beta_loss_to_float():

      raise ValueError(
          'Invalid beta_loss parameter: got %r instead of one of %r, or a float.' %
          (beta_loss, allowed_beta_loss.keys()))
    
  • Line 378, col. 8 in _initialize_nmf():

      raise ValueError('Invalid init parameter: got %r instead of one of %r' % (
          init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
    
  • Line 1003, col. 8 in non_negative_factorization():

      raise ValueError(
          'Number of components must be a positive integer; got (n_components=%r)' %
          n_components)
    
  • Line 1006, col. 8 in non_negative_factorization():

      raise ValueError(
          'Maximum number of iterations must be a positive integer; got (max_iter=%r)'
           % max_iter)
    
  • Line 1009, col. 8 in non_negative_factorization():

      raise ValueError(
          'Tolerance for stopping criteria must be positive; got (tol=%r)' % tol)
    

decomposition/base.py

discriminant_analysis.py

dummy.py

ensemble/bagging.py

ensemble/gradient_boosting.py

  • Line 70, col. 12 in QuantileEstimator():

      raise ValueError('`alpha` must be in (0, 1.0) but was %r' % alpha)
    
  • Line 267, col. 12 in RegressionLossFunction():

      raise ValueError('``n_classes`` must be 1 for regression but was %r' %
          n_classes)
    
  • Line 813, col. 12 in BaseGradientBoosting():

      raise ValueError('n_estimators must be greater than 0 but was %r'
          % self.n_estimators)
    
  • Line 817, col. 12 in BaseGradientBoosting():

      raise ValueError('learning_rate must be greater than 0 but was %r'
          % self.learning_rate)
    
  • Line 837, col. 12 in BaseGradientBoosting():

      raise ValueError('subsample must be in (0,1] but was %r' % self.subsample)
    
  • Line 847, col. 20 in BaseGradientBoosting():

      raise ValueError(
          'init=%r must be valid BaseEstimator and support both fit and predict' %
          self.init)
    
  • Line 852, col. 12 in BaseGradientBoosting():

      raise ValueError('alpha must be in (0.0, 1.0) but was %r' % self.alpha)
    
  • Line 868, col. 16 in BaseGradientBoosting():

      raise ValueError(
          "Invalid value for max_features: %r. Allowed string values are 'auto', 'sqrt' or 'log2'."
           % self.max_features)
    
  • Line 886, col. 12 in BaseGradientBoosting():

      raise ValueError(
          'n_iter_no_change should either be None or an integer. %r was passed' %
          self.n_iter_no_change)
    
  • Line 892, col. 12 in BaseGradientBoosting():

      raise ValueError("'presort' should be in {}. Got {!r} instead.".format(
          allowed_presort, self.presort))
    
  • Line 1685, col. 12 in GradientBoostingClassifier():

      raise AttributeError('loss=%r does not support predict_proba' % self.loss)
    
  • Line 1736, col. 12 in GradientBoostingClassifier():

      raise AttributeError('loss=%r does not support predict_proba' % self.loss)
    

ensemble/tests/test_partial_dependence.py

ensemble/tests/test_forest.py

ensemble/tests/test_bagging.py

ensemble/tests/__init__.py

ensemble/tests/test_gradient_boosting_loss_functions.py

ensemble/tests/test_weight_boosting.py

ensemble/tests/test_gradient_boosting.py

ensemble/tests/test_iforest.py

ensemble/tests/test_base.py

ensemble/tests/test_voting_classifier.py

ensemble/voting_classifier.py

  • Line 162, col. 12 in VotingClassifier():

      raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
          % self.voting)
    
  • Line 250, col. 12 in VotingClassifier():

      raise AttributeError('predict_proba is not available when voting=%r'
          % self.voting)
    

ensemble/weight_boosting.py

ensemble/__init__.py

ensemble/setup.py

ensemble/forest.py

ensemble/base.py

ensemble/partial_dependence.py

ensemble/iforest.py

  • Line 212, col. 16 in IsolationForest():

      raise ValueError('max_samples must be in (0, 1], got %r' % self.max_samples)
    

exceptions.py

feature_extraction/tests/test_dict_vectorizer.py

feature_extraction/tests/test_image.py

feature_extraction/tests/__init__.py

feature_extraction/tests/test_text.py

feature_extraction/tests/test_feature_hasher.py

feature_extraction/__init__.py

feature_extraction/dict_vectorizer.py

feature_extraction/setup.py

feature_extraction/text.py

  • Line 817, col. 16 in CountVectorizer():

      raise ValueError(u'max_features=%r, neither a positive integer nor None' %
          max_features)
    

feature_extraction/hashing.py

  • Line 106, col. 12 in FeatureHasher():

      raise TypeError('n_features must be integral, got %r (%s).' % (n_features,
          type(n_features)))
    
  • Line 112, col. 12 in FeatureHasher():

      raise ValueError("input_type must be 'dict', 'pair' or 'string', got %r." %
          input_type)
    

feature_extraction/image.py

  • Line 239, col. 12 in _compute_n_patches():

      raise ValueError('Invalid value for max_patches: %r' % max_patches)
    

feature_extraction/stop_words.py

feature_selection/rfe.py

feature_selection/tests/test_variance_threshold.py

feature_selection/tests/test_rfe.py

feature_selection/tests/test_from_model.py

feature_selection/tests/__init__.py

feature_selection/tests/test_feature_select.py

feature_selection/tests/test_chi2.py

feature_selection/tests/test_base.py

feature_selection/tests/test_mutual_info.py

feature_selection/__init__.py

feature_selection/variance_threshold.py

feature_selection/univariate_selection.py

  • Line 416, col. 12 in SelectPercentile():

      raise ValueError('percentile should be >=0, <=100; got %r' % self.percentile)
    
  • Line 490, col. 12 in SelectKBest():

      raise ValueError(
          "k should be >=0, <= n_features = %d; got %r. Use k='all' to return all features."
           % (X.shape[1], self.k))
    
  • Line 740, col. 12 in GenericUnivariateSelect():

      raise ValueError(
          'The mode passed should be one of %s, %r, (type %s) was passed.' % (
          self._selection_modes.keys(), self.mode, type(self.mode)))
    

feature_selection/mutual_info_.py

feature_selection/base.py

feature_selection/from_model.py

gaussian_process/gpr.py

gaussian_process/tests/__init__.py

gaussian_process/tests/test_gpr.py

gaussian_process/tests/test_kernels.py

gaussian_process/tests/test_gpc.py

gaussian_process/correlation_models.py

gaussian_process/__init__.py

gaussian_process/regression_models.py

gaussian_process/gpc.py

gaussian_process/kernels.py

impute.py

  • Line 187, col. 12 in SimpleImputer():

      raise ValueError(
          'SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'
          .format(X.dtype))
    

isotonic.py

kernel_approximation.py

kernel_ridge.py

linear_model/ransac.py

linear_model/perceptron.py

linear_model/least_angle.py

linear_model/logistic.py

  • Line 1210, col. 12 in LogisticRegression():

      raise ValueError('Penalty term must be positive; got (C=%r)' % self.C)
    
  • Line 1213, col. 12 in LogisticRegression():

      raise ValueError(
          'Maximum number of iteration must be positive; got (max_iter=%r)' %
          self.max_iter)
    
  • Line 1216, col. 12 in LogisticRegression():

      raise ValueError(
          'Tolerance for stopping criteria must be positive; got (tol=%r)'
          % self.tol)
    
  • Line 1254, col. 12 in LogisticRegression():

      raise ValueError(
          'This solver needs samples of at least 2 classes in the data, but the data contains only one class: %r'
           % classes_[0])
    
  • Line 1618, col. 12 in LogisticRegressionCV():

      raise ValueError(
          'Maximum number of iteration must be positive; got (max_iter=%r)' %
          self.max_iter)
    
  • Line 1621, col. 12 in LogisticRegressionCV():

      raise ValueError(
          'Tolerance for stopping criteria must be positive; got (tol=%r)'
          % self.tol)
    
  • Line 1655, col. 12 in LogisticRegressionCV():

      raise ValueError(
          'This solver needs samples of at least 2 classes in the data, but the data contains only one class: %r'
           % classes[0])
    

linear_model/coordinate_descent.py

  • Line 479, col. 12 in enet_path():

      raise ValueError(
          "Precompute should be one of True, False, 'auto' or array-like. Got %r" %
          precompute)
    
  • Line 701, col. 12 in ElasticNet():

      raise ValueError(
          'precompute should be one of True, False or array-like. Got %r'
          % self.precompute)
    
  • Line 1091, col. 12 in LinearModelCV():

      raise ValueError('y has 0 samples: %r' % y)
    

linear_model/tests/test_logistic.py

linear_model/tests/test_sgd.py

linear_model/tests/test_huber.py

linear_model/tests/test_bayes.py

linear_model/tests/test_theil_sen.py

linear_model/tests/test_ridge.py

linear_model/tests/test_passive_aggressive.py

linear_model/tests/__init__.py

linear_model/tests/test_randomized_l1.py

linear_model/tests/test_least_angle.py

linear_model/tests/test_ransac.py

linear_model/tests/test_coordinate_descent.py

linear_model/tests/test_sparse_coordinate_descent.py

linear_model/tests/test_perceptron.py

linear_model/tests/test_sag.py

linear_model/tests/test_base.py

linear_model/tests/test_omp.py

linear_model/randomized_l1.py

  • Line 43, col. 8 in _resample_model():

      raise ValueError("'scaling' should be between 0 and 1. Got %r instead." %
          scaling)
    
  • Line 111, col. 12 in BaseRandomizedLinearModel():

      raise ValueError(
          "'memory' should either be a string or a sklearn.externals.joblib.Memory instance, got 'memory={!r}' instead."
          .format(type(memory)))
    
  • Line 630, col. 8 in lasso_stability_path():

      raise ValueError(
          "Parameter 'scaling' should be between 0 and 1. Got %r instead." % scaling)
    

linear_model/__init__.py

linear_model/setup.py

linear_model/sag.py

linear_model/bayes.py

linear_model/omp.py

linear_model/passive_aggressive.py

linear_model/stochastic_gradient.py

  • Line 282, col. 12 in BaseSGD():

      raise ValueError(
          'Splitting %d samples into a train set and a validation set with validation_fraction=%r led to an empty set (%d and %d samples). Please either change validation_fraction, increase number of samples, or disable early_stopping.'
           % (n_samples, self.validation_fraction, X_train.shape[0], X_val.shape[0]))
    
  • Line 962, col. 12 in SGDClassifier():

      raise AttributeError('probability estimates are not available for loss=%r' %
          self.loss)
    
  • Line 1043, col. 12 in SGDClassifier():

      raise NotImplementedError(
          "predict_(log_)proba only supported when loss='log' or loss='modified_huber' (%r given)"
           % self.loss)
    

linear_model/huber.py

linear_model/ridge.py

linear_model/theil_sen.py

linear_model/base.py

manifold/locally_linear.py

manifold/t_sne.py

manifold/mds.py

manifold/tests/test_isomap.py

manifold/tests/test_mds.py

manifold/tests/__init__.py

manifold/tests/test_t_sne.py

manifold/tests/test_spectral_embedding.py

manifold/tests/test_locally_linear.py

manifold/__init__.py

manifold/isomap.py

manifold/setup.py

manifold/spectral_embedding_.py

metrics/cluster/bicluster.py

metrics/cluster/unsupervised.py

metrics/cluster/tests/test_supervised.py

metrics/cluster/tests/test_common.py

metrics/cluster/tests/__init__.py

metrics/cluster/tests/test_bicluster.py

metrics/cluster/tests/test_unsupervised.py

metrics/cluster/__init__.py

metrics/cluster/setup.py

metrics/cluster/supervised.py

  • Line 50, col. 8 in check_clusterings():

      raise ValueError('labels_true must be 1D: shape is %r' % (labels_true.shape,))
    
  • Line 53, col. 8 in check_clusterings():

      raise ValueError('labels_pred must be 1D: shape is %r' % (labels_pred.shape,))
    

metrics/regression.py

  • Line 94, col. 12 in _check_reg_targets():

      raise ValueError(
          "Allowed 'multioutput' string values are {}. You provided multioutput={!r}"
          .format(allowed_multioutput_str, multioutput))
    

metrics/classification.py

  • Line 1041, col. 20 in precision_recall_fscore_support():

      raise ValueError('pos_label=%r is not a valid label: %r' % (pos_label,
          present_labels))
    
  • Line 1048, col. 8 in precision_recall_fscore_support():

      warnings.warn(
          "Note that pos_label (set to %r) is ignored when average != 'binary' (got %r). You may use labels=[pos_label] to specify a single positive class."
           % (pos_label, average), UserWarning)
    

metrics/tests/test_common.py

metrics/tests/test_score_objects.py

metrics/tests/__init__.py

metrics/tests/test_ranking.py

metrics/tests/test_pairwise.py

metrics/tests/test_classification.py

metrics/tests/test_regression.py

metrics/__init__.py

metrics/setup.py

metrics/pairwise.py

  • Line 158, col. 8 in check_paired_arrays():

      raise ValueError(
          'X and Y should be of same shape. They were respectively %r and %r long.' %
          (X.shape, Y.shape))
    
  • Line 507, col. 12 in manhattan_distances():

      raise TypeError('sum_over_features=%r not supported for sparse matrices' %
          sum_over_features)
    
  • Line 1131, col. 8 in _check_chunk_size():

      raise TypeError(
          'reduce_func returned %r. Expected sequence(s) of length %d.' % (
          reduced if is_tuple else reduced[0], chunk_size))
    
  • Line 1564, col. 8 in pairwise_kernels():

      raise ValueError('Unknown kernel %r' % metric)
    

metrics/ranking.py

  • Line 311, col. 12 in roc_auc_score():

      raise ValueError('Expected max_frp in range ]0, 1], got: %r' % max_fpr)
    

metrics/base.py

metrics/scorer.py

  • Line 218, col. 12 in get_scorer():

      raise ValueError('%r is not a valid scoring value. Valid options are %s' %
          (scoring, sorted(SCORERS.keys())))
    
  • Line 257, col. 8 in check_scoring():

      raise TypeError(
          "estimator should be an estimator implementing 'fit' method, %r was passed"
           % estimator)
    
  • Line 268, col. 12 in check_scoring():

      raise ValueError(
          'scoring value %r looks like it is a metric function rather than a scorer. A scorer should require an estimator as its first parameter. Please use `make_scorer` to convert a metric to a scorer.'
           % scoring)
    
  • Line 280, col. 12 in check_scoring():

      raise TypeError(
          "If no scoring is specified, the estimator passed should have a 'score' method. The estimator %r does not."
           % estimator)
    
  • Line 289, col. 8 in check_scoring():

      raise ValueError(
          'scoring value should either be a callable, string or None. %r was passed'
           % scoring)
    
  • Line 352, col. 16 in _check_multimetric_scoring():

      raise ValueError(err_msg + 
          'Duplicate elements were found in the given list. %r' % repr(scoring))
    
  • Line 357, col. 24 in _check_multimetric_scoring():

      raise ValueError(err_msg + 
          'One or more of the elements were callables. Use a dict of score name mapped to the scorer callable. Got %r'
           % repr(scoring))
    
  • Line 363, col. 24 in _check_multimetric_scoring():

      raise ValueError(err_msg + 
          'Non-string types were found in the given list. Got %r' % repr(scoring))
    
  • Line 370, col. 16 in _check_multimetric_scoring():

      raise ValueError(err_msg + 'Empty list was given. %r' % repr(scoring))
    
  • Line 376, col. 16 in _check_multimetric_scoring():

      raise ValueError(
          'Non-string types were found in the keys of the given dict. scoring=%r' %
          repr(scoring))
    
  • Line 379, col. 16 in _check_multimetric_scoring():

      raise ValueError('An empty dict was passed. %r' % repr(scoring))
    

mixture/tests/test_mixture.py

mixture/tests/test_bayesian_mixture.py

mixture/tests/__init__.py

mixture/tests/test_gaussian_mixture.py

mixture/__init__.py

mixture/bayesian_mixture.py

mixture/gaussian_mixture.py

mixture/base.py

model_selection/_search.py

  • Line 97, col. 12 in ParameterGrid():

      raise TypeError('Parameter grid is not a dict or a list ({!r})'.format(
          param_grid))
    
  • Line 108, col. 16 in ParameterGrid():

      raise TypeError('Parameter grid is not a dict ({!r})'.format(grid))
    
  • Line 112, col. 20 in ParameterGrid():

      raise TypeError('Parameter grid value is not iterable (key={!r}, value={!r})'
          .format(key, grid[key]))
    
  • Line 626, col. 16 in BaseSearchCV():

      raise ValueError(
          'For multi-metric scoring, the parameter refit must be set to a scorer key to refit an estimator with the best parameter setting on the whole data and make the best_* attributes available for that metric. If this is not needed, refit should be set to False explicitly. %r was passed.'
           % self.refit)
    

model_selection/tests/test_split.py

model_selection/tests/test_validation.py

  • Line 990, col. 12 in test_learning_curve():

      raise RuntimeError('Unexpected warning: %r' % w[0].message)
    
  • Line 1007, col. 12 in test_learning_curve():

      raise RuntimeError('Unexpected warning: %r' % w[0].message)
    
  • Line 1198, col. 8 in test_validation_curve():

      raise RuntimeError('Unexpected warning: %r' % w[0].message)
    

model_selection/tests/test_search.py

model_selection/tests/__init__.py

model_selection/tests/common.py

model_selection/__init__.py

model_selection/_validation.py

  • Line 581, col. 12 in _score():

      raise ValueError(
          'scoring must return a number, got %s (%s) instead. (scorer=%r)'
          % (str(score), type(score), scorer))
    

model_selection/_split.py

  • Line 579, col. 12 in StratifiedKFold():

      raise ValueError('Supported target types are: {}. Got {!r} instead.'
          .format(allowed_target_types, type_of_target_y))
    
  • Line 1649, col. 12 in _validate_shuffle_split_init():

      raise ValueError('Invalid value for test_size: %r' % test_size)
    
  • Line 1664, col. 12 in _validate_shuffle_split_init():

      raise ValueError('Invalid value for train_size: %r' % train_size)
    

multiclass.py

multioutput.py

naive_bayes.py

neighbors/lof.py

neighbors/regression.py

neighbors/approximate.py

neighbors/unsupervised.py

neighbors/classification.py

  • Line 365, col. 12 in RadiusNeighborsClassifier():

      raise ValueError(
          'No neighbors found for test samples %r, you can try using larger radius, give a label for outliers, or consider removing them from your dataset.'
           % outliers)
    

neighbors/graph.py

neighbors/kde.py

neighbors/tests/test_nearest_centroid.py

neighbors/tests/test_kde.py

neighbors/tests/test_dist_metrics.py

neighbors/tests/test_lof.py

neighbors/tests/test_kd_tree.py

neighbors/tests/test_approximate.py

neighbors/tests/__init__.py

neighbors/tests/test_neighbors.py

neighbors/tests/test_quad_tree.py

neighbors/tests/test_ball_tree.py

neighbors/__init__.py

neighbors/setup.py

neighbors/nearest_centroid.py

neighbors/base.py

neural_network/_base.py

neural_network/multilayer_perceptron.py

neural_network/tests/test_mlp.py

neural_network/tests/__init__.py

neural_network/tests/test_rbm.py

neural_network/tests/test_stochastic_optimizers.py

neural_network/__init__.py

neural_network/_stochastic_optimizers.py

neural_network/rbm.py

pipeline.py

preprocessing/_encoders.py

  • Line 425, col. 16 in OneHotEncoder():

      raise TypeError(
          "Wrong type for parameter `n_values`. Expected 'auto', int or array of ints, got %r"
           % type(X))
    

preprocessing/tests/test_common.py

preprocessing/tests/test_label.py

preprocessing/tests/test_encoders.py

preprocessing/tests/test_function_transformer.py

preprocessing/tests/test_imputation.py

preprocessing/tests/__init__.py

preprocessing/tests/test_discretization.py

preprocessing/tests/test_data.py

preprocessing/tests/test_base.py

preprocessing/__init__.py

preprocessing/_discretization.py

  • Line 142, col. 12 in KBinsDiscretizer():

      raise ValueError("Valid options for 'encode' are {}. Got encode={!r} instead."
          .format(valid_encode, self.encode))
    
  • Line 147, col. 12 in KBinsDiscretizer():

      raise ValueError(
          "Valid options for 'strategy' are {}. Got strategy={!r} instead."
          .format(valid_strategy, self.strategy))
    
  • Line 288, col. 12 in KBinsDiscretizer():

      raise ValueError(
          "inverse_transform only supports 'encode = ordinal'. Got encode={!r} instead."
          .format(self.encode))
    

preprocessing/imputation.py

preprocessing/label.py

  • Line 410, col. 12 in LabelBinarizer():

      raise ValueError('y has 0 samples: %r' % y)
    
  • Line 584, col. 12 in label_binarize():

      raise ValueError('y has 0 samples: %r' % y)
    

preprocessing/base.py

preprocessing/data.py

preprocessing/_function_transformer.py

random_projection.py

  • Line 123, col. 8 in johnson_lindenstrauss_min_dim():

      raise ValueError('The JL bound is defined for eps in ]0, 1[, got %r' % eps)
    
  • Line 127, col. 8 in johnson_lindenstrauss_min_dim():

      raise ValueError(
          'The JL bound is defined for n_samples greater than zero, got %r' %
          n_samples)
    
  • Line 141, col. 8 in _check_density():

      raise ValueError('Expected density in range ]0, 1], got: %r' % density)
    

semi_supervised/tests/__init__.py

semi_supervised/tests/test_label_propagation.py

semi_supervised/__init__.py

semi_supervised/label_propagation.py

setup.py

svm/tests/test_sparse.py

svm/tests/test_bounds.py

svm/tests/__init__.py

svm/tests/test_svm.py

svm/__init__.py

svm/setup.py

svm/classes.py

  • Line 224, col. 12 in LinearSVC():

      raise ValueError('Penalty term must be positive; got (C=%r)' % self.C)
    
  • Line 412, col. 12 in LinearSVR():

      raise ValueError('Penalty term must be positive; got (C=%r)' % self.C)
    

svm/bounds.py

svm/base.py

  • Line 167, col. 12 in BaseLibSVM():

      raise ValueError(
          """sample_weight and X have incompatible shapes: %r vs %r
      Note: Sparse matrices cannot be indexed w/boolean masks (use `indices=True` in CV)."""
           % (sample_weight.shape, X.shape))
    
  • Line 465, col. 12 in BaseLibSVM():

      raise ValueError('cannot use sparse input in %r trained on dense data' %
          type(self).__name__)
    
  • Line 744, col. 8 in _get_liblinear_solver_type():

      raise ValueError(
          '`multi_class` must be one of `ovr`, `crammer_singer`, got %r' %
          multi_class)
    
  • Line 764, col. 4 in _get_liblinear_solver_type():

      raise ValueError(
          'Unsupported set of arguments: %s, Parameters: penalty=%r, loss=%r, dual=%r'
           % (error_string, penalty, loss, dual))
    
  • Line 870, col. 12 in _fit_liblinear():

      raise ValueError(
          'This solver needs samples of at least 2 classes in the data, but the data contains only one class: %r'
           % classes_[0])
    
  • Line 887, col. 12 in _fit_liblinear():

      raise ValueError(
          'Intercept scaling is %r but needs to be greater than 0. To disable fitting an intercept, set fit_intercept=False.'
           % intercept_scaling)
    

tests/test_metaestimators.py

  • Line 56, col. 16 in test_metaestimator_delegation():

      raise AttributeError('%r is hidden' % obj.hidden_method)
    

tests/test_common.py

tests/test_multioutput.py

tests/test_isotonic.py

tests/test_docstring_parameters.py

tests/__init__.py

tests/test_dummy.py

tests/test_check_build.py

tests/test_discriminant_analysis.py

tests/test_multiclass.py

tests/test_config.py

tests/test_kernel_ridge.py

tests/test_calibration.py

tests/test_naive_bayes.py

tests/test_base.py

tests/test_init.py

tests/test_impute.py

tests/test_pipeline.py

tests/test_random_projection.py

tests/test_kernel_approximation.py

tree/tree.py

  • Line 244, col. 12 in BaseDecisionTree():

      raise ValueError('max_leaf_nodes must be integral number but was %r' %
          max_leaf_nodes)
    
  • Line 297, col. 12 in BaseDecisionTree():

      raise ValueError("'presort' should be in {}. Got {!r} instead.".format(
          allowed_presort, self.presort))
    

tree/tests/test_tree.py

tree/tests/__init__.py

tree/tests/test_export.py

tree/__init__.py

tree/export.py

tree/setup.py

utils/optimize.py

utils/fixes.py

utils/deprecation.py

utils/estimator_checks.py

utils/multiclass.py

  • Line 96, col. 8 in unique_labels():

      raise ValueError('Unknown label type: %s' % repr(ys))
    
  • Line 171, col. 8 in check_classification_targets():

      raise ValueError('Unknown label type: %r' % y_type)
    
  • Line 242, col. 8 in type_of_target():

      raise ValueError(
          'Expected array-like (array or non-string sequence), got %r' % y)
    
  • Line 262, col. 12 in type_of_target():

      raise ValueError(
          'You appear to be using a legacy multi-label data representation. Sequence of sequences are no longer supported; use a binary array or sparse matrix instead.'
          )
    
  • Line 314, col. 16 in _check_partial_fit_first_call():

      raise ValueError(
          '`classes=%r` is not the same as on last call to partial_fit, was: %r' %
          (classes, clf.classes_))
    

utils/graph.py

utils/sparsetools/tests/__init__.py

utils/sparsetools/__init__.py

utils/sparsetools/setup.py

utils/tests/test_deprecation.py

utils/tests/test_bench.py

utils/tests/test_utils.py

utils/tests/test_metaestimators.py

utils/tests/test_seq_dataset.py

utils/tests/test_stats.py

utils/tests/test_validation.py

utils/tests/test_optimize.py

utils/tests/test_shortest_path.py

utils/tests/test_fast_dict.py

utils/tests/__init__.py

utils/tests/test_class_weight.py

utils/tests/test_estimator_checks.py

utils/tests/test_multiclass.py

utils/tests/test_fixes.py

utils/tests/test_sparsefuncs.py

utils/tests/test_murmurhash.py

utils/tests/test_linear_assignment.py

utils/tests/test_graph.py

utils/tests/test_extmath.py

utils/tests/test_random.py

utils/tests/test_testing.py

utils/__init__.py

  • Line 245, col. 8 in resample():

      raise ValueError('Unexpected kw arguments: %r' % options.keys())
    

utils/random.py

utils/bench.py

utils/_scipy_sparse_lsqr_backport.py

utils/setup.py

utils/_unittest_backport.py

  • Line 102, col. 20 in _AssertRaisesBaseContext():

      warnings.warn('%r is an invalid keyword argument for this function' % next(
          iter(kwargs)), DeprecationWarning, 3)
    

utils/mocking.py

utils/stats.py

utils/class_weight.py

  • Line 61, col. 12 in compute_class_weight():

      raise ValueError("class_weight must be dict, 'balanced', or None, got: %r" %
          class_weight)
    

utils/metaestimators.py

  • Line 63, col. 12 in _BaseComposition():

      raise ValueError('Names provided are not unique: {0!r}'.format(list(names)))
    
  • Line 67, col. 12 in _BaseComposition():

      raise ValueError('Estimator names conflict with constructor arguments: {0!r}'
          .format(sorted(invalid_names)))
    
  • Line 71, col. 12 in _BaseComposition():

      raise ValueError('Estimator names must not contain __: got {0!r}'.format(
          invalid_names))
    

utils/extmath.py

utils/testing.py

  • Line 341, col. 12 in _IgnoreWarnings():

      raise RuntimeError('Cannot enter %r twice' % self)
    
  • Line 351, col. 12 in _IgnoreWarnings():

      raise RuntimeError('Cannot exit %r without entering first' % self)
    
  • Line 391, col. 12 in assert_raise_message():

      raise AssertionError(
          'Error message does not include the expected string: %r. Observed error message: %r'
           % (message, error_message))
    
  • Line 655, col. 12 in all_estimators():

      raise ValueError(
          "Parameter type_filter must be 'classifier', 'regressor', 'transformer', 'cluster' or None, got %s."
           % repr(type_filter))
    

utils/arpack.py

utils/sparsefuncs.py

utils/linear_assignment_.py

utils/validation.py

  • Line 141, col. 12 in _num_samples():

      raise TypeError(
          'Singleton array %r cannot be considered a valid collection.' % x)
    
  • Line 226, col. 8 in check_consistent_length():

      raise ValueError(
          'Found input variables with inconsistent numbers of samples: %r'
          % [int(l) for l in lengths])
    
  • Line 492, col. 8 in check_array():

      raise ValueError(
          'force_all_finite should be a bool or "allow-nan". Got {!r} instead'
          .format(force_all_finite))
    
  • Line 569, col. 12 in check_array():

      raise ValueError(
          'Found array with %d sample(s) (shape=%s) while a minimum of %d is required%s.'
           % (n_samples, shape_repr, ensure_min_samples, context))
    
  • Line 577, col. 12 in check_array():

      raise ValueError(
          'Found array with %d feature(s) (shape=%s) while a minimum of %d is required%s.'
           % (n_features, shape_repr, ensure_min_features, context))
    
  • Line 803, col. 4 in check_random_state():

      raise ValueError(
          '%r cannot be used to seed a numpy.random.RandomState instance' % seed)
    