diff --git a/dask_ml/compose/_column_transformer.py b/dask_ml/compose/_column_transformer.py index e5344a54a..f2abdf7b4 100644 --- a/dask_ml/compose/_column_transformer.py +++ b/dask_ml/compose/_column_transformer.py @@ -135,7 +135,7 @@ class ColumnTransformer(sklearn.compose.ColumnTransformer): Examples -------- - >>> from dask_ml.compose import ColumnTransformer + >>> from dask_ml.compose import ColumnTransformer >>> from sklearn.preprocessing import Normalizer >>> ct = ColumnTransformer( ... [("norm1", Normalizer(norm='l1'), [0, 1]), diff --git a/dask_ml/datasets.py b/dask_ml/datasets.py index a561ee0d5..13c6313ed 100644 --- a/dask_ml/datasets.py +++ b/dask_ml/datasets.py @@ -142,7 +142,7 @@ def make_blobs( Examples -------- - >>> from dask_ml.datasets import make_blobs + >>> from dask_ml.datasets import make_blobs >>> X, y = make_blobs(n_samples=100000, chunks=10000) >>> X dask.array<..., shape=(100000, 2), dtype=float64, chunksize=(10000, 2)> diff --git a/dask_ml/decomposition/pca.py b/dask_ml/decomposition/pca.py index 3cf5133c9..50c060ca7 100644 --- a/dask_ml/decomposition/pca.py +++ b/dask_ml/decomposition/pca.py @@ -144,7 +144,7 @@ class PCA(sklearn.decomposition.PCA): -------- >>> import numpy as np >>> import dask.array as da - >>> from dask_ml.decomposition import PCA + >>> from dask_ml.decomposition import PCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> dX = da.from_array(X, chunks=X.shape) >>> pca = PCA(n_components=2) diff --git a/dask_ml/decomposition/truncated_svd.py b/dask_ml/decomposition/truncated_svd.py index a9c0b2be8..f94fd6112 100644 --- a/dask_ml/decomposition/truncated_svd.py +++ b/dask_ml/decomposition/truncated_svd.py @@ -100,7 +100,7 @@ def __init__( Examples -------- - >>> from dask_ml.decomposition import TruncatedSVD + >>> from dask_ml.decomposition import TruncatedSVD >>> import dask.array as da >>> X = da.random.normal(size=(1000, 20), chunks=(100, 20)) >>> svd = TruncatedSVD(n_components=5, 
n_iter=3, random_state=42) diff --git a/dask_ml/feature_extraction/text.py b/dask_ml/feature_extraction/text.py index a647ddee7..105c2a125 100644 --- a/dask_ml/feature_extraction/text.py +++ b/dask_ml/feature_extraction/text.py @@ -142,7 +142,7 @@ class CountVectorizer(sklearn.feature_extraction.text.CountVectorizer): The Dask-ML implementation currently requires that ``raw_documents`` is a :class:`dask.bag.Bag` of documents (lists of strings). - >>> from dask_ml.feature_extraction.text import CountVectorizer + >>> from dask_ml.feature_extraction.text import CountVectorizer >>> import dask.bag as db >>> from distributed import Client >>> client = Client() diff --git a/dask_ml/metrics/classification.py b/dask_ml/metrics/classification.py index d5f113f81..c1868b0f2 100644 --- a/dask_ml/metrics/classification.py +++ b/dask_ml/metrics/classification.py @@ -60,7 +60,7 @@ def accuracy_score( -------- >>> import dask.array as da >>> import numpy as np - >>> from dask_ml.metrics import accuracy_score + >>> from dask_ml.metrics import accuracy_score >>> y_pred = da.from_array(np.array([0, 2, 1, 3]), chunks=2) >>> y_true = da.from_array(np.array([0, 1, 2, 3]), chunks=2) >>> accuracy_score(y_true, y_pred) diff --git a/dask_ml/model_selection/_hyperband.py b/dask_ml/model_selection/_hyperband.py index 17ebe2797..535e888fe 100644 --- a/dask_ml/model_selection/_hyperband.py +++ b/dask_ml/model_selection/_hyperband.py @@ -157,8 +157,8 @@ class HyperbandSearchCV(BaseIncrementalSearchCV): Examples -------- >>> import numpy as np - >>> from dask_ml.model_selection import HyperbandSearchCV - >>> from dask_ml.datasets import make_classification + >>> from dask_ml.model_selection import HyperbandSearchCV + >>> from dask_ml.datasets import make_classification >>> from sklearn.linear_model import SGDClassifier >>> >>> X, y = make_classification(chunks=20) diff --git a/dask_ml/model_selection/_incremental.py b/dask_ml/model_selection/_incremental.py index 180c2a23d..e42a0b605 100--- 
a/dask_ml/model_selection/_incremental.py +++ b/dask_ml/model_selection/_incremental.py @@ -411,7 +411,7 @@ async def fit( Examples -------- >>> import numpy as np - >>> from dask_ml.datasets import make_classification + >>> from dask_ml.datasets import make_classification >>> X, y = make_classification(n_samples=5000000, n_features=20, ... chunks=100000, random_state=0) @@ -443,7 +443,7 @@ async def fit( >>> from dask.distributed import Client >>> client = Client(processes=False) - >>> from dask_ml.model_selection._incremental import fit + >>> from dask_ml.model_selection._incremental import fit >>> info, models, history, best = fit(model, params, ... X_train, y_train, ... X_test, y_test, @@ -920,7 +920,7 @@ class IncrementalSearchCV(BaseIncrementalSearchCV): >>> from dask.distributed import Client >>> client = Client() >>> import numpy as np - >>> from dask_ml.datasets import make_classification + >>> from dask_ml.datasets import make_classification >>> X, y = make_classification(n_samples=5000000, n_features=20, ... chunks=100000, random_state=0) diff --git a/dask_ml/model_selection/_split.py b/dask_ml/model_selection/_split.py index 7ccd368a7..b4943ac05 100644 --- a/dask_ml/model_selection/_split.py +++ b/dask_ml/model_selection/_split.py @@ -407,7 +407,7 @@ def train_test_split( Examples -------- >>> import dask.array as da - >>> from dask_ml.datasets import make_regression + >>> from dask_ml.datasets import make_regression >>> X, y = make_regression(n_samples=125, n_features=4, chunks=50, ... 
random_state=0) diff --git a/dask_ml/naive_bayes.py b/dask_ml/naive_bayes.py index c0c180fbd..0bf8593c7 100644 --- a/dask_ml/naive_bayes.py +++ b/dask_ml/naive_bayes.py @@ -11,8 +11,8 @@ class GaussianNB(BaseEstimator): Examples -------- - >>> from dask_ml import datasets - >>> from dask_ml.naive_bayes import GaussianNB + >>> from dask_ml import datasets + >>> from dask_ml.naive_bayes import GaussianNB >>> X, y = datasets.make_classification(chunks=50) >>> gnb = GaussianNB() >>> gnb.fit(X, y) diff --git a/dask_ml/preprocessing/_block_transformer.py b/dask_ml/preprocessing/_block_transformer.py index a578898ff..4317b4804 100644 --- a/dask_ml/preprocessing/_block_transformer.py +++ b/dask_ml/preprocessing/_block_transformer.py @@ -37,7 +37,7 @@ class BlockTransformer(BaseEstimator, TransformerMixin): >>> import dask.datasets >>> import pandas as pd - >>> from dask_ml.preprocessing import BlockTransformer + >>> from dask_ml.preprocessing import BlockTransformer >>> df = dask.datasets.timeseries() >>> df ... # doctest: +SKIP diff --git a/dask_ml/preprocessing/_encoders.py b/dask_ml/preprocessing/_encoders.py index 17e5c0954..615d9d8c0 100644 --- a/dask_ml/preprocessing/_encoders.py +++ b/dask_ml/preprocessing/_encoders.py @@ -89,7 +89,7 @@ class OneHotEncoder(DaskMLBaseMixin, sklearn.preprocessing.OneHotEncoder): Given a dataset with two features, we let the encoder find the unique values per feature and transform the data to a binary one-hot encoding. - >>> from dask_ml.preprocessing import OneHotEncoder + >>> from dask_ml.preprocessing import OneHotEncoder >>> import numpy as np >>> import dask.array as da >>> enc = OneHotEncoder() diff --git a/dask_ml/preprocessing/label.py b/dask_ml/preprocessing/label.py index fea3bd08e..961202a3f 100644 --- a/dask_ml/preprocessing/label.py +++ b/dask_ml/preprocessing/label.py @@ -46,7 +46,7 @@ class LabelEncoder(sklearn.preprocessing.LabelEncoder): -------- `LabelEncoder` can be used to normalize labels. 
- >>> from dask_ml import preprocessing + >>> from dask_ml import preprocessing >>> le = preprocessing.LabelEncoder() >>> le.fit([1, 2, 2, 6]) LabelEncoder() diff --git a/dask_ml/wrappers.py b/dask_ml/wrappers.py index 1d70f09c0..f25927078 100644 --- a/dask_ml/wrappers.py +++ b/dask_ml/wrappers.py @@ -436,8 +436,8 @@ class Incremental(ParallelPostFit): Examples -------- - >>> from dask_ml.wrappers import Incremental - >>> from dask_ml.datasets import make_classification + >>> from dask_ml.wrappers import Incremental + >>> from dask_ml.datasets import make_classification >>> import sklearn.linear_model >>> X, y = make_classification(chunks=25) >>> est = sklearn.linear_model.SGDClassifier()