In [1]:
import sklearn
import pandas as pd
import numpy as np
import scipy
In [2]:
sklearn.__version__, pd.__version__, np.__version__, scipy.__version__
Out[2]:
('0.23.1', '1.0.5', '1.19.0', '1.5.1')

Custom DataFrame transformer

In [3]:
from sklearn.pipeline import Pipeline

class DataframeFunctionTransformer():
    """Minimal pipeline step that applies an arbitrary callable to the data.

    Duck-typed to the scikit-learn transformer API (``fit``/``transform``),
    so it can be dropped into a ``Pipeline`` without subclassing anything.
    """

    def __init__(self, func):
        self.func = func  # the callable applied in transform()

    def fit(self, X, y=None, **fit_params):
        # stateless: there is nothing to learn from the data
        return self

    def transform(self, input_df, **transform_params):
        # delegate all of the work to the wrapped callable
        return self.func(input_df)
In [4]:
def process_dataframe(input_df):
    """Return a copy of ``input_df`` with the ``text`` column upper-cased.

    Parameters
    ----------
    input_df : pandas.DataFrame with a string ``text`` column.

    Returns
    -------
    pandas.DataFrame — a new frame; the caller's frame is left untouched.
    (The original version mutated ``input_df`` in place, a side effect that
    makes notebook cells non-idempotent on re-run.)
    """
    out_df = input_df.copy()
    # vectorized string method instead of a Python-level map(lambda ...)
    out_df["text"] = out_df["text"].str.upper()
    return out_df
In [5]:
# Build the toy frame from explicit records rather than parallel column lists.
records = [(1, "foo"), (2, "Bar"), (3, "BAz"), (4, "quux")]
df = pd.DataFrame(records, columns=["id", "text"])
In [6]:
df
Out[6]:
id text
0 1 foo
1 2 Bar
2 3 BAz
3 4 quux
In [7]:
# Step renamed "uppercase": the wrapped process_dataframe UPPER-cases the
# text column, so the original step name "lowercase" was misleading.
pipeline = Pipeline([
    ("uppercase", DataframeFunctionTransformer(process_dataframe))
])
In [8]:
pipeline.fit_transform(df)
Out[8]:
id text
0 1 FOO
1 2 BAR
2 3 BAZ
3 4 QUUX

Sparse-to-dense matrix conversion

In [9]:
from sklearn.decomposition import PCA

from sklearn.tree import DecisionTreeClassifier
from sklearn.base import TransformerMixin,BaseEstimator

from sklearn.pipeline import Pipeline
In [10]:
# Toy binary feature matrix: build it dense first, then convert to CSR
# sparse format (the format most sklearn estimators expect for sparse input).
dense_rows = np.array([
    [1., 0., 0., 0., 0., 0.],
    [0., 1., 0., 0., 0., 0.],
    [1., 0., 0., 0., 0., 0.],
    [0., 0., 0., 0., 1., 0.],
    [0., 0., 0., 1., 0., 0.],
    [1., 0., 0., 0., 0., 0.],
    [1., 1., 0., 0., 0., 0.],
    [1., 1., 0., 0., 0., 0.],
])
data = scipy.sparse.csr_matrix(dense_rows)

# one binary class label per row of `data`
target = np.array([1, 1, 1, 0, 0, 0, 1, 1])
In [11]:
class ToDenseTransformer():
    """Pipeline step that converts a scipy sparse matrix to a dense ndarray.

    Needed because estimators such as PCA cannot consume sparse input.
    """

    # here you define the operation it should perform
    def transform(self, X, y=None, **fit_params):
        # toarray() returns a plain numpy.ndarray; the previous todense()
        # returned the discouraged numpy.matrix type, which can surprise
        # downstream code (matrix keeps 2-D semantics under indexing/ops).
        return X.toarray()

    # just return self
    def fit(self, X, y=None, **fit_params):
        # stateless: nothing to learn
        return self

# need to make matrices dense because PCA does not work with sparse vectors.
pipeline = Pipeline([
    ('to_dense',ToDenseTransformer()),
    ('pca',PCA()),
    ('clf',DecisionTreeClassifier())
])

# fit on the sparse toy data, then predict on the same rows
# (in-sample prediction, just to demonstrate that the wiring works)
pipeline.fit(data,target)
pipeline.predict(data)
Out[11]:
array([1, 1, 1, 0, 0, 1, 1, 1])

Missing-value imputation

In [12]:
import pandas as pd

from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
In [13]:
# Small demo frame with one missing age (np.nan makes the column float64).
names = ['alice', 'bob', 'charlie', 'david', 'edward']
ages = [24, 32, np.nan, 38, 20]
df = pd.DataFrame({'name': names, 'age': ages})

df.head()
Out[13]:
name age
0 alice 24.0
1 bob 32.0
2 charlie NaN
3 david 38.0
4 edward 20.0
In [14]:
# Impute the numeric 'age' column with its mean; pass every other column
# through untouched. NOTE: the transformed column(s) come first in the
# output (see the display cell below, which labels them ['age', 'name']).
transformer_step = ColumnTransformer([
        ('impute_mean', SimpleImputer(strategy='mean'), ['age'])
    ], remainder='passthrough')
In [15]:
# single-step pipeline wrapping the imputing ColumnTransformer
pipe = Pipeline([
    ('transformer', transformer_step)
])
In [16]:
pipe.fit(df)

# pipe.transform returns a bare ndarray: the imputed 'age' first, then the
# passthrough 'name'. Re-wrap it in a DataFrame with those labels and
# reorder back to the original ['name', 'age'] column order for display.
pd.DataFrame(
    data=pipe.transform(df),
    columns=['age', 'name']
)[["name","age"]]
Out[16]:
name age
0 alice 24
1 bob 32
2 charlie 28.5
3 david 38
4 edward 20

ColumnTransformer with OneHotEncoder

In [17]:
import numpy as np
import pandas as pd

from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
In [18]:
# Toy classification frame: one categorical feature, one numeric feature
# with a missing value, and a binary target — built from row records.
records = [
    ('blue', 10, 1),
    ('green', 15, 0),
    ('red', 10, 1),
    ('green', np.nan, 0),
    ('blue', 10, 1),
]
df = pd.DataFrame(records, columns=['favorite_color', 'age', 'target'])
In [19]:
df
Out[19]:
favorite_color age target
0 blue 10.0 1
1 green 15.0 0
2 red 10.0 1
3 green NaN 0
4 blue 10.0 1
In [20]:
# define individual transformers in a pipeline
# (one per column type: one-hot for the categorical column,
#  imputation for the numeric column with a missing value)
categorical_preprocessing = Pipeline([('ohe', OneHotEncoder())])
numerical_preprocessing = Pipeline([('imputation', SimpleImputer())])

# define which transformer applies to which columns
preprocess = ColumnTransformer([
    ('categorical_preprocessing', categorical_preprocessing, ['favorite_color']),
    ('numerical_preprocessing', numerical_preprocessing, ['age'])
])

# create the final pipeline with preprocessing steps and 
# the final classifier step
pipeline = Pipeline([
    ('preprocess', preprocess),
    ('clf', DecisionTreeClassifier())
])

# now fit the pipeline using the whole dataframe
# (column selection happens inside the ColumnTransformer)
df_features = df[['favorite_color','age']]
df_target = df['target']

pipeline.fit(df_features, df_target)
Out[20]:
Pipeline(steps=[('preprocess',
                 ColumnTransformer(transformers=[('categorical_preprocessing',
                                                  Pipeline(steps=[('ohe',
                                                                   OneHotEncoder())]),
                                                  ['favorite_color']),
                                                 ('numerical_preprocessing',
                                                  Pipeline(steps=[('imputation',
                                                                   SimpleImputer())]),
                                                  ['age'])])),
                ('clf', DecisionTreeClassifier())])

Select columns with a custom transformer

In [21]:
import pandas as pd

from sklearn.base import TransformerMixin,BaseEstimator
from sklearn.pipeline import Pipeline
In [22]:
class SelectColumnsTransfomer():
    """Pipeline step that keeps only the configured DataFrame columns.

    (The original 'Transfomer' spelling is kept so existing references
    elsewhere in the notebook continue to work.)
    """

    def __init__(self, columns=None):
        # column labels to keep; None now means "keep all columns"
        self.columns = columns

    def transform(self, X, **transform_params):
        """Return a copy of ``X`` restricted to ``self.columns``.

        Previously the default ``columns=None`` made this raise
        (``X[None]``); it now falls back to copying the whole frame.
        """
        if self.columns is None:
            return X.copy()
        return X[self.columns].copy()

    def fit(self, X, y=None, **fit_params):
        # stateless: nothing to learn
        return self
In [23]:
# Same demo frame as the imputation section: five people, one missing age.
people = {
    'name': ['alice', 'bob', 'charlie', 'david', 'edward'],
    'age': [24, 32, np.nan, 38, 20],
}
df = pd.DataFrame(people)

df.head()
Out[23]:
name age
0 alice 24.0
1 bob 32.0
2 charlie NaN
3 david 38.0
4 edward 20.0
In [24]:
# pipeline with a single column-selection step: keep only the "name" column
pipe = Pipeline([
    ('selector', SelectColumnsTransfomer(["name"]))
])
In [25]:
pipe.fit_transform(df)
Out[25]:
name
0 alice
1 bob
2 charlie
3 david
4 edward

FunctionTransformer with parameters

In [29]:
import pandas as pd

from nltk.stem import RSLPStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import FunctionTransformer

def stem_str(input_series, stemmer):
    """Stem every space-separated token of each string in the series.

    Parameters
    ----------
    input_series : pandas.Series of str
    stemmer : object exposing a ``stem(str) -> str`` method
        (e.g. an nltk stemmer instance).

    Returns
    -------
    pandas.Series of str with each token stemmed and rejoined by spaces.
    """

    def stem_sentence(sentence):
        tokens = sentence.split(" ")
        stemmed = map(stemmer.stem, tokens)
        return " ".join(stemmed).strip()

    return input_series.apply(stem_sentence)

# Full text pipeline: stem -> TF-IDF vectorize -> logistic regression.
# FunctionTransformer's kw_args passes the stemmer instance to stem_str
# as a keyword argument on every transform call.
pipeline = Pipeline([
    ('stemmer', FunctionTransformer(
        func=stem_str, 
        kw_args={'stemmer': RSLPStemmer()})),
    ('vect', TfidfVectorizer()),
    ('clf', LogisticRegression())
])
In [30]:
# Four lorem-ipsum sentences with alternating binary labels.
lorem_sentences = [
    'Lorem ipsum dolor sit amet, consectetur adipiscing elit.',
    'Sed accumsan congue enim non pretium.',
    'In hac habitasse platea dictumst.',
    'Sed tincidunt ipsum nec urna vulputate luctus.',
]
df = pd.DataFrame({'text': lorem_sentences, 'target': [0, 1, 0, 1]})
In [34]:
df
Out[34]:
text target
0 Lorem ipsum dolor sit amet, consectetur adipis... 0
1 Sed accumsan congue enim non pretium. 1
2 In hac habitasse platea dictumst. 0
3 Sed tincidunt ipsum nec urna vulputate luctus. 1
In [31]:
pipeline.fit(df['text'],df['target'])
Out[31]:
Pipeline(steps=[('stemmer',
                 FunctionTransformer(func=<function stem_str at 0x7f2a259ffe18>,
                                     kw_args={'stemmer': <nltk.stem.rslp.RSLPStemmer object at 0x7f2a25429630>})),
                ('vect', TfidfVectorizer()), ('clf', LogisticRegression())])
In [33]:
pipeline.predict(df['text'])
Out[33]:
array([0, 1, 0, 1])
In [ ]: