import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('data_v1_save.csv')
df.info()
df.tail()
df['Churn'].value_counts().plot(kind='bar')
df[['MultipleLines']].head()
df['MultipleLines'].value_counts()
pd.get_dummies(data=df, columns=['MultipleLines'])
df.select_dtypes('object').head(3)
cal_cols = df.select_dtypes('object').columns.values
cal_cols
df1 = pd.get_dummies(data=df, columns=cal_cols)
df1.info()
df1.head(3)
X = df1.drop('Churn', axis=1).values
y = df1['Churn'].values
X.shape, y.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=42)
X_train.shape
y_train.shape
df1.tail()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train[:2], y_train[:2]
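# Quick sanity check (illustrative, not part of the original flow): MinMaxScaler maps
# every training feature into [0, 1]; the test set can fall slightly outside that range
# because it is transformed with the training min/max.
print(X_train.min(), X_train.max())
print(X_test.min(), X_test.max())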
from sklearn.metrics import accuracy_score, recall_score
my_predictions = {}
colors = ['r', 'c', 'm', 'y', 'k', 'khaki', 'teal', 'orchid', 'sandybrown',
'greenyellow', 'dodgerblue', 'deepskyblue', 'rosybrown', 'firebrick',
'deeppink', 'crimson', 'salmon', 'darkred', 'olivedrab', 'olive',
'forestgreen', 'royalblue', 'indigo', 'navy', 'mediumpurple', 'chocolate',
'gold', 'darkorange', 'seagreen', 'turquoise', 'steelblue', 'slategray',
'peru', 'midnightblue', 'slateblue', 'dimgray', 'cadetblue', 'tomato'
]
def recall_eval(name_, pred, actual):
    # Record this model's recall and redraw the comparison chart.
    global my_predictions
    global colors

    # acc = accuracy_score(actual, pred)
    acc = recall_score(actual, pred)
    my_predictions[name_] = acc * 100

    # Sort every model recorded so far by recall, best first.
    y_value = sorted(my_predictions.items(), key=lambda x: x[1], reverse=True)
    df = pd.DataFrame(y_value, columns=['model', 'recall'])
    print(df)

    # Horizontal bar chart; figure height grows with the number of models.
    length = len(df)
    plt.figure(figsize=(10, length))
    ax = plt.subplot()
    ax.set_yticks(np.arange(len(df)))
    ax.set_yticklabels(df['model'], fontsize=15)
    bars = ax.barh(np.arange(len(df)), df['recall'])

    for i, v in enumerate(df['recall']):
        idx = np.random.choice(len(colors))
        bars[i].set_color(colors[idx])
        ax.text(v + 2, i, str(round(v, 3)), color='k', fontsize=15, fontweight='bold')

    plt.title('recall', fontsize=18)
    plt.xlim(0, 100)
    plt.show()
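# A minimal usage sketch (hypothetical baseline, not part of the original flow):
# a majority-class DummyClassifier scores ~0 recall on the churn class, giving a
# floor that the real models below should clear.
from sklearn.dummy import DummyClassifier
dummy = DummyClassifier(strategy='most_frequent')
dummy.fit(X_train, y_train)
recall_eval('Dummy (majority class)', dummy.predict(X_test), y_test)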
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
lg = LogisticRegression()
lg.fit(X_train, y_train)
lg.score(X_test, y_test)
lg_pred = lg.predict(X_test)
confusion_matrix(y_test, lg_pred)
accuracy_score(y_test, lg_pred)
precision_score(y_test, lg_pred)
recall_score(y_test, lg_pred)
f1_score(y_test, lg_pred)
print(classification_report(y_test, lg_pred))
recall_eval('LogisticRegression', lg_pred, y_test)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
knn_pred = knn.predict(X_test)
recall_eval('K-Nearest Neighbor', knn_pred, y_test)
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(max_depth=10, random_state=42)
dt.fit(X_train, y_train)
dt_pred = dt.predict(X_test)
recall_eval('DecisionTree', dt_pred, y_test)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=3, random_state=42)
rfc.fit(X_train, y_train)
rfc_pred = rfc.predict(X_test)
recall_eval('RandomForest Ensemble', rfc_pred, y_test)
# Deep Learning
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
tf.random.set_seed(100)
batch_size = 16
epochs = 20
X_train.shape
y_train.shape
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(39,)))
model.add(Dense(3, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(39,)))
model.add(Dropout(0.3))
model.add(Dense(3, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train,
          validation_data=(X_test, y_test),
          epochs=10,
          batch_size=10)
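# A minimal sketch (an addition, not in the original): the single sigmoid unit returns
# probabilities, so a 0.5 threshold is needed to turn them into class labels before
# computing recall.
sigmoid_pred = (model.predict(X_test) > 0.5).astype(int).ravel()
recall_score(y_test, sigmoid_pred)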
model = Sequential()
model.add(Dense(5, activation='relu', input_shape=(39,)))
model.add(Dropout(0.3))
model.add(Dense(4, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(2, activation='softmax'))
model.summary()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(X_train, y_train,
                    validation_data=(X_test, y_test),
                    epochs=20,
                    batch_size=16)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
early_stop = EarlyStopping(monitor='val_loss', mode='min',
                           verbose=1, patience=5)
check_point = ModelCheckpoint('best_model.h5', verbose=1,
                              monitor='val_loss', mode='min', save_best_only=True)
history = model.fit(x=X_train, y=y_train,
                    epochs=50, batch_size=20,
                    validation_data=(X_test, y_test), verbose=1,
                    callbacks=[early_stop, check_point])
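# A minimal sketch (assuming 'best_model.h5' was saved by the ModelCheckpoint above):
# reload the best checkpoint so evaluation uses the lowest-val_loss weights rather
# than the final epoch's.
from tensorflow.keras.models import load_model
best_model = load_model('best_model.h5')
best_model.evaluate(X_test, y_test)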
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend(['acc', 'val_acc'])
plt.show()
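# Companion plot (an addition, not in the original): the loss curves make the
# EarlyStopping point easier to see than accuracy alone.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(['loss', 'val_loss'])
plt.show()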
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
pred = model.predict(X_test)
pred.shape
y_pred = np.argmax(pred, axis=1)
accuracy_score(y_test, y_pred)
recall_score(y_test, y_pred)
print(classification_report(y_test, y_pred))
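# Optional extension (an assumption, not in the original): feed the deep-learning
# predictions through the same recall_eval helper so the final chart includes it.
recall_eval('DNN (Keras)', y_pred, y_test)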