from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.linear_model import LogisticRegression

X = df.drop(columns="target")  # feature matrix
y = df["target"]               # target vector

# train-test split (stratified to preserve class proportions)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=7135, stratify=y)

# feature scaling: fit the scaler on the training set only to avoid data leakage,
# then apply the same transformation to the test set
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
logR = LogisticRegression()  # model instance with default parameters
logR.fit(X_train, y_train)   # fit on the training set
print("mean cross-validation score with default parameters:",
      cross_val_score(logR, X_train, y_train, cv=5).mean())
print("\n <<< Grid Search >>> \n")
grid_values = {'penalty': ['l1', 'l2'], 'C': [0.1, 1, 10, 100, 1000]}
logR = LogisticRegression(solver='liblinear')  # liblinear supports both l1 and l2 penalties
grid = GridSearchCV(logR, grid_values, cv=5, scoring='accuracy')
grid.fit(X_train, y_train)

print("best parameters:", grid.best_params_)
print("\nmean cross-validation score with optimal parameters:", grid.best_score_)