Capstone: End-to-End Production ML System
This capstone integrates material from the course into a production-style ML system for insurance fraud detection. The script below implements the core pipeline — feature engineering, model training with Optuna, evaluation, artifact saving, and a scoring function; the surrounding architecture (FastAPI serving with health endpoints, monitoring, drift detection, A/B testing, and automated retraining) is covered in earlier topics. This is the architecture pattern used at insurance companies, banks, and large e-commerce platforms.
Insurance Fraud Detection — Full Production System
import numpy as np
import pandas as pd
import joblib
import json
import datetime
import optuna
optuna.logging.set_verbosity(optuna.logging.WARNING)
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, PowerTransformer, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.metrics import roc_auc_score, classification_report, average_precision_score
from sklearn.ensemble import GradientBoostingClassifier
from scipy.stats import ks_2samp
# Global RNG seed: makes the synthetic dataset (and everything downstream that
# consumes it) reproducible. NOTE: the order of np.random calls below matters —
# reordering any draw changes the data.
np.random.seed(42)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# DATA: INSURANCE CLAIMS
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# Synthetic claims table. Monetary/temporal fields use exponential draws
# (heavy-tailed, like real claim amounts) clipped to plausible business ranges.
N = 8000
claims = pd.DataFrame({
"claim_amount": np.random.exponential(5000, N).clip(100, 50000),
"policy_age_days": np.random.exponential(730, N).clip(30, 3650),
"claimant_age": np.random.normal(43, 15, N).clip(18, 85),
"num_past_claims": np.random.poisson(0.5, N).clip(0, 10),
"claim_type": np.random.choice(["collision","theft","fire","flood","liability"], N, p=[0.4,0.2,0.15,0.1,0.15]),
"region": np.random.choice(["north","south","east","west"], N),
"policy_premium": np.random.exponential(1200, N).clip(300, 10000),
})
# Ground-truth label: rule-based fraud indicators (claim far exceeding premium,
# heavy claim history, very new policy) OR'd with ~3% random noise so the
# target is not perfectly learnable from the rules alone.
claims["fraud"] = (
(claims["claim_amount"] / claims["policy_premium"] > 8) |
(claims["num_past_claims"] > 3) |
(claims["policy_age_days"] < 60) |
(np.random.uniform(0, 1, N) < 0.03)
).astype(int)
print(f"Dataset: {claims.shape} | Fraud rate: {claims['fraud'].mean():.1%}")
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# FEATURE ENGINEERING
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# Derived signals. Column insertion order is deliberate: score_claim later
# relies on X.columns matching this layout.
claims["claim_to_premium"] = claims["claim_amount"] / claims["policy_premium"]
claims["is_new_policy"] = claims["policy_age_days"].lt(90).astype(int)
claims["high_claims_hist"] = claims["num_past_claims"].gt(2).astype(int)
claims["log_claim"] = np.log1p(claims["claim_amount"])

# Separate target from features, then carve out a stratified 15% hold-out set
# so the final evaluation sees the true fraud rate.
y = claims["fraud"]
X = claims.drop(columns="fraud")
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.15, random_state=42, stratify=y
)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# PREPROCESSING PIPELINE
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# Column groups feeding the ColumnTransformer. Numeric columns get median
# imputation + PowerTransformer (Yeo-Johnson) to tame the heavy tails;
# categoricals get mode imputation + one-hot; binary flags pass through.
num_cols = ["claim_amount","policy_age_days","claimant_age","num_past_claims",
"policy_premium","claim_to_premium","log_claim"]
cat_cols = ["claim_type","region"]
bin_cols = ["is_new_policy","high_claims_hist"]
# FIX: removed drop="first" from OneHotEncoder. Combined with
# handle_unknown="ignore", an unseen category at inference time encoded as
# all-zeros — identical to the dropped baseline category — so the model could
# not tell them apart (sklearn warns about this combination). Tree ensembles
# don't need a dropped reference level, so keeping all categories is safe.
preprocessor = ColumnTransformer([
    ("num", Pipeline([("imp", SimpleImputer(strategy="median")),
                      ("pt", PowerTransformer())]), num_cols),
    ("cat", Pipeline([("imp", SimpleImputer(strategy="most_frequent")),
                      ("ohe", OneHotEncoder(sparse_output=False, handle_unknown="ignore"))]), cat_cols),
    ("bin", "passthrough", bin_cols),
])
# Stratified 5-fold CV keeps the fraud rate constant across folds.
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# OPTUNA TUNING
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
def objective(trial: optuna.Trial) -> float:
    """Mean 5-fold CV average precision for one sampled GBM configuration.

    The suggest_* calls run in a fixed order so the seeded TPE sampler
    remains reproducible across runs.
    """
    hp = dict(
        n_estimators=trial.suggest_int("n_estimators", 100, 500),
        learning_rate=trial.suggest_float("learning_rate", 0.01, 0.3, log=True),
        max_depth=trial.suggest_int("max_depth", 3, 7),
        subsample=trial.suggest_float("subsample", 0.6, 1.0),
        min_samples_leaf=trial.suggest_int("min_samples_leaf", 1, 15),
        random_state=42,
    )
    candidate = Pipeline([("prep", preprocessor),
                          ("model", GradientBoostingClassifier(**hp))])
    scores = cross_val_score(candidate, X_train, y_train, cv=cv,
                             scoring="average_precision", n_jobs=-1)
    return scores.mean()
print("\nRunning Optuna optimization (30 trials)...")
# Seeded sampler -> the same 30 trials (and the same best_params) every run.
sampler = optuna.samplers.TPESampler(seed=42)
study = optuna.create_study(direction="maximize", sampler=sampler)
study.optimize(objective, n_trials=30, show_progress_bar=True)
print(f"Best CV PR-AUC: {study.best_value:.4f}")
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# FINAL MODEL
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# Refit the tuned configuration on the full training split, then evaluate
# exactly once on the untouched hold-out set.
tuned_gbm = GradientBoostingClassifier(**study.best_params, random_state=42)
final = Pipeline([("prep", preprocessor), ("model", tuned_gbm)])
final.fit(X_train, y_train)

y_prob = final.predict_proba(X_test)[:, 1]
# Operating point below 0.5: for fraud, extra referrals are cheaper than misses.
y_pred = (y_prob > 0.4).astype(int)
test_auc = roc_auc_score(y_test, y_prob)
test_prauc = average_precision_score(y_test, y_prob)
print(f"\nFinal System Performance:")
print(f" ROC-AUC: {test_auc:.4f}")
print(f" PR-AUC: {test_prauc:.4f}")
print(classification_report(y_test, y_pred, target_names=["Legitimate","Fraud"]))
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# SAVE PRODUCTION ARTIFACT
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# Persist the fitted pipeline plus a JSON sidecar so deployment metadata
# (version, metrics, threshold, feature list) travels with the binary.
# FIX: sklearn metrics return numpy.float64, and round() preserves that type;
# json.dump raises TypeError on numpy scalars, so cast to builtin float first.
metadata = {
    "system": "Fraud Detection v1.0",
    "version": "1.0.0",
    "created_at": datetime.datetime.now().isoformat(),
    "metrics": {"roc_auc": round(float(test_auc), 4),
                "pr_auc": round(float(test_prauc), 4)},
    "best_params": study.best_params,
    "n_train": len(X_train),
    "features": list(X.columns),
    "threshold": 0.4,
    "business": "Refer for investigation when score > 0.4",
}
joblib.dump(final, "fraud_detector_v1.joblib")
with open("fraud_detector_v1.json", "w") as f:
    json.dump(metadata, f, indent=2)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
# INFERENCE FUNCTION (production API would wrap this)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━
def score_claim(claim: dict, threshold: float = 0.4) -> dict:
    """Score one raw claim and return a routing decision.

    Re-derives the same engineered features used at training time, then runs
    the fitted `final` pipeline on the columns in training order (X.columns).

    Args:
        claim: raw claim fields — claim_amount, policy_age_days, claimant_age,
            num_past_claims, claim_type, region, policy_premium.
        threshold: referral probability cut-off. Defaults to the deployed
            value (0.4) so existing callers are unaffected; previously this
            was hard-coded in three places.

    Returns:
        dict with fraud_probability (builtin float, JSON-safe), decision,
        and estimated_savings.
    """
    df = pd.DataFrame([claim])
    # Must mirror the training-time feature engineering exactly.
    df["claim_to_premium"] = df["claim_amount"] / df["policy_premium"]
    df["is_new_policy"] = (df["policy_age_days"] < 90).astype(int)
    df["high_claims_hist"] = (df["num_past_claims"] > 2).astype(int)
    df["log_claim"] = np.log1p(df["claim_amount"])
    # FIX: cast numpy.float64 -> float so the result is JSON-serializable
    # when returned from an API endpoint.
    prob = float(final.predict_proba(df[X.columns])[0, 1])
    flagged = prob > threshold
    return {
        "fraud_probability": round(prob, 4),
        "decision": "REFER_FOR_INVESTIGATION" if flagged else "AUTO_APPROVE",
        "estimated_savings": f"${claim['claim_amount'] * prob:,.0f}" if flagged else "$0",
    }
# Smoke-test the scorer on one suspicious claim (high ratio, new policy,
# heavy history) and one routine claim.
# FIX: removed a stray "Tip" token fused onto the final print line by the
# surrounding tutorial text — it was a SyntaxError.
print("\nExample claim scoring:")
test_claim = {"claim_amount": 18000, "policy_age_days": 45, "claimant_age": 32,
              "num_past_claims": 4, "claim_type": "theft", "region": "north", "policy_premium": 1200}
print(score_claim(test_claim))
honest_claim = {"claim_amount": 3000, "policy_age_days": 900, "claimant_age": 55,
                "num_past_claims": 0, "claim_type": "collision", "region": "south", "policy_premium": 1800}
print(score_claim(honest_claim))
print("\nProduction system ready. See FastAPI server pattern in Topic 2 for REST API deployment.")
Tip
Practice the Capstone: End-to-End Production ML System in small, isolated examples before integrating it into larger projects. Breaking concepts into small experiments builds genuine understanding faster than reading alone.
F1 score = harmonic mean of precision and recall, F1 = 2·P·R / (P + R) — a balanced metric that is high only when both precision and recall are high.
Practice Task
Note
Practice Task — (1) Write a working example of the Capstone: End-to-End Production ML System from scratch without looking at notes. (2) Modify it to handle an edge case (empty input, null value, or error state). (3) Share your solution in the Priygop community for feedback.
Quick Quiz
Common Mistake
Warning
A common mistake with the Capstone: End-to-End Production ML System is skipping edge-case testing — empty inputs, null values, and unexpected data types. Always validate boundary conditions to write robust, production-ready ML code.