Spaces:
Running
Running
Pulastya B commited on
Commit ·
1ca2e0e
1
Parent(s): 2cf9e11
Fixed Bugs where the SSE Streaming was improper added support for Auto Gluon, Fixed instances where stale schemas was causing EDA plots generation to fail
Browse files- examples/titanic_example.py +0 -166
- idx.html +0 -1281
- render.yaml +0 -37
- requirements.txt +16 -0
- src/api/app.py +17 -4
- src/orchestrator.py +226 -34
- src/tools/__init__.py +40 -8
- src/tools/advanced_insights.py +47 -13
- src/tools/advanced_preprocessing.py +11 -1
- src/tools/advanced_training.py +39 -2
- src/tools/agent_tool_mapping.py +9 -0
- src/tools/auto_pipeline.py +55 -1
- src/tools/autogluon_training.py +1480 -0
- src/tools/business_intelligence.py +62 -1
- src/tools/code_interpreter.py +6 -2
- src/tools/data_cleaning.py +72 -1
- src/tools/data_profiling.py +105 -0
- src/tools/eda_reports.py +97 -0
- src/tools/feature_engineering.py +2 -2
- src/tools/nlp_text_analytics.py +42 -14
- src/tools/production_mlops.py +191 -0
- src/tools/time_series.py +52 -1
- src/tools/tools_registry.py +467 -0
- src/tools/visualization_engine.py +1 -1
- src/utils/schema_extraction.py +16 -2
- src/utils/validation.py +107 -0
- test_data/sample.csv +0 -16
examples/titanic_example.py
DELETED
|
@@ -1,166 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Titanic Example - Demonstrating the complete Data Science Copilot workflow
|
| 3 |
-
"""
|
| 4 |
-
|
| 5 |
-
import sys
|
| 6 |
-
import os
|
| 7 |
-
from pathlib import Path
|
| 8 |
-
|
| 9 |
-
# Add src to path
|
| 10 |
-
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
|
| 11 |
-
|
| 12 |
-
from orchestrator import DataScienceCopilot
|
| 13 |
-
from rich.console import Console
|
| 14 |
-
from rich.panel import Panel
|
| 15 |
-
|
| 16 |
-
console = Console()
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
def main():
|
| 20 |
-
"""
|
| 21 |
-
Complete example using the Titanic dataset.
|
| 22 |
-
|
| 23 |
-
This demonstrates the full workflow:
|
| 24 |
-
1. Dataset profiling
|
| 25 |
-
2. Quality issue detection
|
| 26 |
-
3. Data cleaning
|
| 27 |
-
4. Feature engineering
|
| 28 |
-
5. Model training
|
| 29 |
-
6. Report generation
|
| 30 |
-
"""
|
| 31 |
-
|
| 32 |
-
console.print(Panel.fit(
|
| 33 |
-
"🚢 Titanic Survival Prediction - Complete Workflow Example",
|
| 34 |
-
style="bold blue"
|
| 35 |
-
))
|
| 36 |
-
|
| 37 |
-
# Setup
|
| 38 |
-
titanic_path = "./data/titanic.csv"
|
| 39 |
-
|
| 40 |
-
# Check if dataset exists
|
| 41 |
-
if not Path(titanic_path).exists():
|
| 42 |
-
console.print("\n[yellow]⚠ Titanic dataset not found at ./data/titanic.csv[/yellow]")
|
| 43 |
-
console.print("[yellow]Please download it from: https://www.kaggle.com/c/titanic/data[/yellow]")
|
| 44 |
-
console.print("[yellow]Or place your own CSV file in the data directory[/yellow]\n")
|
| 45 |
-
|
| 46 |
-
# Use a sample path instead
|
| 47 |
-
console.print("[blue]Using sample dataset path for demonstration...[/blue]\n")
|
| 48 |
-
titanic_path = "your_dataset.csv" # User should replace this
|
| 49 |
-
|
| 50 |
-
# Initialize copilot
|
| 51 |
-
console.print("\n[bold]Step 1: Initialize Data Science Copilot[/bold]")
|
| 52 |
-
try:
|
| 53 |
-
copilot = DataScienceCopilot(reasoning_effort="medium")
|
| 54 |
-
console.print("[green]✓ Copilot initialized successfully[/green]")
|
| 55 |
-
except Exception as e:
|
| 56 |
-
console.print(f"[red]✗ Error: {e}[/red]")
|
| 57 |
-
console.print("[yellow]Make sure to set GROQ_API_KEY in .env file[/yellow]")
|
| 58 |
-
return
|
| 59 |
-
|
| 60 |
-
# Define the task
|
| 61 |
-
task_description = """
|
| 62 |
-
Analyze the Titanic dataset and build a model to predict passenger survival.
|
| 63 |
-
|
| 64 |
-
Key objectives:
|
| 65 |
-
1. Understand the data structure and identify quality issues
|
| 66 |
-
2. Handle missing values appropriately
|
| 67 |
-
3. Engineer relevant features from available data (e.g., family size, titles from names)
|
| 68 |
-
4. Train and compare multiple baseline models
|
| 69 |
-
5. Identify the most important features for prediction
|
| 70 |
-
6. Provide recommendations for improvement
|
| 71 |
-
|
| 72 |
-
Target: Achieve competitive performance (aim for 50-70th percentile on Kaggle leaderboard)
|
| 73 |
-
"""
|
| 74 |
-
|
| 75 |
-
target_column = "Survived"
|
| 76 |
-
|
| 77 |
-
console.print("\n[bold]Step 2: Run Complete Analysis Workflow[/bold]")
|
| 78 |
-
console.print(f"Dataset: {titanic_path}")
|
| 79 |
-
console.print(f"Target: {target_column}")
|
| 80 |
-
console.print(f"Task: Predict passenger survival\n")
|
| 81 |
-
|
| 82 |
-
# Run analysis
|
| 83 |
-
try:
|
| 84 |
-
result = copilot.analyze(
|
| 85 |
-
file_path=titanic_path,
|
| 86 |
-
task_description=task_description,
|
| 87 |
-
target_col=target_column,
|
| 88 |
-
use_cache=True,
|
| 89 |
-
max_iterations=15 # Allow more iterations for complex workflow
|
| 90 |
-
)
|
| 91 |
-
|
| 92 |
-
# Display results
|
| 93 |
-
if result["status"] == "success":
|
| 94 |
-
console.print("\n[green]✓ Analysis Complete![/green]\n")
|
| 95 |
-
|
| 96 |
-
# Display summary
|
| 97 |
-
console.print(Panel(
|
| 98 |
-
result["summary"],
|
| 99 |
-
title="📋 Final Analysis Summary",
|
| 100 |
-
border_style="green"
|
| 101 |
-
))
|
| 102 |
-
|
| 103 |
-
# Display workflow steps
|
| 104 |
-
console.print("\n[bold]🔧 Workflow Steps Executed:[/bold]")
|
| 105 |
-
for i, step in enumerate(result["workflow_history"], 1):
|
| 106 |
-
tool = step["tool"]
|
| 107 |
-
success = step["result"].get("success", False)
|
| 108 |
-
icon = "✓" if success else "✗"
|
| 109 |
-
color = "green" if success else "red"
|
| 110 |
-
console.print(f"{i}. [{color}]{icon}[/{color}] {tool}")
|
| 111 |
-
|
| 112 |
-
# Display statistics
|
| 113 |
-
console.print(f"\n[bold]📊 Execution Statistics:[/bold]")
|
| 114 |
-
console.print(f" Total Iterations: {result['iterations']}")
|
| 115 |
-
console.print(f" API Calls Made: {result['api_calls']}")
|
| 116 |
-
console.print(f" Execution Time: {result['execution_time']}s")
|
| 117 |
-
|
| 118 |
-
# Check for trained models
|
| 119 |
-
console.print("\n[bold]🤖 Model Training Results:[/bold]")
|
| 120 |
-
for step in result["workflow_history"]:
|
| 121 |
-
if step["tool"] == "train_baseline_models":
|
| 122 |
-
if step["result"].get("success"):
|
| 123 |
-
models_result = step["result"]["result"]
|
| 124 |
-
best_model = models_result.get("best_model", {})
|
| 125 |
-
console.print(f" Best Model: {best_model.get('name')}")
|
| 126 |
-
console.print(f" Score: {best_model.get('score'):.4f}")
|
| 127 |
-
console.print(f" Model Path: {best_model.get('model_path')}")
|
| 128 |
-
|
| 129 |
-
# Save results
|
| 130 |
-
output_file = "./outputs/reports/titanic_analysis.json"
|
| 131 |
-
Path(output_file).parent.mkdir(parents=True, exist_ok=True)
|
| 132 |
-
|
| 133 |
-
import json
|
| 134 |
-
with open(output_file, "w") as f:
|
| 135 |
-
json.dump(result, f, indent=2)
|
| 136 |
-
|
| 137 |
-
console.print(f"\n[cyan]💾 Full results saved to: {output_file}[/cyan]")
|
| 138 |
-
|
| 139 |
-
# Next steps
|
| 140 |
-
console.print("\n[bold]🎯 Next Steps:[/bold]")
|
| 141 |
-
console.print(" 1. Review the generated models in ./outputs/models/")
|
| 142 |
-
console.print(" 2. Check data quality reports in ./outputs/reports/")
|
| 143 |
-
console.print(" 3. Examine cleaned datasets in ./outputs/data/")
|
| 144 |
-
console.print(" 4. Use the best model for predictions on new data")
|
| 145 |
-
|
| 146 |
-
elif result["status"] == "error":
|
| 147 |
-
console.print(f"\n[red]✗ Analysis failed: {result['error']}[/red]")
|
| 148 |
-
console.print(f"Error type: {result['error_type']}")
|
| 149 |
-
|
| 150 |
-
else:
|
| 151 |
-
console.print(f"\n[yellow]⚠ Analysis incomplete: {result.get('message')}[/yellow]")
|
| 152 |
-
|
| 153 |
-
except Exception as e:
|
| 154 |
-
console.print(f"\n[red]✗ Unexpected error: {e}[/red]")
|
| 155 |
-
import traceback
|
| 156 |
-
console.print(traceback.format_exc())
|
| 157 |
-
|
| 158 |
-
# Cache statistics
|
| 159 |
-
console.print("\n[bold]📦 Cache Statistics:[/bold]")
|
| 160 |
-
cache_stats = copilot.get_cache_stats()
|
| 161 |
-
console.print(f" Valid Entries: {cache_stats['valid_entries']}")
|
| 162 |
-
console.print(f" Cache Size: {cache_stats['size_mb']} MB")
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
if __name__ == "__main__":
|
| 166 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
idx.html
DELETED
|
@@ -1,1281 +0,0 @@
|
|
| 1 |
-
<!DOCTYPE html>
|
| 2 |
-
<html lang="en">
|
| 3 |
-
<head>
|
| 4 |
-
<meta charset="UTF-8">
|
| 5 |
-
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
-
<title>Cute Lamp Login - Ultimate Edition</title>
|
| 7 |
-
<style>
|
| 8 |
-
* {
|
| 9 |
-
margin: 0;
|
| 10 |
-
padding: 0;
|
| 11 |
-
box-sizing: border-box;
|
| 12 |
-
}
|
| 13 |
-
|
| 14 |
-
body {
|
| 15 |
-
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
|
| 16 |
-
background: linear-gradient(135deg, #0a0a0a 0%, #1a1a2e 50%, #16213e 100%);
|
| 17 |
-
min-height: 100vh;
|
| 18 |
-
display: flex;
|
| 19 |
-
flex-direction: column;
|
| 20 |
-
align-items: center;
|
| 21 |
-
justify-content: center;
|
| 22 |
-
overflow-x: hidden;
|
| 23 |
-
position: relative;
|
| 24 |
-
padding: 20px;
|
| 25 |
-
}
|
| 26 |
-
|
| 27 |
-
/* Enhanced Animated background particles */
|
| 28 |
-
.particles {
|
| 29 |
-
position: fixed;
|
| 30 |
-
width: 100%;
|
| 31 |
-
height: 100%;
|
| 32 |
-
overflow: hidden;
|
| 33 |
-
z-index: 0;
|
| 34 |
-
top: 0;
|
| 35 |
-
left: 0;
|
| 36 |
-
}
|
| 37 |
-
|
| 38 |
-
.particle {
|
| 39 |
-
position: absolute;
|
| 40 |
-
width: 4px;
|
| 41 |
-
height: 4px;
|
| 42 |
-
background: radial-gradient(circle, #ffa500, transparent);
|
| 43 |
-
border-radius: 50%;
|
| 44 |
-
animation: float 15s infinite ease-in-out;
|
| 45 |
-
box-shadow: 0 0 10px #ffa500;
|
| 46 |
-
}
|
| 47 |
-
|
| 48 |
-
@keyframes float {
|
| 49 |
-
0% { transform: translate(0, 100vh) scale(0); opacity: 0; }
|
| 50 |
-
10% { opacity: 1; }
|
| 51 |
-
90% { opacity: 1; }
|
| 52 |
-
100% { transform: translate(var(--tx), -100vh) scale(1); opacity: 0; }
|
| 53 |
-
}
|
| 54 |
-
|
| 55 |
-
/* Sparkles */
|
| 56 |
-
.sparkle {
|
| 57 |
-
position: absolute;
|
| 58 |
-
width: 3px;
|
| 59 |
-
height: 3px;
|
| 60 |
-
background: #fff;
|
| 61 |
-
border-radius: 50%;
|
| 62 |
-
animation: sparkle 3s infinite;
|
| 63 |
-
box-shadow: 0 0 8px #fff;
|
| 64 |
-
}
|
| 65 |
-
|
| 66 |
-
@keyframes sparkle {
|
| 67 |
-
0%, 100% { opacity: 0; transform: scale(0); }
|
| 68 |
-
50% { opacity: 1; transform: scale(1.5); }
|
| 69 |
-
}
|
| 70 |
-
|
| 71 |
-
.title {
|
| 72 |
-
font-size: 52px;
|
| 73 |
-
font-weight: 700;
|
| 74 |
-
background: linear-gradient(90deg, #ffa500, #ff6b35, #ffa500);
|
| 75 |
-
background-size: 200% auto;
|
| 76 |
-
-webkit-background-clip: text;
|
| 77 |
-
-webkit-text-fill-color: transparent;
|
| 78 |
-
background-clip: text;
|
| 79 |
-
margin-bottom: 50px;
|
| 80 |
-
text-align: center;
|
| 81 |
-
z-index: 2;
|
| 82 |
-
text-shadow: 0 0 30px rgba(255, 165, 0, 0.5);
|
| 83 |
-
animation: gradientMove 3s linear infinite, fadeInDown 1s ease;
|
| 84 |
-
letter-spacing: 2px;
|
| 85 |
-
}
|
| 86 |
-
|
| 87 |
-
@keyframes gradientMove {
|
| 88 |
-
0% { background-position: 0% center; }
|
| 89 |
-
100% { background-position: 200% center; }
|
| 90 |
-
}
|
| 91 |
-
|
| 92 |
-
@keyframes fadeInDown {
|
| 93 |
-
from {
|
| 94 |
-
opacity: 0;
|
| 95 |
-
transform: translateY(-50px);
|
| 96 |
-
}
|
| 97 |
-
to {
|
| 98 |
-
opacity: 1;
|
| 99 |
-
transform: translateY(0);
|
| 100 |
-
}
|
| 101 |
-
}
|
| 102 |
-
|
| 103 |
-
.container {
|
| 104 |
-
display: flex;
|
| 105 |
-
align-items: center;
|
| 106 |
-
justify-content: center;
|
| 107 |
-
gap: 80px;
|
| 108 |
-
background: rgba(30, 40, 60, 0.5);
|
| 109 |
-
padding: 70px;
|
| 110 |
-
border-radius: 30px;
|
| 111 |
-
border: 2px solid rgba(255, 255, 255, 0.1);
|
| 112 |
-
backdrop-filter: blur(20px);
|
| 113 |
-
box-shadow: 0 30px 80px rgba(0, 0, 0, 0.6), inset 0 0 40px rgba(255, 165, 0, 0.05);
|
| 114 |
-
z-index: 2;
|
| 115 |
-
animation: fadeIn 1.2s ease 0.3s backwards;
|
| 116 |
-
position: relative;
|
| 117 |
-
max-width: 1200px;
|
| 118 |
-
}
|
| 119 |
-
|
| 120 |
-
@keyframes fadeIn {
|
| 121 |
-
from {
|
| 122 |
-
opacity: 0;
|
| 123 |
-
transform: scale(0.8) rotateX(10deg);
|
| 124 |
-
}
|
| 125 |
-
to {
|
| 126 |
-
opacity: 1;
|
| 127 |
-
transform: scale(1) rotateX(0);
|
| 128 |
-
}
|
| 129 |
-
}
|
| 130 |
-
|
| 131 |
-
/* Enhanced Lamp Section */
|
| 132 |
-
.lamp-section {
|
| 133 |
-
position: relative;
|
| 134 |
-
display: flex;
|
| 135 |
-
align-items: center;
|
| 136 |
-
justify-content: center;
|
| 137 |
-
width: 300px;
|
| 138 |
-
height: 400px;
|
| 139 |
-
}
|
| 140 |
-
|
| 141 |
-
.lamp-container {
|
| 142 |
-
position: relative;
|
| 143 |
-
cursor: pointer;
|
| 144 |
-
transition: transform 0.3s ease;
|
| 145 |
-
}
|
| 146 |
-
|
| 147 |
-
.lamp-container:hover {
|
| 148 |
-
transform: scale(1.05);
|
| 149 |
-
}
|
| 150 |
-
|
| 151 |
-
.lamp {
|
| 152 |
-
position: relative;
|
| 153 |
-
width: 200px;
|
| 154 |
-
height: 320px;
|
| 155 |
-
display: flex;
|
| 156 |
-
align-items: flex-end;
|
| 157 |
-
justify-content: center;
|
| 158 |
-
animation: lampFloat 4s ease-in-out infinite;
|
| 159 |
-
filter: drop-shadow(0 10px 30px rgba(0, 0, 0, 0.5));
|
| 160 |
-
}
|
| 161 |
-
|
| 162 |
-
@keyframes lampFloat {
|
| 163 |
-
0%, 100% { transform: translateY(0) rotate(0deg); }
|
| 164 |
-
25% { transform: translateY(-12px) rotate(-2deg); }
|
| 165 |
-
75% { transform: translateY(-8px) rotate(2deg); }
|
| 166 |
-
}
|
| 167 |
-
|
| 168 |
-
/* Electric Cord */
|
| 169 |
-
.cord {
|
| 170 |
-
position: absolute;
|
| 171 |
-
width: 4px;
|
| 172 |
-
height: 80px;
|
| 173 |
-
background: linear-gradient(180deg, #555, #333);
|
| 174 |
-
top: -80px;
|
| 175 |
-
left: 50%;
|
| 176 |
-
transform: translateX(-50%);
|
| 177 |
-
border-radius: 2px;
|
| 178 |
-
animation: cordSwing 4s ease-in-out infinite;
|
| 179 |
-
transform-origin: top center;
|
| 180 |
-
}
|
| 181 |
-
|
| 182 |
-
@keyframes cordSwing {
|
| 183 |
-
0%, 100% { transform: translateX(-50%) rotate(0deg); }
|
| 184 |
-
25% { transform: translateX(-50%) rotate(-3deg); }
|
| 185 |
-
75% { transform: translateX(-50%) rotate(3deg); }
|
| 186 |
-
}
|
| 187 |
-
|
| 188 |
-
.cord-plug {
|
| 189 |
-
position: absolute;
|
| 190 |
-
top: -10px;
|
| 191 |
-
left: 50%;
|
| 192 |
-
transform: translateX(-50%);
|
| 193 |
-
width: 12px;
|
| 194 |
-
height: 12px;
|
| 195 |
-
background: #444;
|
| 196 |
-
border-radius: 50%;
|
| 197 |
-
box-shadow: 0 2px 5px rgba(0,0,0,0.5);
|
| 198 |
-
}
|
| 199 |
-
|
| 200 |
-
.lamp-stand {
|
| 201 |
-
width: 10px;
|
| 202 |
-
height: 160px;
|
| 203 |
-
background: linear-gradient(180deg, #e8e8e8, #fff, #e8e8e8);
|
| 204 |
-
border-radius: 5px;
|
| 205 |
-
position: absolute;
|
| 206 |
-
bottom: 0;
|
| 207 |
-
box-shadow: 0 6px 15px rgba(0, 0, 0, 0.4), inset 0 0 10px rgba(255, 255, 255, 0.5);
|
| 208 |
-
}
|
| 209 |
-
|
| 210 |
-
.lamp-base {
|
| 211 |
-
width: 80px;
|
| 212 |
-
height: 25px;
|
| 213 |
-
background: linear-gradient(180deg, #d0d0d0, #f8f8f8, #d0d0d0);
|
| 214 |
-
border-radius: 15px;
|
| 215 |
-
position: absolute;
|
| 216 |
-
bottom: 0;
|
| 217 |
-
box-shadow: 0 6px 20px rgba(0, 0, 0, 0.5), inset 0 -3px 10px rgba(0, 0, 0, 0.2);
|
| 218 |
-
}
|
| 219 |
-
|
| 220 |
-
.lamp-shade {
|
| 221 |
-
width: 140px;
|
| 222 |
-
height: 110px;
|
| 223 |
-
background: linear-gradient(180deg, #95d4a8, #a8d5ba, #8bc9a8);
|
| 224 |
-
clip-path: polygon(22% 0%, 78% 0%, 100% 100%, 0% 100%);
|
| 225 |
-
position: absolute;
|
| 226 |
-
top: 40px;
|
| 227 |
-
border-radius: 10px 10px 0 0;
|
| 228 |
-
box-shadow: 0 10px 30px rgba(0, 0, 0, 0.4), inset 0 -15px 40px rgba(255, 255, 255, 0.4);
|
| 229 |
-
animation: shadeGlow 3s ease-in-out infinite;
|
| 230 |
-
transition: all 0.3s ease;
|
| 231 |
-
}
|
| 232 |
-
|
| 233 |
-
.lamp-shade.happy {
|
| 234 |
-
filter: brightness(1.2);
|
| 235 |
-
}
|
| 236 |
-
|
| 237 |
-
.lamp-shade.sad {
|
| 238 |
-
filter: brightness(0.8);
|
| 239 |
-
}
|
| 240 |
-
|
| 241 |
-
@keyframes shadeGlow {
|
| 242 |
-
0%, 100% {
|
| 243 |
-
box-shadow: 0 10px 30px rgba(0, 0, 0, 0.4), inset 0 -15px 40px rgba(255, 255, 255, 0.4);
|
| 244 |
-
filter: brightness(1);
|
| 245 |
-
}
|
| 246 |
-
50% {
|
| 247 |
-
box-shadow: 0 10px 40px rgba(168, 213, 186, 0.7), inset 0 -15px 50px rgba(255, 255, 255, 0.6);
|
| 248 |
-
filter: brightness(1.15);
|
| 249 |
-
}
|
| 250 |
-
}
|
| 251 |
-
|
| 252 |
-
/* Enhanced Lamp Face with expressions */
|
| 253 |
-
.lamp-face {
|
| 254 |
-
position: absolute;
|
| 255 |
-
top: 75px;
|
| 256 |
-
width: 140px;
|
| 257 |
-
height: 90px;
|
| 258 |
-
z-index: 2;
|
| 259 |
-
transition: all 0.3s ease;
|
| 260 |
-
}
|
| 261 |
-
|
| 262 |
-
.lamp-eye {
|
| 263 |
-
width: 14px;
|
| 264 |
-
height: 20px;
|
| 265 |
-
background: #2d3436;
|
| 266 |
-
border-radius: 50%;
|
| 267 |
-
position: absolute;
|
| 268 |
-
top: 20px;
|
| 269 |
-
transition: all 0.3s ease;
|
| 270 |
-
animation: blink 5s infinite;
|
| 271 |
-
}
|
| 272 |
-
|
| 273 |
-
.lamp-eye.left {
|
| 274 |
-
left: 35px;
|
| 275 |
-
}
|
| 276 |
-
|
| 277 |
-
.lamp-eye.right {
|
| 278 |
-
right: 35px;
|
| 279 |
-
}
|
| 280 |
-
|
| 281 |
-
.lamp-eye::before {
|
| 282 |
-
content: '';
|
| 283 |
-
position: absolute;
|
| 284 |
-
width: 5px;
|
| 285 |
-
height: 5px;
|
| 286 |
-
background: #fff;
|
| 287 |
-
border-radius: 50%;
|
| 288 |
-
top: 4px;
|
| 289 |
-
left: 3px;
|
| 290 |
-
animation: eyeShine 3s infinite;
|
| 291 |
-
}
|
| 292 |
-
|
| 293 |
-
@keyframes eyeShine {
|
| 294 |
-
0%, 100% { opacity: 0.8; }
|
| 295 |
-
50% { opacity: 1; }
|
| 296 |
-
}
|
| 297 |
-
|
| 298 |
-
@keyframes blink {
|
| 299 |
-
0%, 46%, 50%, 100% { height: 20px; }
|
| 300 |
-
48% { height: 2px; }
|
| 301 |
-
}
|
| 302 |
-
|
| 303 |
-
/* Lamp Tongue (when light is on) */
|
| 304 |
-
.lamp-tongue {
|
| 305 |
-
position: absolute;
|
| 306 |
-
width: 20px;
|
| 307 |
-
height: 25px;
|
| 308 |
-
background: #ff6b6b;
|
| 309 |
-
border-radius: 0 0 10px 10px;
|
| 310 |
-
bottom: 15px;
|
| 311 |
-
left: 50%;
|
| 312 |
-
transform: translateX(-50%) scaleY(0);
|
| 313 |
-
transform-origin: top;
|
| 314 |
-
opacity: 0;
|
| 315 |
-
transition: all 0.3s ease;
|
| 316 |
-
}
|
| 317 |
-
|
| 318 |
-
.lamp-tongue.show {
|
| 319 |
-
transform: translateX(-50%) scaleY(1);
|
| 320 |
-
opacity: 1;
|
| 321 |
-
animation: tongueWiggle 0.5s ease-in-out;
|
| 322 |
-
}
|
| 323 |
-
|
| 324 |
-
@keyframes tongueWiggle {
|
| 325 |
-
0%, 100% { transform: translateX(-50%) scaleY(1) rotate(0deg); }
|
| 326 |
-
25% { transform: translateX(-50%) scaleY(1) rotate(-5deg); }
|
| 327 |
-
75% { transform: translateX(-50%) scaleY(1) rotate(5deg); }
|
| 328 |
-
}
|
| 329 |
-
|
| 330 |
-
.lamp-mouth {
|
| 331 |
-
width: 35px;
|
| 332 |
-
height: 18px;
|
| 333 |
-
border: 3px solid #e74c3c;
|
| 334 |
-
border-top: none;
|
| 335 |
-
border-radius: 0 0 35px 35px;
|
| 336 |
-
position: absolute;
|
| 337 |
-
top: 48px;
|
| 338 |
-
left: 50%;
|
| 339 |
-
transform: translateX(-50%);
|
| 340 |
-
transition: all 0.3s ease;
|
| 341 |
-
animation: smile 4s ease-in-out infinite;
|
| 342 |
-
}
|
| 343 |
-
|
| 344 |
-
.lamp-mouth.sad {
|
| 345 |
-
border-radius: 35px 35px 0 0;
|
| 346 |
-
border-top: 3px solid #e74c3c;
|
| 347 |
-
border-bottom: none;
|
| 348 |
-
top: 55px;
|
| 349 |
-
}
|
| 350 |
-
|
| 351 |
-
@keyframes smile {
|
| 352 |
-
0%, 100% { width: 35px; }
|
| 353 |
-
50% { width: 40px; }
|
| 354 |
-
}
|
| 355 |
-
|
| 356 |
-
.lamp-blush {
|
| 357 |
-
width: 22px;
|
| 358 |
-
height: 14px;
|
| 359 |
-
background: rgba(231, 76, 60, 0.5);
|
| 360 |
-
border-radius: 50%;
|
| 361 |
-
position: absolute;
|
| 362 |
-
top: 40px;
|
| 363 |
-
animation: blushPulse 3s ease-in-out infinite;
|
| 364 |
-
}
|
| 365 |
-
|
| 366 |
-
@keyframes blushPulse {
|
| 367 |
-
0%, 100% { opacity: 0.5; }
|
| 368 |
-
50% { opacity: 0.8; }
|
| 369 |
-
}
|
| 370 |
-
|
| 371 |
-
.lamp-blush.left {
|
| 372 |
-
left: 15px;
|
| 373 |
-
}
|
| 374 |
-
|
| 375 |
-
.lamp-blush.right {
|
| 376 |
-
right: 15px;
|
| 377 |
-
}
|
| 378 |
-
|
| 379 |
-
/* Enhanced Light glow effect */
|
| 380 |
-
.lamp-glow {
|
| 381 |
-
position: absolute;
|
| 382 |
-
bottom: -50px;
|
| 383 |
-
left: 50%;
|
| 384 |
-
transform: translateX(-50%);
|
| 385 |
-
width: 280px;
|
| 386 |
-
height: 280px;
|
| 387 |
-
background: radial-gradient(circle, rgba(255, 243, 176, 0.6) 0%, rgba(255, 220, 100, 0.3) 40%, transparent 70%);
|
| 388 |
-
border-radius: 50%;
|
| 389 |
-
animation: glowPulse 3s ease-in-out infinite;
|
| 390 |
-
pointer-events: none;
|
| 391 |
-
filter: blur(20px);
|
| 392 |
-
}
|
| 393 |
-
|
| 394 |
-
@keyframes glowPulse {
|
| 395 |
-
0%, 100% {
|
| 396 |
-
opacity: 0.7;
|
| 397 |
-
transform: translateX(-50%) scale(1);
|
| 398 |
-
}
|
| 399 |
-
50% {
|
| 400 |
-
opacity: 1;
|
| 401 |
-
transform: translateX(-50%) scale(1.15);
|
| 402 |
-
}
|
| 403 |
-
}
|
| 404 |
-
|
| 405 |
-
/* Enhanced Login Form */
|
| 406 |
-
.login-box {
|
| 407 |
-
background: linear-gradient(135deg, rgba(40, 50, 70, 0.8), rgba(30, 40, 60, 0.9));
|
| 408 |
-
padding: 50px 55px;
|
| 409 |
-
border-radius: 25px;
|
| 410 |
-
border: 2px solid rgba(76, 209, 55, 0.6);
|
| 411 |
-
box-shadow: 0 0 50px rgba(76, 209, 55, 0.4), inset 0 0 30px rgba(0, 0, 0, 0.3);
|
| 412 |
-
width: 420px;
|
| 413 |
-
backdrop-filter: blur(15px);
|
| 414 |
-
position: relative;
|
| 415 |
-
overflow: hidden;
|
| 416 |
-
}
|
| 417 |
-
|
| 418 |
-
.login-box::before {
|
| 419 |
-
content: '';
|
| 420 |
-
position: absolute;
|
| 421 |
-
top: -50%;
|
| 422 |
-
left: -50%;
|
| 423 |
-
width: 200%;
|
| 424 |
-
height: 200%;
|
| 425 |
-
background: linear-gradient(45deg, transparent, rgba(76, 209, 55, 0.1), transparent);
|
| 426 |
-
animation: shimmer 3s infinite;
|
| 427 |
-
}
|
| 428 |
-
|
| 429 |
-
@keyframes shimmer {
|
| 430 |
-
0% { transform: translateX(-100%) translateY(-100%) rotate(45deg); }
|
| 431 |
-
100% { transform: translateX(100%) translateY(100%) rotate(45deg); }
|
| 432 |
-
}
|
| 433 |
-
|
| 434 |
-
.login-box > * {
|
| 435 |
-
position: relative;
|
| 436 |
-
z-index: 1;
|
| 437 |
-
}
|
| 438 |
-
|
| 439 |
-
.login-box h2 {
|
| 440 |
-
text-align: center;
|
| 441 |
-
color: #fff;
|
| 442 |
-
font-size: 36px;
|
| 443 |
-
margin-bottom: 35px;
|
| 444 |
-
font-weight: 700;
|
| 445 |
-
text-shadow: 0 4px 10px rgba(0, 0, 0, 0.5);
|
| 446 |
-
animation: textGlow 2s ease-in-out infinite;
|
| 447 |
-
}
|
| 448 |
-
|
| 449 |
-
@keyframes textGlow {
|
| 450 |
-
0%, 100% { text-shadow: 0 4px 10px rgba(0, 0, 0, 0.5); }
|
| 451 |
-
50% { text-shadow: 0 4px 20px rgba(76, 209, 55, 0.5); }
|
| 452 |
-
}
|
| 453 |
-
|
| 454 |
-
.input-group {
|
| 455 |
-
margin-bottom: 28px;
|
| 456 |
-
position: relative;
|
| 457 |
-
}
|
| 458 |
-
|
| 459 |
-
.input-group label {
|
| 460 |
-
display: block;
|
| 461 |
-
color: #b8c5d8;
|
| 462 |
-
font-size: 14px;
|
| 463 |
-
margin-bottom: 10px;
|
| 464 |
-
font-weight: 600;
|
| 465 |
-
letter-spacing: 0.5px;
|
| 466 |
-
}
|
| 467 |
-
|
| 468 |
-
.input-wrapper {
|
| 469 |
-
position: relative;
|
| 470 |
-
}
|
| 471 |
-
|
| 472 |
-
.input-group input {
|
| 473 |
-
width: 100%;
|
| 474 |
-
padding: 16px 50px 16px 20px;
|
| 475 |
-
background: rgba(20, 30, 50, 0.7);
|
| 476 |
-
border: 2px solid rgba(255, 255, 255, 0.1);
|
| 477 |
-
border-radius: 12px;
|
| 478 |
-
color: #fff;
|
| 479 |
-
font-size: 15px;
|
| 480 |
-
outline: none;
|
| 481 |
-
transition: all 0.4s ease;
|
| 482 |
-
font-family: inherit;
|
| 483 |
-
}
|
| 484 |
-
|
| 485 |
-
.input-group input::placeholder {
|
| 486 |
-
color: #6c7a89;
|
| 487 |
-
}
|
| 488 |
-
|
| 489 |
-
.input-group input:focus {
|
| 490 |
-
border-color: #4cd137;
|
| 491 |
-
box-shadow: 0 0 20px rgba(76, 209, 55, 0.4), inset 0 0 10px rgba(76, 209, 55, 0.1);
|
| 492 |
-
background: rgba(20, 30, 50, 0.9);
|
| 493 |
-
transform: translateY(-2px);
|
| 494 |
-
}
|
| 495 |
-
|
| 496 |
-
.input-group input.error {
|
| 497 |
-
border-color: #e74c3c;
|
| 498 |
-
animation: shake 0.5s;
|
| 499 |
-
}
|
| 500 |
-
|
| 501 |
-
@keyframes shake {
|
| 502 |
-
0%, 100% { transform: translateX(0); }
|
| 503 |
-
25% { transform: translateX(-10px); }
|
| 504 |
-
75% { transform: translateX(10px); }
|
| 505 |
-
}
|
| 506 |
-
|
| 507 |
-
/* Password Toggle Eye */
|
| 508 |
-
.toggle-password {
|
| 509 |
-
position: absolute;
|
| 510 |
-
right: 15px;
|
| 511 |
-
top: 50%;
|
| 512 |
-
transform: translateY(-50%);
|
| 513 |
-
cursor: pointer;
|
| 514 |
-
color: #6c7a89;
|
| 515 |
-
font-size: 20px;
|
| 516 |
-
transition: color 0.3s ease;
|
| 517 |
-
user-select: none;
|
| 518 |
-
}
|
| 519 |
-
|
| 520 |
-
.toggle-password:hover {
|
| 521 |
-
color: #4cd137;
|
| 522 |
-
}
|
| 523 |
-
|
| 524 |
-
/* Social Login Buttons */
|
| 525 |
-
.social-login {
|
| 526 |
-
display: flex;
|
| 527 |
-
gap: 12px;
|
| 528 |
-
margin-bottom: 25px;
|
| 529 |
-
}
|
| 530 |
-
|
| 531 |
-
.social-btn {
|
| 532 |
-
flex: 1;
|
| 533 |
-
padding: 12px;
|
| 534 |
-
border: 2px solid rgba(255, 255, 255, 0.2);
|
| 535 |
-
background: rgba(30, 40, 60, 0.6);
|
| 536 |
-
border-radius: 10px;
|
| 537 |
-
color: #fff;
|
| 538 |
-
cursor: pointer;
|
| 539 |
-
transition: all 0.3s ease;
|
| 540 |
-
display: flex;
|
| 541 |
-
align-items: center;
|
| 542 |
-
justify-content: center;
|
| 543 |
-
gap: 8px;
|
| 544 |
-
font-size: 14px;
|
| 545 |
-
font-weight: 600;
|
| 546 |
-
}
|
| 547 |
-
|
| 548 |
-
.social-btn:hover {
|
| 549 |
-
transform: translateY(-3px);
|
| 550 |
-
border-color: #4cd137;
|
| 551 |
-
box-shadow: 0 5px 15px rgba(76, 209, 55, 0.3);
|
| 552 |
-
}
|
| 553 |
-
|
| 554 |
-
.divider {
|
| 555 |
-
text-align: center;
|
| 556 |
-
margin: 25px 0;
|
| 557 |
-
color: #6c7a89;
|
| 558 |
-
position: relative;
|
| 559 |
-
}
|
| 560 |
-
|
| 561 |
-
.divider::before,
|
| 562 |
-
.divider::after {
|
| 563 |
-
content: '';
|
| 564 |
-
position: absolute;
|
| 565 |
-
top: 50%;
|
| 566 |
-
width: 40%;
|
| 567 |
-
height: 1px;
|
| 568 |
-
background: rgba(255, 255, 255, 0.1);
|
| 569 |
-
}
|
| 570 |
-
|
| 571 |
-
.divider::before {
|
| 572 |
-
left: 0;
|
| 573 |
-
}
|
| 574 |
-
|
| 575 |
-
.divider::after {
|
| 576 |
-
right: 0;
|
| 577 |
-
}
|
| 578 |
-
|
| 579 |
-
.login-btn {
|
| 580 |
-
width: 100%;
|
| 581 |
-
padding: 17px;
|
| 582 |
-
background: linear-gradient(135deg, #4cd137, #44bd32, #3da82a);
|
| 583 |
-
background-size: 200% auto;
|
| 584 |
-
border: none;
|
| 585 |
-
border-radius: 12px;
|
| 586 |
-
color: #fff;
|
| 587 |
-
font-size: 18px;
|
| 588 |
-
font-weight: 700;
|
| 589 |
-
cursor: pointer;
|
| 590 |
-
transition: all 0.4s ease;
|
| 591 |
-
box-shadow: 0 6px 25px rgba(76, 209, 55, 0.5);
|
| 592 |
-
margin-top: 12px;
|
| 593 |
-
position: relative;
|
| 594 |
-
overflow: hidden;
|
| 595 |
-
letter-spacing: 1px;
|
| 596 |
-
}
|
| 597 |
-
|
| 598 |
-
.login-btn::before {
|
| 599 |
-
content: '';
|
| 600 |
-
position: absolute;
|
| 601 |
-
top: 0;
|
| 602 |
-
left: -100%;
|
| 603 |
-
width: 100%;
|
| 604 |
-
height: 100%;
|
| 605 |
-
background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.3), transparent);
|
| 606 |
-
transition: left 0.5s;
|
| 607 |
-
}
|
| 608 |
-
|
| 609 |
-
.login-btn:hover::before {
|
| 610 |
-
left: 100%;
|
| 611 |
-
}
|
| 612 |
-
|
| 613 |
-
.login-btn:hover {
|
| 614 |
-
transform: translateY(-4px);
|
| 615 |
-
box-shadow: 0 10px 30px rgba(76, 209, 55, 0.7);
|
| 616 |
-
background-position: right center;
|
| 617 |
-
}
|
| 618 |
-
|
| 619 |
-
.login-btn:active {
|
| 620 |
-
transform: translateY(-2px);
|
| 621 |
-
}
|
| 622 |
-
|
| 623 |
-
.login-btn.loading {
|
| 624 |
-
pointer-events: none;
|
| 625 |
-
opacity: 0.8;
|
| 626 |
-
}
|
| 627 |
-
|
| 628 |
-
.login-btn.loading::after {
|
| 629 |
-
content: '';
|
| 630 |
-
position: absolute;
|
| 631 |
-
width: 20px;
|
| 632 |
-
height: 20px;
|
| 633 |
-
border: 3px solid rgba(255, 255, 255, 0.3);
|
| 634 |
-
border-top-color: #fff;
|
| 635 |
-
border-radius: 50%;
|
| 636 |
-
animation: spin 0.8s linear infinite;
|
| 637 |
-
top: 50%;
|
| 638 |
-
left: 50%;
|
| 639 |
-
transform: translate(-50%, -50%);
|
| 640 |
-
}
|
| 641 |
-
|
| 642 |
-
@keyframes spin {
|
| 643 |
-
to { transform: translate(-50%, -50%) rotate(360deg); }
|
| 644 |
-
}
|
| 645 |
-
|
| 646 |
-
.forgot-password {
|
| 647 |
-
text-align: center;
|
| 648 |
-
margin-top: 22px;
|
| 649 |
-
}
|
| 650 |
-
|
| 651 |
-
.forgot-password a {
|
| 652 |
-
color: #7c8a9e;
|
| 653 |
-
text-decoration: none;
|
| 654 |
-
font-size: 14px;
|
| 655 |
-
transition: all 0.3s ease;
|
| 656 |
-
position: relative;
|
| 657 |
-
}
|
| 658 |
-
|
| 659 |
-
.forgot-password a::after {
|
| 660 |
-
content: '';
|
| 661 |
-
position: absolute;
|
| 662 |
-
bottom: -2px;
|
| 663 |
-
left: 0;
|
| 664 |
-
width: 0;
|
| 665 |
-
height: 2px;
|
| 666 |
-
background: #4cd137;
|
| 667 |
-
transition: width 0.3s ease;
|
| 668 |
-
}
|
| 669 |
-
|
| 670 |
-
.forgot-password a:hover {
|
| 671 |
-
color: #4cd137;
|
| 672 |
-
}
|
| 673 |
-
|
| 674 |
-
.forgot-password a:hover::after {
|
| 675 |
-
width: 100%;
|
| 676 |
-
}
|
| 677 |
-
|
| 678 |
-
/* Toast Notification */
|
| 679 |
-
.toast {
|
| 680 |
-
position: fixed;
|
| 681 |
-
top: 30px;
|
| 682 |
-
right: 30px;
|
| 683 |
-
background: linear-gradient(135deg, rgba(76, 209, 55, 0.95), rgba(68, 189, 50, 0.95));
|
| 684 |
-
color: #fff;
|
| 685 |
-
padding: 18px 28px;
|
| 686 |
-
border-radius: 12px;
|
| 687 |
-
box-shadow: 0 10px 30px rgba(0, 0, 0, 0.5);
|
| 688 |
-
z-index: 1000;
|
| 689 |
-
transform: translateX(400px);
|
| 690 |
-
opacity: 0;
|
| 691 |
-
transition: all 0.4s cubic-bezier(0.68, -0.55, 0.265, 1.55);
|
| 692 |
-
display: flex;
|
| 693 |
-
align-items: center;
|
| 694 |
-
gap: 12px;
|
| 695 |
-
font-weight: 600;
|
| 696 |
-
backdrop-filter: blur(10px);
|
| 697 |
-
}
|
| 698 |
-
|
| 699 |
-
.toast.show {
|
| 700 |
-
transform: translateX(0);
|
| 701 |
-
opacity: 1;
|
| 702 |
-
}
|
| 703 |
-
|
| 704 |
-
.toast.error {
|
| 705 |
-
background: linear-gradient(135deg, rgba(231, 76, 60, 0.95), rgba(192, 57, 43, 0.95));
|
| 706 |
-
}
|
| 707 |
-
|
| 708 |
-
.toast-icon {
|
| 709 |
-
font-size: 24px;
|
| 710 |
-
}
|
| 711 |
-
|
| 712 |
-
/* Code Display Section */
|
| 713 |
-
.code-section {
|
| 714 |
-
margin-top: 60px;
|
| 715 |
-
width: 100%;
|
| 716 |
-
max-width: 1200px;
|
| 717 |
-
z-index: 2;
|
| 718 |
-
animation: fadeIn 1.5s ease 1s backwards;
|
| 719 |
-
}
|
| 720 |
-
|
| 721 |
-
.code-header {
|
| 722 |
-
text-align: center;
|
| 723 |
-
margin-bottom: 30px;
|
| 724 |
-
}
|
| 725 |
-
|
| 726 |
-
.code-header h3 {
|
| 727 |
-
font-size: 28px;
|
| 728 |
-
color: #ffa500;
|
| 729 |
-
margin-bottom: 10px;
|
| 730 |
-
text-shadow: 0 0 20px rgba(255, 165, 0, 0.5);
|
| 731 |
-
}
|
| 732 |
-
|
| 733 |
-
.code-header p {
|
| 734 |
-
color: #b0b8c8;
|
| 735 |
-
font-size: 16px;
|
| 736 |
-
}
|
| 737 |
-
|
| 738 |
-
.code-display {
|
| 739 |
-
display: grid;
|
| 740 |
-
grid-template-columns: repeat(auto-fit, minmax(450px, 1fr));
|
| 741 |
-
gap: 25px;
|
| 742 |
-
padding: 0 20px;
|
| 743 |
-
}
|
| 744 |
-
|
| 745 |
-
.code-block {
|
| 746 |
-
background: rgba(20, 25, 35, 0.9);
|
| 747 |
-
border: 1px solid rgba(76, 209, 55, 0.3);
|
| 748 |
-
border-radius: 15px;
|
| 749 |
-
padding: 25px;
|
| 750 |
-
position: relative;
|
| 751 |
-
overflow: hidden;
|
| 752 |
-
backdrop-filter: blur(10px);
|
| 753 |
-
box-shadow: 0 8px 25px rgba(0, 0, 0, 0.4);
|
| 754 |
-
transition: all 0.3s ease;
|
| 755 |
-
}
|
| 756 |
-
|
| 757 |
-
.code-block:hover {
|
| 758 |
-
transform: translateY(-5px);
|
| 759 |
-
box-shadow: 0 12px 35px rgba(76, 209, 55, 0.3);
|
| 760 |
-
border-color: #4cd137;
|
| 761 |
-
}
|
| 762 |
-
|
| 763 |
-
.code-block::before {
|
| 764 |
-
content: 'style.css';
|
| 765 |
-
position: absolute;
|
| 766 |
-
top: 0;
|
| 767 |
-
left: 0;
|
| 768 |
-
background: rgba(76, 209, 55, 0.2);
|
| 769 |
-
padding: 6px 15px;
|
| 770 |
-
font-size: 12px;
|
| 771 |
-
color: #4cd137;
|
| 772 |
-
border-radius: 0 0 10px 0;
|
| 773 |
-
font-family: 'Courier New', monospace;
|
| 774 |
-
}
|
| 775 |
-
|
| 776 |
-
.code-block pre {
|
| 777 |
-
margin-top: 30px;
|
| 778 |
-
color: #a8b2d1;
|
| 779 |
-
font-family: 'Courier New', monospace;
|
| 780 |
-
font-size: 13px;
|
| 781 |
-
line-height: 1.6;
|
| 782 |
-
overflow-x: auto;
|
| 783 |
-
}
|
| 784 |
-
|
| 785 |
-
.code-block pre code {
|
| 786 |
-
display: block;
|
| 787 |
-
}
|
| 788 |
-
|
| 789 |
-
.keyword { color: #c792ea; }
|
| 790 |
-
.property { color: #82aaff; }
|
| 791 |
-
.value { color: #c3e88d; }
|
| 792 |
-
.important { color: #ff6b6b; }
|
| 793 |
-
.comment { color: #676e95; font-style: italic; }
|
| 794 |
-
|
| 795 |
-
/* Responsive */
|
| 796 |
-
@media (max-width: 1200px) {
|
| 797 |
-
.container {
|
| 798 |
-
gap: 50px;
|
| 799 |
-
padding: 50px 40px;
|
| 800 |
-
}
|
| 801 |
-
|
| 802 |
-
.code-display {
|
| 803 |
-
grid-template-columns: 1fr;
|
| 804 |
-
}
|
| 805 |
-
}
|
| 806 |
-
|
| 807 |
-
@media (max-width: 1024px) {
|
| 808 |
-
.container {
|
| 809 |
-
flex-direction: column;
|
| 810 |
-
gap: 40px;
|
| 811 |
-
padding: 40px 30px;
|
| 812 |
-
}
|
| 813 |
-
|
| 814 |
-
.title {
|
| 815 |
-
font-size: 42px;
|
| 816 |
-
}
|
| 817 |
-
|
| 818 |
-
.lamp-section {
|
| 819 |
-
width: 250px;
|
| 820 |
-
height: 350px;
|
| 821 |
-
}
|
| 822 |
-
}
|
| 823 |
-
|
| 824 |
-
@media (max-width: 600px) {
|
| 825 |
-
.title {
|
| 826 |
-
font-size: 32px;
|
| 827 |
-
margin-bottom: 30px;
|
| 828 |
-
}
|
| 829 |
-
|
| 830 |
-
.container {
|
| 831 |
-
padding: 30px 20px;
|
| 832 |
-
margin: 20px;
|
| 833 |
-
}
|
| 834 |
-
|
| 835 |
-
.login-box {
|
| 836 |
-
width: 100%;
|
| 837 |
-
padding: 35px 25px;
|
| 838 |
-
}
|
| 839 |
-
|
| 840 |
-
.lamp {
|
| 841 |
-
transform: scale(0.75);
|
| 842 |
-
}
|
| 843 |
-
|
| 844 |
-
.social-login {
|
| 845 |
-
flex-direction: column;
|
| 846 |
-
}
|
| 847 |
-
|
| 848 |
-
.code-display {
|
| 849 |
-
padding: 0;
|
| 850 |
-
}
|
| 851 |
-
|
| 852 |
-
.code-block {
|
| 853 |
-
padding: 20px 15px;
|
| 854 |
-
}
|
| 855 |
-
|
| 856 |
-
.code-block pre {
|
| 857 |
-
font-size: 11px;
|
| 858 |
-
}
|
| 859 |
-
}
|
| 860 |
-
</style>
|
| 861 |
-
</head>
|
| 862 |
-
<body>
|
| 863 |
-
<!-- Animated particles -->
|
| 864 |
-
<div class="particles" id="particles"></div>
|
| 865 |
-
|
| 866 |
-
<!-- Title -->
|
| 867 |
-
<h1 class="title">✨ Cute Lamp Login ✨</h1>
|
| 868 |
-
|
| 869 |
-
<!-- Main Container -->
|
| 870 |
-
<div class="container">
|
| 871 |
-
<!-- Enhanced Lamp Section -->
|
| 872 |
-
<div class="lamp-section">
|
| 873 |
-
<div class="lamp-glow"></div>
|
| 874 |
-
<div class="lamp-container" id="lampContainer">
|
| 875 |
-
<div class="lamp" id="lamp">
|
| 876 |
-
<div class="cord">
|
| 877 |
-
<div class="cord-plug"></div>
|
| 878 |
-
</div>
|
| 879 |
-
<div class="lamp-shade" id="lampShade"></div>
|
| 880 |
-
<div class="lamp-face" id="lampFace">
|
| 881 |
-
<div class="lamp-eye left" id="leftEye"></div>
|
| 882 |
-
<div class="lamp-eye right" id="rightEye"></div>
|
| 883 |
-
<div class="lamp-mouth" id="lampMouth"></div>
|
| 884 |
-
<div class="lamp-tongue" id="lampTongue"></div>
|
| 885 |
-
<div class="lamp-blush left"></div>
|
| 886 |
-
<div class="lamp-blush right"></div>
|
| 887 |
-
</div>
|
| 888 |
-
<div class="lamp-stand"></div>
|
| 889 |
-
<div class="lamp-base"></div>
|
| 890 |
-
</div>
|
| 891 |
-
</div>
|
| 892 |
-
</div>
|
| 893 |
-
|
| 894 |
-
<!-- Enhanced Login Form -->
|
| 895 |
-
<div class="login-box">
|
| 896 |
-
<h2>Welcome Back</h2>
|
| 897 |
-
|
| 898 |
-
<!-- Social Login -->
|
| 899 |
-
<div class="social-login">
|
| 900 |
-
<button class="social-btn" onclick="socialLogin('Google')">
|
| 901 |
-
<span>🔍</span>
|
| 902 |
-
<span>Google</span>
|
| 903 |
-
</button>
|
| 904 |
-
<button class="social-btn" onclick="socialLogin('GitHub')">
|
| 905 |
-
<span>💻</span>
|
| 906 |
-
<span>GitHub</span>
|
| 907 |
-
</button>
|
| 908 |
-
</div>
|
| 909 |
-
|
| 910 |
-
<div class="divider">OR</div>
|
| 911 |
-
|
| 912 |
-
<form id="loginForm" onsubmit="handleLogin(event)">
|
| 913 |
-
<div class="input-group">
|
| 914 |
-
<label for="username">Username</label>
|
| 915 |
-
<div class="input-wrapper">
|
| 916 |
-
<input type="text" id="username" placeholder="Enter your username" required>
|
| 917 |
-
</div>
|
| 918 |
-
</div>
|
| 919 |
-
<div class="input-group">
|
| 920 |
-
<label for="password">Password</label>
|
| 921 |
-
<div class="input-wrapper">
|
| 922 |
-
<input type="password" id="password" placeholder="Enter your password" required>
|
| 923 |
-
<span class="toggle-password" id="togglePassword" onclick="togglePassword()">👁️</span>
|
| 924 |
-
</div>
|
| 925 |
-
</div>
|
| 926 |
-
<button type="submit" class="login-btn" id="loginBtn">Login</button>
|
| 927 |
-
</form>
|
| 928 |
-
<div class="forgot-password">
|
| 929 |
-
<a href="#" onclick="forgotPassword(); return false;">Forgot Password?</a>
|
| 930 |
-
</div>
|
| 931 |
-
</div>
|
| 932 |
-
</div>
|
| 933 |
-
|
| 934 |
-
<!-- Code Display Section -->
|
| 935 |
-
<div class="code-section">
|
| 936 |
-
<div class="code-header">
|
| 937 |
-
<h3>💻 Comment "cute" for code</h3>
|
| 938 |
-
<p>Here's a peek at the magic behind this adorable login page</p>
|
| 939 |
-
</div>
|
| 940 |
-
<div class="code-display">
|
| 941 |
-
<div class="code-block">
|
| 942 |
-
<pre><code><span class="keyword">.lamp</span> {
|
| 943 |
-
<span class="property">display</span>: <span class="value">flex</span>;
|
| 944 |
-
<span class="property">height</span>: <span class="value">320px</span>;
|
| 945 |
-
<span class="property">overflow</span>: <span class="value">visible</span> <span class="important">!important</span>;
|
| 946 |
-
}
|
| 947 |
-
|
| 948 |
-
<span class="keyword">.cord</span> {
|
| 949 |
-
<span class="property">stroke</span>: <span class="value">var(--cord)</span>;
|
| 950 |
-
}
|
| 951 |
-
|
| 952 |
-
<span class="keyword">.cord--nip</span> {
|
| 953 |
-
<span class="property">display</span>: <span class="value">none</span>;
|
| 954 |
-
}
|
| 955 |
-
|
| 956 |
-
<span class="keyword">.lamp__tongue</span> {
|
| 957 |
-
<span class="property">fill</span>: <span class="value">var(--tongue)</span>;
|
| 958 |
-
}</code></pre>
|
| 959 |
-
</div>
|
| 960 |
-
|
| 961 |
-
<div class="code-block">
|
| 962 |
-
<pre><code><span class="keyword">.login-btn:active</span> {
|
| 963 |
-
<span class="property">transform</span>: <span class="value">translateY(0px)</span>;
|
| 964 |
-
}
|
| 965 |
-
|
| 966 |
-
<span class="keyword">.form-footer</span> {
|
| 967 |
-
<span class="property">margin-top</span>: <span class="value">1.5rem</span>;
|
| 968 |
-
<span class="property">text-align</span>: <span class="value">center</span>;
|
| 969 |
-
}
|
| 970 |
-
|
| 971 |
-
<span class="keyword">.forgot-link</span> {
|
| 972 |
-
<span class="property">color</span>: <span class="value">#888</span>;
|
| 973 |
-
<span class="property">font-size</span>: <span class="value">0.9rem</span>;
|
| 974 |
-
<span class="property">text-decoration</span>: <span class="value">none</span>;
|
| 975 |
-
<span class="property">transition</span>: <span class="value">all 0.3s ease</span>;
|
| 976 |
-
}</code></pre>
|
| 977 |
-
</div>
|
| 978 |
-
</div>
|
| 979 |
-
</div>
|
| 980 |
-
|
| 981 |
-
<!-- Toast Notification -->
|
| 982 |
-
<div class="toast" id="toast">
|
| 983 |
-
<span class="toast-icon" id="toastIcon">✨</span>
|
| 984 |
-
<span id="toastMessage">Welcome!</span>
|
| 985 |
-
</div>
|
| 986 |
-
|
| 987 |
-
<script>
|
| 988 |
-
// Enhanced Particle System
|
| 989 |
-
const particlesContainer = document.getElementById('particles');
|
| 990 |
-
|
| 991 |
-
function createParticles() {
|
| 992 |
-
for (let i = 0; i < 40; i++) {
|
| 993 |
-
const particle = document.createElement('div');
|
| 994 |
-
particle.className = 'particle';
|
| 995 |
-
particle.style.left = Math.random() * 100 + '%';
|
| 996 |
-
particle.style.setProperty('--tx', (Math.random() - 0.5) * 200 + 'px');
|
| 997 |
-
particle.style.animationDelay = Math.random() * 15 + 's';
|
| 998 |
-
particle.style.animationDuration = (10 + Math.random() * 10) + 's';
|
| 999 |
-
particlesContainer.appendChild(particle);
|
| 1000 |
-
}
|
| 1001 |
-
|
| 1002 |
-
// Add sparkles
|
| 1003 |
-
for (let i = 0; i < 20; i++) {
|
| 1004 |
-
const sparkle = document.createElement('div');
|
| 1005 |
-
sparkle.className = 'sparkle';
|
| 1006 |
-
sparkle.style.left = Math.random() * 100 + '%';
|
| 1007 |
-
sparkle.style.top = Math.random() * 100 + '%';
|
| 1008 |
-
sparkle.style.animationDelay = Math.random() * 3 + 's';
|
| 1009 |
-
particlesContainer.appendChild(sparkle);
|
| 1010 |
-
}
|
| 1011 |
-
}
|
| 1012 |
-
|
| 1013 |
-
createParticles();
|
| 1014 |
-
|
| 1015 |
-
// Lamp Elements
|
| 1016 |
-
const lamp = document.getElementById('lamp');
|
| 1017 |
-
const lampShade = document.getElementById('lampShade');
|
| 1018 |
-
const lampFace = document.getElementById('lampFace');
|
| 1019 |
-
const lampMouth = document.getElementById('lampMouth');
|
| 1020 |
-
const lampTongue = document.getElementById('lampTongue');
|
| 1021 |
-
const leftEye = document.getElementById('leftEye');
|
| 1022 |
-
const rightEye = document.getElementById('rightEye');
|
| 1023 |
-
const lampContainer = document.getElementById('lampContainer');
|
| 1024 |
-
|
| 1025 |
-
// Form Elements
|
| 1026 |
-
const loginForm = document.getElementById('loginForm');
|
| 1027 |
-
const usernameInput = document.getElementById('username');
|
| 1028 |
-
const passwordInput = document.getElementById('password');
|
| 1029 |
-
const loginBtn = document.getElementById('loginBtn');
|
| 1030 |
-
const toast = document.getElementById('toast');
|
| 1031 |
-
const toastIcon = document.getElementById('toastIcon');
|
| 1032 |
-
const toastMessage = document.getElementById('toastMessage');
|
| 1033 |
-
|
| 1034 |
-
// Lamp Expressions
|
| 1035 |
-
function setLampExpression(expression) {
|
| 1036 |
-
lampShade.classList.remove('happy', 'sad');
|
| 1037 |
-
lampMouth.classList.remove('sad');
|
| 1038 |
-
lampTongue.classList.remove('show');
|
| 1039 |
-
|
| 1040 |
-
switch(expression) {
|
| 1041 |
-
case 'happy':
|
| 1042 |
-
lampShade.classList.add('happy');
|
| 1043 |
-
lampTongue.classList.add('show');
|
| 1044 |
-
leftEye.style.background = '#4cd137';
|
| 1045 |
-
rightEye.style.background = '#4cd137';
|
| 1046 |
-
break;
|
| 1047 |
-
case 'sad':
|
| 1048 |
-
lampShade.classList.add('sad');
|
| 1049 |
-
lampMouth.classList.add('sad');
|
| 1050 |
-
leftEye.style.background = '#3498db';
|
| 1051 |
-
rightEye.style.background = '#3498db';
|
| 1052 |
-
break;
|
| 1053 |
-
case 'neutral':
|
| 1054 |
-
leftEye.style.background = '#2d3436';
|
| 1055 |
-
rightEye.style.background = '#2d3436';
|
| 1056 |
-
break;
|
| 1057 |
-
}
|
| 1058 |
-
}
|
| 1059 |
-
|
| 1060 |
-
// Eye following cursor
|
| 1061 |
-
let isFollowingCursor = true;
|
| 1062 |
-
|
| 1063 |
-
document.addEventListener('mousemove', (e) => {
|
| 1064 |
-
if (!isFollowingCursor) return;
|
| 1065 |
-
|
| 1066 |
-
const lampRect = lamp.getBoundingClientRect();
|
| 1067 |
-
const lampCenterX = lampRect.left + lampRect.width / 2;
|
| 1068 |
-
const lampCenterY = lampRect.top + lampRect.height / 2;
|
| 1069 |
-
|
| 1070 |
-
const angle = Math.atan2(e.clientY - lampCenterY, e.clientX - lampCenterX);
|
| 1071 |
-
const distance = Math.min(3, Math.hypot(e.clientX - lampCenterX, e.clientY - lampCenterY) / 100);
|
| 1072 |
-
|
| 1073 |
-
const moveX = Math.cos(angle) * distance;
|
| 1074 |
-
const moveY = Math.sin(angle) * distance;
|
| 1075 |
-
|
| 1076 |
-
leftEye.style.transform = `translate(${moveX}px, ${moveY}px)`;
|
| 1077 |
-
rightEye.style.transform = `translate(${moveX}px, ${moveY}px)`;
|
| 1078 |
-
});
|
| 1079 |
-
|
| 1080 |
-
// Input Focus Effects
|
| 1081 |
-
const inputs = [usernameInput, passwordInput];
|
| 1082 |
-
|
| 1083 |
-
inputs.forEach(input => {
|
| 1084 |
-
input.addEventListener('focus', () => {
|
| 1085 |
-
setLampExpression('happy');
|
| 1086 |
-
isFollowingCursor = false;
|
| 1087 |
-
});
|
| 1088 |
-
|
| 1089 |
-
input.addEventListener('blur', () => {
|
| 1090 |
-
if (!usernameInput.value && !passwordInput.value) {
|
| 1091 |
-
setLampExpression('neutral');
|
| 1092 |
-
isFollowingCursor = true;
|
| 1093 |
-
}
|
| 1094 |
-
});
|
| 1095 |
-
|
| 1096 |
-
input.addEventListener('input', () => {
|
| 1097 |
-
if (input.classList.contains('error')) {
|
| 1098 |
-
input.classList.remove('error');
|
| 1099 |
-
setLampExpression('happy');
|
| 1100 |
-
}
|
| 1101 |
-
});
|
| 1102 |
-
});
|
| 1103 |
-
|
| 1104 |
-
// Toast Notification Function
|
| 1105 |
-
function showToast(message, type = 'success') {
|
| 1106 |
-
toast.className = 'toast';
|
| 1107 |
-
if (type === 'error') {
|
| 1108 |
-
toast.classList.add('error');
|
| 1109 |
-
toastIcon.textContent = '❌';
|
| 1110 |
-
} else {
|
| 1111 |
-
toastIcon.textContent = '✨';
|
| 1112 |
-
}
|
| 1113 |
-
|
| 1114 |
-
toastMessage.textContent = message;
|
| 1115 |
-
|
| 1116 |
-
setTimeout(() => {
|
| 1117 |
-
toast.classList.add('show');
|
| 1118 |
-
}, 100);
|
| 1119 |
-
|
| 1120 |
-
setTimeout(() => {
|
| 1121 |
-
toast.classList.remove('show');
|
| 1122 |
-
}, 3000);
|
| 1123 |
-
}
|
| 1124 |
-
|
| 1125 |
-
// Password Toggle
|
| 1126 |
-
function togglePassword() {
|
| 1127 |
-
const type = passwordInput.type === 'password' ? 'text' : 'password';
|
| 1128 |
-
passwordInput.type = type;
|
| 1129 |
-
document.getElementById('togglePassword').textContent = type === 'password' ? '👁️' : '🙈';
|
| 1130 |
-
}
|
| 1131 |
-
|
| 1132 |
-
// Form Validation
|
| 1133 |
-
function validateForm() {
|
| 1134 |
-
let isValid = true;
|
| 1135 |
-
|
| 1136 |
-
if (usernameInput.value.trim().length < 3) {
|
| 1137 |
-
usernameInput.classList.add('error');
|
| 1138 |
-
isValid = false;
|
| 1139 |
-
}
|
| 1140 |
-
|
| 1141 |
-
if (passwordInput.value.trim().length < 6) {
|
| 1142 |
-
passwordInput.classList.add('error');
|
| 1143 |
-
isValid = false;
|
| 1144 |
-
}
|
| 1145 |
-
|
| 1146 |
-
if (!isValid) {
|
| 1147 |
-
setLampExpression('sad');
|
| 1148 |
-
showToast('Please check your credentials! 😢', 'error');
|
| 1149 |
-
}
|
| 1150 |
-
|
| 1151 |
-
return isValid;
|
| 1152 |
-
}
|
| 1153 |
-
|
| 1154 |
-
// Handle Login
|
| 1155 |
-
function handleLogin(e) {
|
| 1156 |
-
e.preventDefault();
|
| 1157 |
-
|
| 1158 |
-
if (!validateForm()) {
|
| 1159 |
-
return;
|
| 1160 |
-
}
|
| 1161 |
-
|
| 1162 |
-
// Show loading state
|
| 1163 |
-
loginBtn.classList.add('loading');
|
| 1164 |
-
loginBtn.textContent = '';
|
| 1165 |
-
setLampExpression('happy');
|
| 1166 |
-
|
| 1167 |
-
// Simulate API call
|
| 1168 |
-
setTimeout(() => {
|
| 1169 |
-
loginBtn.classList.remove('loading');
|
| 1170 |
-
loginBtn.textContent = 'Login';
|
| 1171 |
-
|
| 1172 |
-
const username = usernameInput.value;
|
| 1173 |
-
showToast(`Welcome back, ${username}! 🎉`, 'success');
|
| 1174 |
-
|
| 1175 |
-
// Reset form after success
|
| 1176 |
-
setTimeout(() => {
|
| 1177 |
-
loginForm.reset();
|
| 1178 |
-
setLampExpression('neutral');
|
| 1179 |
-
isFollowingCursor = true;
|
| 1180 |
-
}, 2000);
|
| 1181 |
-
}, 2000);
|
| 1182 |
-
}
|
| 1183 |
-
|
| 1184 |
-
// Social Login
|
| 1185 |
-
function socialLogin(platform) {
|
| 1186 |
-
setLampExpression('happy');
|
| 1187 |
-
showToast(`Logging in with ${platform}... 🚀`, 'success');
|
| 1188 |
-
|
| 1189 |
-
setTimeout(() => {
|
| 1190 |
-
showToast(`${platform} login successful! 🎊`, 'success');
|
| 1191 |
-
}, 1500);
|
| 1192 |
-
}
|
| 1193 |
-
|
| 1194 |
-
// Forgot Password
|
| 1195 |
-
function forgotPassword() {
|
| 1196 |
-
setLampExpression('sad');
|
| 1197 |
-
showToast('Password reset link sent to your email! 📧', 'success');
|
| 1198 |
-
|
| 1199 |
-
setTimeout(() => {
|
| 1200 |
-
setLampExpression('neutral');
|
| 1201 |
-
}, 2000);
|
| 1202 |
-
}
|
| 1203 |
-
|
| 1204 |
-
// Lamp Click Interaction
|
| 1205 |
-
lampContainer.addEventListener('click', () => {
|
| 1206 |
-
setLampExpression('happy');
|
| 1207 |
-
lampContainer.style.transform = 'scale(1.1)';
|
| 1208 |
-
|
| 1209 |
-
setTimeout(() => {
|
| 1210 |
-
lampContainer.style.transform = 'scale(1)';
|
| 1211 |
-
}, 300);
|
| 1212 |
-
|
| 1213 |
-
setTimeout(() => {
|
| 1214 |
-
if (!usernameInput.value && !passwordInput.value) {
|
| 1215 |
-
setLampExpression('neutral');
|
| 1216 |
-
}
|
| 1217 |
-
}, 2000);
|
| 1218 |
-
});
|
| 1219 |
-
|
| 1220 |
-
// Welcome Animation
|
| 1221 |
-
window.addEventListener('load', () => {
|
| 1222 |
-
setTimeout(() => {
|
| 1223 |
-
showToast('Hello! Ready to login? 👋', 'success');
|
| 1224 |
-
}, 1000);
|
| 1225 |
-
});
|
| 1226 |
-
|
| 1227 |
-
// Easter Egg: Konami Code
|
| 1228 |
-
let konamiCode = [];
|
| 1229 |
-
const konamiSequence = ['ArrowUp', 'ArrowUp', 'ArrowDown', 'ArrowDown', 'ArrowLeft', 'ArrowRight', 'ArrowLeft', 'ArrowRight', 'b', 'a'];
|
| 1230 |
-
|
| 1231 |
-
document.addEventListener('keydown', (e) => {
|
| 1232 |
-
konamiCode.push(e.key);
|
| 1233 |
-
konamiCode = konamiCode.slice(-10);
|
| 1234 |
-
|
| 1235 |
-
if (konamiCode.join(',') === konamiSequence.join(',')) {
|
| 1236 |
-
lampShade.style.background = 'linear-gradient(180deg, #ff6b9d, #c44569)';
|
| 1237 |
-
showToast('🎉 Rainbow Lamp Mode Activated! 🌈', 'success');
|
| 1238 |
-
|
| 1239 |
-
let hue = 0;
|
| 1240 |
-
const rainbowInterval = setInterval(() => {
|
| 1241 |
-
lampShade.style.background = `hsl(${hue}, 70%, 60%)`;
|
| 1242 |
-
hue = (hue + 5) % 360;
|
| 1243 |
-
}, 100);
|
| 1244 |
-
|
| 1245 |
-
setTimeout(() => {
|
| 1246 |
-
clearInterval(rainbowInterval);
|
| 1247 |
-
lampShade.style.background = '';
|
| 1248 |
-
showToast('Back to normal! ✨', 'success');
|
| 1249 |
-
}, 5000);
|
| 1250 |
-
}
|
| 1251 |
-
});
|
| 1252 |
-
|
| 1253 |
-
// Keyboard shortcuts
|
| 1254 |
-
document.addEventListener('keydown', (e) => {
|
| 1255 |
-
// Ctrl/Cmd + Enter to submit
|
| 1256 |
-
if ((e.ctrlKey || e.metaKey) && e.key === 'Enter') {
|
| 1257 |
-
if (usernameInput.value || passwordInput.value) {
|
| 1258 |
-
loginForm.dispatchEvent(new Event('submit'));
|
| 1259 |
-
}
|
| 1260 |
-
}
|
| 1261 |
-
});
|
| 1262 |
-
|
| 1263 |
-
// Auto-focus username on load
|
| 1264 |
-
window.addEventListener('load', () => {
|
| 1265 |
-
setTimeout(() => {
|
| 1266 |
-
usernameInput.focus();
|
| 1267 |
-
}, 500);
|
| 1268 |
-
});
|
| 1269 |
-
|
| 1270 |
-
// Add accessibility: ESC to clear form
|
| 1271 |
-
document.addEventListener('keydown', (e) => {
|
| 1272 |
-
if (e.key === 'Escape') {
|
| 1273 |
-
loginForm.reset();
|
| 1274 |
-
setLampExpression('neutral');
|
| 1275 |
-
isFollowingCursor = true;
|
| 1276 |
-
showToast('Form cleared! 🧹', 'success');
|
| 1277 |
-
}
|
| 1278 |
-
});
|
| 1279 |
-
</script>
|
| 1280 |
-
</body>
|
| 1281 |
-
</html>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
render.yaml
DELETED
|
@@ -1,37 +0,0 @@
|
|
| 1 |
-
services:
|
| 2 |
-
- type: web
|
| 3 |
-
name: data-science-agent
|
| 4 |
-
runtime: docker
|
| 5 |
-
plan: free # Change to 'starter' or higher for production
|
| 6 |
-
region: oregon # Change to your preferred region
|
| 7 |
-
branch: main
|
| 8 |
-
dockerfilePath: ./Dockerfile
|
| 9 |
-
envVars:
|
| 10 |
-
- key: LLM_PROVIDER
|
| 11 |
-
value: gemini
|
| 12 |
-
- key: GOOGLE_API_KEY
|
| 13 |
-
sync: false # Mark as secret - add via Render dashboard
|
| 14 |
-
- key: GEMINI_MODEL
|
| 15 |
-
value: gemini-2.5-flash
|
| 16 |
-
- key: REASONING_EFFORT
|
| 17 |
-
value: medium
|
| 18 |
-
- key: CACHE_DB_PATH
|
| 19 |
-
value: /tmp/cache_db/cache.db
|
| 20 |
-
- key: CACHE_TTL_SECONDS
|
| 21 |
-
value: 86400
|
| 22 |
-
- key: OUTPUT_DIR
|
| 23 |
-
value: /tmp/outputs
|
| 24 |
-
- key: DATA_DIR
|
| 25 |
-
value: /tmp/data
|
| 26 |
-
- key: MAX_PARALLEL_TOOLS
|
| 27 |
-
value: 5
|
| 28 |
-
- key: MAX_RETRIES
|
| 29 |
-
value: 3
|
| 30 |
-
- key: TIMEOUT_SECONDS
|
| 31 |
-
value: 300
|
| 32 |
-
- key: PORT
|
| 33 |
-
value: 8080
|
| 34 |
-
- key: ARTIFACT_BACKEND
|
| 35 |
-
value: local
|
| 36 |
-
healthCheckPath: /api/health
|
| 37 |
-
autoDeploy: true
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
requirements.txt
CHANGED
|
@@ -16,6 +16,11 @@ lightgbm>=4.6.0
|
|
| 16 |
catboost>=1.2.8
|
| 17 |
optuna>=3.5.0
|
| 18 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
# Explainability
|
| 20 |
shap>=0.44.1
|
| 21 |
|
|
@@ -33,6 +38,7 @@ plotly>=5.18.0 # Interactive visualizations
|
|
| 33 |
|
| 34 |
# EDA Report Generation
|
| 35 |
ydata-profiling>=4.17.0 # Comprehensive automated EDA reports with Python 3.13 compatibility
|
|
|
|
| 36 |
|
| 37 |
# User Interface
|
| 38 |
# gradio>=5.49.1 # Replaced with React frontend
|
|
@@ -44,14 +50,18 @@ python-multipart>=0.0.6 # For file uploads
|
|
| 44 |
|
| 45 |
# Text Processing
|
| 46 |
textblob>=0.17.1
|
|
|
|
| 47 |
|
| 48 |
# Time Series Forecasting
|
| 49 |
prophet>=1.1.5
|
| 50 |
holidays>=0.38
|
|
|
|
| 51 |
|
| 52 |
# MLOps & Explainability
|
| 53 |
lime==0.2.0.1
|
| 54 |
fairlearn==0.10.0
|
|
|
|
|
|
|
| 55 |
|
| 56 |
# NLP & Semantic Layer (REQUIRED for column understanding and agent routing)
|
| 57 |
sentence-transformers>=2.2.2 # For semantic column embeddings and agent routing
|
|
@@ -74,6 +84,12 @@ Pillow==10.1.0 # For basic image processing
|
|
| 74 |
# These are optional but add specialized capabilities
|
| 75 |
# lifetimes==0.11.3 # For customer lifetime value modeling
|
| 76 |
# econml==0.15.0 # For advanced causal inference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
|
| 78 |
# CLI & UI
|
| 79 |
typer==0.9.0
|
|
|
|
| 16 |
catboost>=1.2.8
|
| 17 |
optuna>=3.5.0
|
| 18 |
|
| 19 |
+
# AutoGluon AutoML (modular install - only tabular + timeseries)
|
| 20 |
+
autogluon.tabular>=1.2
|
| 21 |
+
autogluon.timeseries>=1.2
|
| 22 |
+
holidays>=0.40 # Holiday calendar for time series covariates
|
| 23 |
+
|
| 24 |
# Explainability
|
| 25 |
shap>=0.44.1
|
| 26 |
|
|
|
|
| 38 |
|
| 39 |
# EDA Report Generation
|
| 40 |
ydata-profiling>=4.17.0 # Comprehensive automated EDA reports with Python 3.13 compatibility
|
| 41 |
+
sweetviz>=2.3.0 # Interactive EDA with comparison support
|
| 42 |
|
| 43 |
# User Interface
|
| 44 |
# gradio>=5.49.1 # Replaced with React frontend
|
|
|
|
| 50 |
|
| 51 |
# Text Processing
|
| 52 |
textblob>=0.17.1
|
| 53 |
+
vaderSentiment>=3.3.2 # Rule-based sentiment analysis (fast, no GPU needed)
|
| 54 |
|
| 55 |
# Time Series Forecasting
|
| 56 |
prophet>=1.1.5
|
| 57 |
holidays>=0.38
|
| 58 |
+
pmdarima>=2.0 # Auto ARIMA (auto_arima) for optimal order selection
|
| 59 |
|
| 60 |
# MLOps & Explainability
|
| 61 |
lime==0.2.0.1
|
| 62 |
fairlearn==0.10.0
|
| 63 |
+
evidently>=0.4.0 # Production-grade data drift detection & monitoring
|
| 64 |
+
dtreeviz>=2.2 # Decision tree visualization
|
| 65 |
|
| 66 |
# NLP & Semantic Layer (REQUIRED for column understanding and agent routing)
|
| 67 |
sentence-transformers>=2.2.2 # For semantic column embeddings and agent routing
|
|
|
|
| 84 |
# These are optional but add specialized capabilities
|
| 85 |
# lifetimes==0.11.3 # For customer lifetime value modeling
|
| 86 |
# econml==0.15.0 # For advanced causal inference
|
| 87 |
+
dowhy>=0.11 # Formal causal inference with DAG-based reasoning
|
| 88 |
+
|
| 89 |
+
# Data Quality & Validation
|
| 90 |
+
cleanlab>=2.6 # Label error detection using confident learning
|
| 91 |
+
pandera>=0.18 # Schema-based DataFrame validation
|
| 92 |
+
boruta>=0.3 # All-relevant feature selection (BorutaPy)
|
| 93 |
|
| 94 |
# CLI & UI
|
| 95 |
typer==0.9.0
|
src/api/app.py
CHANGED
|
@@ -383,10 +383,23 @@ async def stream_progress(session_id: str):
|
|
| 383 |
print(f"[SSE] SENDING connection event to client")
|
| 384 |
yield f"data: {safe_json_dumps(connection_event)}\n\n"
|
| 385 |
|
| 386 |
-
#
|
| 387 |
-
#
|
| 388 |
-
#
|
| 389 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 390 |
|
| 391 |
print(f"[SSE] Starting event stream loop for session {session_id}")
|
| 392 |
|
|
|
|
| 383 |
print(f"[SSE] SENDING connection event to client")
|
| 384 |
yield f"data: {safe_json_dumps(connection_event)}\n\n"
|
| 385 |
|
| 386 |
+
# 🔥 FIX: Replay any events that were emitted BEFORE this subscriber connected
|
| 387 |
+
# This handles the race condition where background analysis starts emitting events
|
| 388 |
+
# before the frontend's SSE reconnection completes
|
| 389 |
+
history = progress_manager.get_history(session_id)
|
| 390 |
+
if history:
|
| 391 |
+
print(f"[SSE] Replaying {len(history)} missed events for late-joining subscriber")
|
| 392 |
+
for past_event in history:
|
| 393 |
+
# Don't replay if it's already a terminal event
|
| 394 |
+
if past_event.get('type') != 'analysis_complete':
|
| 395 |
+
yield f"data: {safe_json_dumps(past_event)}\n\n"
|
| 396 |
+
else:
|
| 397 |
+
# If analysis already completed before we connected, send it and close
|
| 398 |
+
yield f"data: {safe_json_dumps(past_event)}\n\n"
|
| 399 |
+
print(f"[SSE] Analysis already completed before subscriber connected - closing")
|
| 400 |
+
return
|
| 401 |
+
else:
|
| 402 |
+
print(f"[SSE] No history to replay (fresh session)")
|
| 403 |
|
| 404 |
print(f"[SSE] Starting event stream loop for session {session_id}")
|
| 405 |
|
src/orchestrator.py
CHANGED
|
@@ -39,6 +39,7 @@ from .tools import (
|
|
| 39 |
profile_dataset,
|
| 40 |
detect_data_quality_issues,
|
| 41 |
analyze_correlations,
|
|
|
|
| 42 |
get_smart_summary, # NEW
|
| 43 |
clean_missing_values,
|
| 44 |
handle_outliers,
|
|
@@ -49,6 +50,16 @@ from .tools import (
|
|
| 49 |
encode_categorical,
|
| 50 |
train_baseline_models,
|
| 51 |
generate_model_report,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
# Data Wrangling Tools (3) - NEW
|
| 53 |
merge_datasets,
|
| 54 |
concat_datasets,
|
|
@@ -86,12 +97,14 @@ from .tools import (
|
|
| 86 |
perform_named_entity_recognition,
|
| 87 |
analyze_sentiment_advanced,
|
| 88 |
perform_text_similarity,
|
| 89 |
-
# Production/MLOps (5)
|
| 90 |
monitor_model_drift,
|
| 91 |
explain_predictions,
|
| 92 |
generate_model_card,
|
| 93 |
perform_ab_test_analysis,
|
| 94 |
detect_feature_leakage,
|
|
|
|
|
|
|
| 95 |
# Time Series (3)
|
| 96 |
forecast_time_series,
|
| 97 |
detect_seasonality_trends,
|
|
@@ -119,8 +132,9 @@ from .tools import (
|
|
| 119 |
generate_interactive_box_plots,
|
| 120 |
generate_interactive_time_series,
|
| 121 |
generate_plotly_dashboard,
|
| 122 |
-
# EDA Report Generation (
|
| 123 |
generate_ydata_profiling_report,
|
|
|
|
| 124 |
# Code Interpreter (2) - NEW PHASE 2 - TRUE AI AGENT CAPABILITY
|
| 125 |
execute_python_code,
|
| 126 |
execute_code_from_file,
|
|
@@ -373,6 +387,7 @@ class DataScienceCopilot:
|
|
| 373 |
"profile_dataset": profile_dataset,
|
| 374 |
"detect_data_quality_issues": detect_data_quality_issues,
|
| 375 |
"analyze_correlations": analyze_correlations,
|
|
|
|
| 376 |
"get_smart_summary": get_smart_summary, # NEW
|
| 377 |
"clean_missing_values": clean_missing_values,
|
| 378 |
"handle_outliers": handle_outliers,
|
|
@@ -383,6 +398,16 @@ class DataScienceCopilot:
|
|
| 383 |
"encode_categorical": encode_categorical,
|
| 384 |
"train_baseline_models": train_baseline_models,
|
| 385 |
"generate_model_report": generate_model_report,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 386 |
# Data Wrangling Tools (3) - NEW
|
| 387 |
"merge_datasets": merge_datasets,
|
| 388 |
"concat_datasets": concat_datasets,
|
|
@@ -420,12 +445,14 @@ class DataScienceCopilot:
|
|
| 420 |
"perform_named_entity_recognition": perform_named_entity_recognition,
|
| 421 |
"analyze_sentiment_advanced": analyze_sentiment_advanced,
|
| 422 |
"perform_text_similarity": perform_text_similarity,
|
| 423 |
-
# Production/MLOps (5)
|
| 424 |
"monitor_model_drift": monitor_model_drift,
|
| 425 |
"explain_predictions": explain_predictions,
|
| 426 |
"generate_model_card": generate_model_card,
|
| 427 |
"perform_ab_test_analysis": perform_ab_test_analysis,
|
| 428 |
"detect_feature_leakage": detect_feature_leakage,
|
|
|
|
|
|
|
| 429 |
# Time Series (3)
|
| 430 |
"forecast_time_series": forecast_time_series,
|
| 431 |
"detect_seasonality_trends": detect_seasonality_trends,
|
|
@@ -453,8 +480,9 @@ class DataScienceCopilot:
|
|
| 453 |
"generate_interactive_box_plots": generate_interactive_box_plots,
|
| 454 |
"generate_interactive_time_series": generate_interactive_time_series,
|
| 455 |
"generate_plotly_dashboard": generate_plotly_dashboard,
|
| 456 |
-
# EDA Report Generation (
|
| 457 |
"generate_ydata_profiling_report": generate_ydata_profiling_report,
|
|
|
|
| 458 |
# Code Interpreter (2) - NEW PHASE 2 - TRUE AI AGENT CAPABILITY
|
| 459 |
"execute_python_code": execute_python_code,
|
| 460 |
"execute_code_from_file": execute_code_from_file,
|
|
@@ -677,7 +705,17 @@ structure, variable relationships, and expected insights - not hardcoded domain
|
|
| 677 |
7. **IF DATETIME COLUMNS EXIST**: create_time_features(latest, date_col="<column_name>", output="./outputs/data/time_features.csv") - Extract year/month/day/hour/weekday/timestamp from each datetime column
|
| 678 |
8. encode_categorical(latest, method="auto", output="./outputs/data/encoded.csv")
|
| 679 |
9. generate_eda_plots(encoded, target_col, output_dir="./outputs/plots/eda") - Generate EDA visualizations
|
| 680 |
-
10. **ONLY IF USER EXPLICITLY REQUESTED ML**:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 681 |
10b. **ALWAYS AFTER MODEL TRAINING**: generate_ydata_profiling_report(encoded, output_path="./outputs/reports/ydata_profile.html") - Comprehensive data analysis report
|
| 682 |
11. **HYPERPARAMETER TUNING (⚠️ ONLY WHEN EXPLICITLY REQUESTED)**:
|
| 683 |
- ⚠️ **CRITICAL WARNING**: This is EXTREMELY expensive (5-10 minutes) and resource-intensive!
|
|
@@ -842,7 +880,11 @@ Use specialized tools FIRST. Only use execute_python_code for:
|
|
| 842 |
- ❌ Missing values → USE clean_missing_values() tool
|
| 843 |
- ❌ Outliers → USE handle_outliers() tool
|
| 844 |
- ❌ Standard EDA plots → USE generate_eda_plots() or generate_plotly_dashboard()
|
| 845 |
-
- ❌ Model training → USE
|
|
|
|
|
|
|
|
|
|
|
|
|
| 846 |
- ❌ Tasks with dedicated tools → USE THE TOOL, NOT custom code!
|
| 847 |
|
| 848 |
**Rule of Thumb:**
|
|
@@ -854,7 +896,15 @@ Use specialized tools FIRST. Only use execute_python_code for:
|
|
| 854 |
- force_numeric_conversion: Converts string columns to numeric (auto-detects, skips text)
|
| 855 |
- clean_missing_values: "auto" mode supported
|
| 856 |
- encode_categorical: one-hot/target/frequency encoding
|
| 857 |
-
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 858 |
- **⭐ execute_python_code**: Write and run custom Python code for ANY task not covered by tools (TRUE AI AGENT capability)
|
| 859 |
- **execute_code_from_file**: Run existing Python scripts
|
| 860 |
- Advanced: hyperparameter_tuning, perform_eda_analysis, handle_imbalanced_data, perform_feature_scaling, detect_anomalies, detect_and_handle_multicollinearity, auto_feature_engineering, forecast_time_series, explain_predictions, generate_business_insights, perform_topic_modeling, extract_image_features, monitor_model_drift
|
|
@@ -941,7 +991,7 @@ When you've finished all tool executions and are ready to return the final respo
|
|
| 941 |
- Were there any interesting correlations or anomalies?
|
| 942 |
3. **Model performance** (if trained) - **CRITICAL: YOU MUST INCLUDE THESE METRICS**:
|
| 943 |
- **ALWAYS extract and display** the exact metrics from tool results:
|
| 944 |
-
- R² Score, RMSE, MAE from the train_baseline_models results
|
| 945 |
- List ALL models trained (not just the best one)
|
| 946 |
- Example: "Trained 6 models: XGBoost (R²=0.713, RMSE=0.207), Random Forest (R²=0.685, RMSE=0.218), etc."
|
| 947 |
- If hyperparameter tuning was done, show before/after comparison
|
|
@@ -1021,13 +1071,15 @@ You work collaboratively with other specialists and hand off cleaned data to pre
|
|
| 1021 |
"modeling_agent": {
|
| 1022 |
"name": "ML Modeling Specialist",
|
| 1023 |
"emoji": "🤖",
|
| 1024 |
-
"description": "Build and train predictive machine learning models to forecast outcomes, classify categories,
|
| 1025 |
-
"system_prompt": """You are the ML Modeling Specialist Agent - an expert in machine learning.
|
| 1026 |
|
| 1027 |
**Your Expertise:**
|
|
|
|
| 1028 |
- Model selection and baseline training
|
| 1029 |
- Hyperparameter tuning and optimization
|
| 1030 |
-
- Ensemble methods and
|
|
|
|
| 1031 |
- Cross-validation strategies
|
| 1032 |
- Model evaluation and performance metrics
|
| 1033 |
|
|
@@ -1039,29 +1091,49 @@ BEFORE calling any training tools, you MUST:
|
|
| 1039 |
4. If target column was provided or inferred, proceed with modeling
|
| 1040 |
5. Only if NO target is available: analyze correlations to find best candidate
|
| 1041 |
|
| 1042 |
-
**Your Tools (
|
| 1043 |
-
-
|
| 1044 |
-
-
|
|
|
|
|
|
|
|
|
|
| 1045 |
- generate_model_report, detect_model_issues
|
| 1046 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1047 |
**Your Approach:**
|
| 1048 |
1. FIRST: Profile the dataset to see actual columns (if not done)
|
| 1049 |
2. VALIDATE: Confirm target column exists
|
| 1050 |
-
3.
|
| 1051 |
-
4.
|
| 1052 |
-
5.
|
| 1053 |
-
6.
|
| 1054 |
-
7.
|
| 1055 |
-
8. Detect and address model issues (overfitting, bias, etc.)
|
| 1056 |
|
| 1057 |
**Common Errors to Avoid:**
|
| 1058 |
-
❌ Calling
|
| 1059 |
❌ Guessing column names like "Occupation", "Target", "Label"
|
| 1060 |
❌ Using execute_python_code when dedicated tools exist
|
|
|
|
| 1061 |
✅ Always verify column names from profile_dataset first
|
|
|
|
| 1062 |
|
| 1063 |
You receive preprocessed data from data engineering agents and collaborate with visualization agents for model performance plots.""",
|
| 1064 |
-
"tool_keywords": ["train", "model", "hyperparameter", "ensemble", "cross-validation", "predict", "classify", "regress"]
|
| 1065 |
},
|
| 1066 |
|
| 1067 |
"viz_agent": {
|
|
@@ -1236,11 +1308,31 @@ You receive quality reports from EDA agent and deliver clean data to modeling ag
|
|
| 1236 |
return result["output_path"]
|
| 1237 |
# For nested results
|
| 1238 |
if "result" in result and isinstance(result["result"], dict):
|
| 1239 |
-
|
| 1240 |
-
|
| 1241 |
-
|
| 1242 |
-
|
| 1243 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1244 |
|
| 1245 |
def _determine_next_step(self, stuck_tool: str, completed_tools: List[str]) -> str:
|
| 1246 |
"""Determine what the next workflow step should be based on what's stuck."""
|
|
@@ -1569,8 +1661,35 @@ You receive quality reports from EDA agent and deliver clean data to modeling ag
|
|
| 1569 |
"url": f"/outputs/{report_path.replace('./outputs/', '')}"
|
| 1570 |
})
|
| 1571 |
print(f"[DEBUG] Added to artifacts[reports], total reports: {len(artifacts['reports'])}")
|
| 1572 |
-
|
| 1573 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1574 |
|
| 1575 |
# === COLLECT VISUALIZATION FILES (interactive plots, charts, etc.) ===
|
| 1576 |
elif "plot" in tool.lower() or "visualiz" in tool.lower() or "chart" in tool.lower() or "heatmap" in tool.lower() or "scatter" in tool.lower() or "histogram" in tool.lower():
|
|
@@ -2108,6 +2227,26 @@ You receive quality reports from EDA agent and deliver clean data to modeling ag
|
|
| 2108 |
print(f" ✓ Stripped invalid parameter '{invalid_param}': {val}")
|
| 2109 |
print(f" ℹ️ create_statistical_features creates row-wise stats (mean, std, min, max)")
|
| 2110 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2111 |
# General parameter corrections for common LLM hallucinations
|
| 2112 |
if "output" in arguments and "output_path" not in arguments:
|
| 2113 |
# Many tools use 'output_path' but LLM uses 'output'
|
|
@@ -2803,13 +2942,16 @@ You receive quality reports from EDA agent and deliver clean data to modeling ag
|
|
| 2803 |
print(f"[DEBUG] Orchestrator received resolved_params: {resolved_params}")
|
| 2804 |
print(f"[DEBUG] Current file_path: '{file_path}', target_col: '{target_col}'")
|
| 2805 |
|
| 2806 |
-
#
|
|
|
|
| 2807 |
if not file_path or file_path == "":
|
| 2808 |
if resolved_params.get("file_path"):
|
| 2809 |
file_path = resolved_params["file_path"]
|
| 2810 |
print(f"📝 Using dataset from session: {file_path}")
|
| 2811 |
else:
|
| 2812 |
print(f"[DEBUG] No file_path in resolved_params")
|
|
|
|
|
|
|
| 2813 |
|
| 2814 |
if not target_col:
|
| 2815 |
if resolved_params.get("target_col"):
|
|
@@ -2817,8 +2959,13 @@ You receive quality reports from EDA agent and deliver clean data to modeling ag
|
|
| 2817 |
print(f"📝 Using target column from session: {target_col}")
|
| 2818 |
|
| 2819 |
|
| 2820 |
-
# Show session context if available
|
| 2821 |
if self.session.last_dataset or self.session.last_model:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2822 |
context_summary = self.session.get_context_summary()
|
| 2823 |
print(f"\n{context_summary}\n")
|
| 2824 |
|
|
@@ -3150,6 +3297,12 @@ You receive quality reports from EDA agent and deliver clean data to modeling ag
|
|
| 3150 |
cleaned_recent.append(msg)
|
| 3151 |
i += 1
|
| 3152 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3153 |
messages = [system_msg, user_msg] + cleaned_recent
|
| 3154 |
print(f"✂️ Pruned conversation (keeping last 12 exchanges for better context preservation)")
|
| 3155 |
|
|
@@ -3193,6 +3346,12 @@ You receive quality reports from EDA agent and deliver clean data to modeling ag
|
|
| 3193 |
cleaned_recent.append(msg)
|
| 3194 |
i += 1
|
| 3195 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3196 |
messages = [system_msg, user_msg] + cleaned_recent
|
| 3197 |
print(f"⚠️ Emergency pruning (conversation > 15K tokens, keeping last 8 exchanges)")
|
| 3198 |
|
|
@@ -3240,6 +3399,27 @@ You receive quality reports from EDA agent and deliver clean data to modeling ag
|
|
| 3240 |
)
|
| 3241 |
print(f"💰 Token budget: {token_count}/{self.token_manager.max_tokens} ({(token_count/self.token_manager.max_tokens*100):.1f}%)")
|
| 3242 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3243 |
# Call LLM with function calling (provider-specific)
|
| 3244 |
if self.provider == "mistral":
|
| 3245 |
try:
|
|
@@ -4185,10 +4365,22 @@ You receive quality reports from EDA agent and deliver clean data to modeling ag
|
|
| 4185 |
loop_threshold = 1 # Stop after first retry with similar code
|
| 4186 |
print(f"⚠️ Detected repeated similar code execution")
|
| 4187 |
|
| 4188 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4189 |
if should_check_loops and tool_call_counter[tool_name] >= loop_threshold:
|
| 4190 |
-
#
|
| 4191 |
-
if workflow_history and workflow_history[-1]["tool"] == tool_name:
|
| 4192 |
print(f"\n⚠️ LOOP DETECTED: {tool_name} called {tool_call_counter[tool_name]} times consecutively!")
|
| 4193 |
print(f" This indicates the workflow is stuck. Skipping and forcing progression.")
|
| 4194 |
print(f" Last successful file: {self._get_last_successful_file(workflow_history)}")
|
|
|
|
| 39 |
profile_dataset,
|
| 40 |
detect_data_quality_issues,
|
| 41 |
analyze_correlations,
|
| 42 |
+
detect_label_errors, # NEW: cleanlab label error detection
|
| 43 |
get_smart_summary, # NEW
|
| 44 |
clean_missing_values,
|
| 45 |
handle_outliers,
|
|
|
|
| 50 |
encode_categorical,
|
| 51 |
train_baseline_models,
|
| 52 |
generate_model_report,
|
| 53 |
+
# AutoGluon Tools (9) - NEW: AutoML at Scale
|
| 54 |
+
train_with_autogluon,
|
| 55 |
+
predict_with_autogluon,
|
| 56 |
+
forecast_with_autogluon,
|
| 57 |
+
optimize_autogluon_model,
|
| 58 |
+
analyze_autogluon_model,
|
| 59 |
+
extend_autogluon_training,
|
| 60 |
+
train_multilabel_autogluon,
|
| 61 |
+
backtest_timeseries,
|
| 62 |
+
analyze_timeseries_model,
|
| 63 |
# Data Wrangling Tools (3) - NEW
|
| 64 |
merge_datasets,
|
| 65 |
concat_datasets,
|
|
|
|
| 97 |
perform_named_entity_recognition,
|
| 98 |
analyze_sentiment_advanced,
|
| 99 |
perform_text_similarity,
|
| 100 |
+
# Production/MLOps (5 + 2 new)
|
| 101 |
monitor_model_drift,
|
| 102 |
explain_predictions,
|
| 103 |
generate_model_card,
|
| 104 |
perform_ab_test_analysis,
|
| 105 |
detect_feature_leakage,
|
| 106 |
+
monitor_drift_evidently,
|
| 107 |
+
explain_with_dtreeviz,
|
| 108 |
# Time Series (3)
|
| 109 |
forecast_time_series,
|
| 110 |
detect_seasonality_trends,
|
|
|
|
| 132 |
generate_interactive_box_plots,
|
| 133 |
generate_interactive_time_series,
|
| 134 |
generate_plotly_dashboard,
|
| 135 |
+
# EDA Report Generation (2) - NEW PHASE 2
|
| 136 |
generate_ydata_profiling_report,
|
| 137 |
+
generate_sweetviz_report,
|
| 138 |
# Code Interpreter (2) - NEW PHASE 2 - TRUE AI AGENT CAPABILITY
|
| 139 |
execute_python_code,
|
| 140 |
execute_code_from_file,
|
|
|
|
| 387 |
"profile_dataset": profile_dataset,
|
| 388 |
"detect_data_quality_issues": detect_data_quality_issues,
|
| 389 |
"analyze_correlations": analyze_correlations,
|
| 390 |
+
"detect_label_errors": detect_label_errors, # NEW: cleanlab
|
| 391 |
"get_smart_summary": get_smart_summary, # NEW
|
| 392 |
"clean_missing_values": clean_missing_values,
|
| 393 |
"handle_outliers": handle_outliers,
|
|
|
|
| 398 |
"encode_categorical": encode_categorical,
|
| 399 |
"train_baseline_models": train_baseline_models,
|
| 400 |
"generate_model_report": generate_model_report,
|
| 401 |
+
# AutoGluon Tools (9) - NEW: AutoML at Scale
|
| 402 |
+
"train_with_autogluon": train_with_autogluon,
|
| 403 |
+
"predict_with_autogluon": predict_with_autogluon,
|
| 404 |
+
"forecast_with_autogluon": forecast_with_autogluon,
|
| 405 |
+
"optimize_autogluon_model": optimize_autogluon_model,
|
| 406 |
+
"analyze_autogluon_model": analyze_autogluon_model,
|
| 407 |
+
"extend_autogluon_training": extend_autogluon_training,
|
| 408 |
+
"train_multilabel_autogluon": train_multilabel_autogluon,
|
| 409 |
+
"backtest_timeseries": backtest_timeseries,
|
| 410 |
+
"analyze_timeseries_model": analyze_timeseries_model,
|
| 411 |
# Data Wrangling Tools (3) - NEW
|
| 412 |
"merge_datasets": merge_datasets,
|
| 413 |
"concat_datasets": concat_datasets,
|
|
|
|
| 445 |
"perform_named_entity_recognition": perform_named_entity_recognition,
|
| 446 |
"analyze_sentiment_advanced": analyze_sentiment_advanced,
|
| 447 |
"perform_text_similarity": perform_text_similarity,
|
| 448 |
+
# Production/MLOps (5 + 2 new)
|
| 449 |
"monitor_model_drift": monitor_model_drift,
|
| 450 |
"explain_predictions": explain_predictions,
|
| 451 |
"generate_model_card": generate_model_card,
|
| 452 |
"perform_ab_test_analysis": perform_ab_test_analysis,
|
| 453 |
"detect_feature_leakage": detect_feature_leakage,
|
| 454 |
+
"monitor_drift_evidently": monitor_drift_evidently,
|
| 455 |
+
"explain_with_dtreeviz": explain_with_dtreeviz,
|
| 456 |
# Time Series (3)
|
| 457 |
"forecast_time_series": forecast_time_series,
|
| 458 |
"detect_seasonality_trends": detect_seasonality_trends,
|
|
|
|
| 480 |
"generate_interactive_box_plots": generate_interactive_box_plots,
|
| 481 |
"generate_interactive_time_series": generate_interactive_time_series,
|
| 482 |
"generate_plotly_dashboard": generate_plotly_dashboard,
|
| 483 |
+
# EDA Report Generation (2) - NEW PHASE 2
|
| 484 |
"generate_ydata_profiling_report": generate_ydata_profiling_report,
|
| 485 |
+
"generate_sweetviz_report": generate_sweetviz_report,
|
| 486 |
# Code Interpreter (2) - NEW PHASE 2 - TRUE AI AGENT CAPABILITY
|
| 487 |
"execute_python_code": execute_python_code,
|
| 488 |
"execute_code_from_file": execute_code_from_file,
|
|
|
|
| 705 |
7. **IF DATETIME COLUMNS EXIST**: create_time_features(latest, date_col="<column_name>", output="./outputs/data/time_features.csv") - Extract year/month/day/hour/weekday/timestamp from each datetime column
|
| 706 |
8. encode_categorical(latest, method="auto", output="./outputs/data/encoded.csv")
|
| 707 |
9. generate_eda_plots(encoded, target_col, output_dir="./outputs/plots/eda") - Generate EDA visualizations
|
| 708 |
+
10. **ONLY IF USER EXPLICITLY REQUESTED ML**: train_with_autogluon(file_path=encoded, target_col=target_col, task_type="auto", time_limit=120, presets="medium_quality")
|
| 709 |
+
- AutoGluon is the DEFAULT training tool. It trains 10+ models with auto ensembling.
|
| 710 |
+
- It handles raw data directly (categoricals, missing values) but we clean first for best results.
|
| 711 |
+
- Fallback: train_baseline_models(encoded, target_col, task_type="auto") if AutoGluon unavailable.
|
| 712 |
+
- For multi-label prediction: train_multilabel_autogluon(file_path, target_cols=["col1","col2"])
|
| 713 |
+
- Post-training: optimize_autogluon_model(model_path, operation="refit_full|distill|calibrate_threshold|deploy_optimize")
|
| 714 |
+
- Model inspection: analyze_autogluon_model(model_path, operation="summary|transform_features|info")
|
| 715 |
+
- Add more models: extend_autogluon_training(model_path, operation="fit_extra")
|
| 716 |
+
- For time series: forecast_with_autogluon (supports covariates, holidays, model selection)
|
| 717 |
+
- TS backtesting: backtest_timeseries(file_path, target_col, time_col, num_val_windows=3)
|
| 718 |
+
- TS analysis: analyze_timeseries_model(model_path, data_path, time_col, operation="plot|feature_importance")
|
| 719 |
10b. **ALWAYS AFTER MODEL TRAINING**: generate_ydata_profiling_report(encoded, output_path="./outputs/reports/ydata_profile.html") - Comprehensive data analysis report
|
| 720 |
11. **HYPERPARAMETER TUNING (⚠️ ONLY WHEN EXPLICITLY REQUESTED)**:
|
| 721 |
- ⚠️ **CRITICAL WARNING**: This is EXTREMELY expensive (5-10 minutes) and resource-intensive!
|
|
|
|
| 880 |
- ❌ Missing values → USE clean_missing_values() tool
|
| 881 |
- ❌ Outliers → USE handle_outliers() tool
|
| 882 |
- ❌ Standard EDA plots → USE generate_eda_plots() or generate_plotly_dashboard()
|
| 883 |
+
- ❌ Model training → USE train_with_autogluon() (preferred) or train_baseline_models()
|
| 884 |
+
- ❌ Model optimization → USE optimize_autogluon_model() (refit, distill, deploy)
|
| 885 |
+
- ❌ Time series forecasting → USE forecast_with_autogluon() (supports covariates, holidays)
|
| 886 |
+
- ❌ Time series backtesting → USE backtest_timeseries()
|
| 887 |
+
- ❌ Multi-label prediction → USE train_multilabel_autogluon()
|
| 888 |
- ❌ Tasks with dedicated tools → USE THE TOOL, NOT custom code!
|
| 889 |
|
| 890 |
**Rule of Thumb:**
|
|
|
|
| 896 |
- force_numeric_conversion: Converts string columns to numeric (auto-detects, skips text)
|
| 897 |
- clean_missing_values: "auto" mode supported
|
| 898 |
- encode_categorical: one-hot/target/frequency encoding
|
| 899 |
+
- **⭐ train_with_autogluon**: AutoML - trains 10+ models with auto ensembling (PREFERRED)
|
| 900 |
+
- forecast_with_autogluon: Time series forecasting with AutoGluon (supports covariates, holidays, model selection)
|
| 901 |
+
- optimize_autogluon_model: Post-training optimization (refit_full, distill, calibrate_threshold, deploy_optimize, delete_models)
|
| 902 |
+
- analyze_autogluon_model: Model inspection (summary, transform_features, info)
|
| 903 |
+
- extend_autogluon_training: Add models incrementally (fit_extra, fit_weighted_ensemble)
|
| 904 |
+
- train_multilabel_autogluon: Multi-label prediction (multiple target columns)
|
| 905 |
+
- backtest_timeseries: Time series backtesting with multiple validation windows
|
| 906 |
+
- analyze_timeseries_model: TS model analysis (feature_importance, plot, make_future_dataframe)
|
| 907 |
+
- train_baseline_models: Fallback - trains 4 basic models
|
| 908 |
- **⭐ execute_python_code**: Write and run custom Python code for ANY task not covered by tools (TRUE AI AGENT capability)
|
| 909 |
- **execute_code_from_file**: Run existing Python scripts
|
| 910 |
- Advanced: hyperparameter_tuning, perform_eda_analysis, handle_imbalanced_data, perform_feature_scaling, detect_anomalies, detect_and_handle_multicollinearity, auto_feature_engineering, forecast_time_series, explain_predictions, generate_business_insights, perform_topic_modeling, extract_image_features, monitor_model_drift
|
|
|
|
| 991 |
- Were there any interesting correlations or anomalies?
|
| 992 |
3. **Model performance** (if trained) - **CRITICAL: YOU MUST INCLUDE THESE METRICS**:
|
| 993 |
- **ALWAYS extract and display** the exact metrics from tool results:
|
| 994 |
+
- R² Score, RMSE, MAE from the train_with_autogluon or train_baseline_models results
|
| 995 |
- List ALL models trained (not just the best one)
|
| 996 |
- Example: "Trained 6 models: XGBoost (R²=0.713, RMSE=0.207), Random Forest (R²=0.685, RMSE=0.218), etc."
|
| 997 |
- If hyperparameter tuning was done, show before/after comparison
|
|
|
|
| 1071 |
"modeling_agent": {
|
| 1072 |
"name": "ML Modeling Specialist",
|
| 1073 |
"emoji": "🤖",
|
| 1074 |
+
"description": "Build and train predictive machine learning models to forecast outcomes, classify categories, predict future values, or forecast time series. Perform supervised learning tasks including regression, classification, and time series forecasting. Train models using AutoGluon AutoML (preferred) or baseline models, optimize hyperparameters, conduct cross-validation, and evaluate model performance.",
|
| 1075 |
+
"system_prompt": """You are the ML Modeling Specialist Agent - an expert in machine learning powered by AutoGluon AutoML.
|
| 1076 |
|
| 1077 |
**Your Expertise:**
|
| 1078 |
+
- AutoML with AutoGluon (preferred for best results)
|
| 1079 |
- Model selection and baseline training
|
| 1080 |
- Hyperparameter tuning and optimization
|
| 1081 |
+
- Ensemble methods and model stacking
|
| 1082 |
+
- Time series forecasting
|
| 1083 |
- Cross-validation strategies
|
| 1084 |
- Model evaluation and performance metrics
|
| 1085 |
|
|
|
|
| 1091 |
4. If target column was provided or inferred, proceed with modeling
|
| 1092 |
5. Only if NO target is available: analyze correlations to find best candidate
|
| 1093 |
|
| 1094 |
+
**Your Tools (8 modeling-focused):**
|
| 1095 |
+
- train_with_autogluon (PREFERRED - AutoML with 10+ models, auto ensembling, handles raw data)
|
| 1096 |
+
- predict_with_autogluon (predictions with trained AutoGluon model)
|
| 1097 |
+
- forecast_with_autogluon (time series forecasting with AutoGluon - better than Prophet/ARIMA)
|
| 1098 |
+
- train_baseline_models (fallback - trains 4 basic models)
|
| 1099 |
+
- hyperparameter_tuning, perform_cross_validation
|
| 1100 |
- generate_model_report, detect_model_issues
|
| 1101 |
|
| 1102 |
+
**TOOL PRIORITY (use in this order):**
|
| 1103 |
+
| Task | Use This Tool | NOT This |
|
| 1104 |
+
|------|--------------|----------|
|
| 1105 |
+
| Classification/Regression | train_with_autogluon | train_baseline_models |
|
| 1106 |
+
| Time Series Forecasting | forecast_with_autogluon | forecast_time_series |
|
| 1107 |
+
| Predictions on new data | predict_with_autogluon | execute_python_code |
|
| 1108 |
+
| Quick baseline check | train_baseline_models | execute_python_code |
|
| 1109 |
+
|
| 1110 |
+
**AutoGluon Advantages (explain to user):**
|
| 1111 |
+
- Trains 10+ models automatically (vs 4 in baseline)
|
| 1112 |
+
- Auto ensembles with multi-layer stacking
|
| 1113 |
+
- Handles categorical features directly (no manual encoding needed)
|
| 1114 |
+
- Handles missing values automatically (no manual imputation needed)
|
| 1115 |
+
- Time-bounded training (won't run forever)
|
| 1116 |
+
- Better accuracy than manual model selection
|
| 1117 |
+
|
| 1118 |
**Your Approach:**
|
| 1119 |
1. FIRST: Profile the dataset to see actual columns (if not done)
|
| 1120 |
2. VALIDATE: Confirm target column exists
|
| 1121 |
+
3. PREFERRED: Use train_with_autogluon for best results
|
| 1122 |
+
4. For time series data: Use forecast_with_autogluon
|
| 1123 |
+
5. Validate with proper cross-validation if needed
|
| 1124 |
+
6. Generate comprehensive model reports with metrics
|
| 1125 |
+
7. Detect and address model issues (overfitting, bias, etc.)
|
|
|
|
| 1126 |
|
| 1127 |
**Common Errors to Avoid:**
|
| 1128 |
+
❌ Calling train tools with non-existent target column
|
| 1129 |
❌ Guessing column names like "Occupation", "Target", "Label"
|
| 1130 |
❌ Using execute_python_code when dedicated tools exist
|
| 1131 |
+
❌ Using train_baseline_models when train_with_autogluon is available
|
| 1132 |
✅ Always verify column names from profile_dataset first
|
| 1133 |
+
✅ Use train_with_autogluon as the DEFAULT training tool
|
| 1134 |
|
| 1135 |
You receive preprocessed data from data engineering agents and collaborate with visualization agents for model performance plots.""",
|
| 1136 |
+
"tool_keywords": ["train", "model", "hyperparameter", "ensemble", "cross-validation", "predict", "classify", "regress", "autogluon", "automl", "forecast"]
|
| 1137 |
},
|
| 1138 |
|
| 1139 |
"viz_agent": {
|
|
|
|
| 1308 |
return result["output_path"]
|
| 1309 |
# For nested results
|
| 1310 |
if "result" in result and isinstance(result["result"], dict):
|
| 1311 |
+
nested = result["result"]
|
| 1312 |
+
if "output_path" in nested:
|
| 1313 |
+
return nested["output_path"]
|
| 1314 |
+
# Check output_dir for dashboard-type tools
|
| 1315 |
+
if "output_dir" in nested:
|
| 1316 |
+
return nested["output_dir"]
|
| 1317 |
+
# Check generated_files from execute_python_code
|
| 1318 |
+
if "generated_files" in nested and nested["generated_files"]:
|
| 1319 |
+
return nested["generated_files"][0]
|
| 1320 |
+
# Check tool arguments for file_path as last resort
|
| 1321 |
+
args = step.get("arguments", step.get("result", {}).get("arguments", {}))
|
| 1322 |
+
if isinstance(args, dict) and "file_path" in args:
|
| 1323 |
+
import os
|
| 1324 |
+
if os.path.exists(args["file_path"]):
|
| 1325 |
+
return args["file_path"]
|
| 1326 |
+
|
| 1327 |
+
# 🔥 FIX: Return the original input file instead of a phantom path
|
| 1328 |
+
# Try to get from session or workflow state
|
| 1329 |
+
if hasattr(self, 'session') and self.session and self.session.last_dataset:
|
| 1330 |
+
return self.session.last_dataset
|
| 1331 |
+
if hasattr(self, 'workflow_state') and self.workflow_state.current_file:
|
| 1332 |
+
return self.workflow_state.current_file
|
| 1333 |
+
|
| 1334 |
+
# Last resort: return empty string instead of phantom file
|
| 1335 |
+
return "(no file found - use the original uploaded dataset)"
|
| 1336 |
|
| 1337 |
def _determine_next_step(self, stuck_tool: str, completed_tools: List[str]) -> str:
|
| 1338 |
"""Determine what the next workflow step should be based on what's stuck."""
|
|
|
|
| 1661 |
"url": f"/outputs/{report_path.replace('./outputs/', '')}"
|
| 1662 |
})
|
| 1663 |
print(f"[DEBUG] Added to artifacts[reports], total reports: {len(artifacts['reports'])}")
|
| 1664 |
+
|
| 1665 |
+
# 🔥 FIX: Extract individual plots from dashboard's 'plots' array
|
| 1666 |
+
# generate_plotly_dashboard returns {"plots": [{"output_path": ..., "status": "success"}, ...]}
|
| 1667 |
+
if "plots" in nested_result and isinstance(nested_result["plots"], list):
|
| 1668 |
+
dashboard_output_dir = nested_result.get("output_dir", "./outputs/plots/interactive")
|
| 1669 |
+
for sub_plot in nested_result["plots"]:
|
| 1670 |
+
if isinstance(sub_plot, dict) and sub_plot.get("status") == "success":
|
| 1671 |
+
sub_path = sub_plot.get("output_path", "")
|
| 1672 |
+
if sub_path:
|
| 1673 |
+
# Clean path for URL
|
| 1674 |
+
if sub_path.startswith('./outputs/'):
|
| 1675 |
+
url_path = sub_path.replace('./outputs/', '')
|
| 1676 |
+
elif sub_path.startswith('/tmp/data_science_agent/'):
|
| 1677 |
+
url_path = sub_path.replace('/tmp/data_science_agent/', '')
|
| 1678 |
+
else:
|
| 1679 |
+
url_path = sub_path.split('/')[-1]
|
| 1680 |
+
|
| 1681 |
+
plot_title = sub_path.split('/')[-1].replace('_', ' ').replace('.html', '').replace('.png', '').title()
|
| 1682 |
+
plots.append({
|
| 1683 |
+
"title": plot_title,
|
| 1684 |
+
"path": sub_path,
|
| 1685 |
+
"url": f"/outputs/{url_path}",
|
| 1686 |
+
"type": "html" if sub_path.endswith(".html") else "image"
|
| 1687 |
+
})
|
| 1688 |
+
print(f"[DEBUG] Added dashboard sub-plot: {plot_title} -> /outputs/{url_path}")
|
| 1689 |
+
|
| 1690 |
+
print(f"[DEBUG] Extracted {len(nested_result['plots'])} plots from dashboard")
|
| 1691 |
+
elif not report_path:
|
| 1692 |
+
print(f"[DEBUG] No output_path, report_path, or plots array in nested_result for report tool")
|
| 1693 |
|
| 1694 |
# === COLLECT VISUALIZATION FILES (interactive plots, charts, etc.) ===
|
| 1695 |
elif "plot" in tool.lower() or "visualiz" in tool.lower() or "chart" in tool.lower() or "heatmap" in tool.lower() or "scatter" in tool.lower() or "histogram" in tool.lower():
|
|
|
|
| 2227 |
print(f" ✓ Stripped invalid parameter '{invalid_param}': {val}")
|
| 2228 |
print(f" ℹ️ create_statistical_features creates row-wise stats (mean, std, min, max)")
|
| 2229 |
|
| 2230 |
+
# 🔥 FIX: Generic parameter sanitization - strip any unknown kwargs
|
| 2231 |
+
# This prevents "got an unexpected keyword argument" errors from LLM hallucinations
|
| 2232 |
+
import inspect
|
| 2233 |
+
try:
|
| 2234 |
+
sig = inspect.signature(tool_func)
|
| 2235 |
+
valid_params = set(sig.parameters.keys())
|
| 2236 |
+
invalid_args = [k for k in arguments.keys() if k not in valid_params]
|
| 2237 |
+
# Only strip if the function doesn't accept **kwargs
|
| 2238 |
+
has_var_keyword = any(
|
| 2239 |
+
p.kind == inspect.Parameter.VAR_KEYWORD
|
| 2240 |
+
for p in sig.parameters.values()
|
| 2241 |
+
)
|
| 2242 |
+
if invalid_args and not has_var_keyword:
|
| 2243 |
+
for invalid_param in invalid_args:
|
| 2244 |
+
val = arguments.pop(invalid_param)
|
| 2245 |
+
print(f" ✓ Stripped hallucinated parameter '{invalid_param}': {val}")
|
| 2246 |
+
print(f" ℹ️ Valid parameters for {tool_name}: {list(valid_params)}")
|
| 2247 |
+
except (ValueError, TypeError):
|
| 2248 |
+
pass # Can't inspect, skip validation
|
| 2249 |
+
|
| 2250 |
# General parameter corrections for common LLM hallucinations
|
| 2251 |
if "output" in arguments and "output_path" not in arguments:
|
| 2252 |
# Many tools use 'output_path' but LLM uses 'output'
|
|
|
|
| 2942 |
print(f"[DEBUG] Orchestrator received resolved_params: {resolved_params}")
|
| 2943 |
print(f"[DEBUG] Current file_path: '{file_path}', target_col: '{target_col}'")
|
| 2944 |
|
| 2945 |
+
# 🔥 FIX: Only use resolved file_path if user did NOT provide a new file
|
| 2946 |
+
# If file_path is already set (user uploaded a new file), DON'T override it
|
| 2947 |
if not file_path or file_path == "":
|
| 2948 |
if resolved_params.get("file_path"):
|
| 2949 |
file_path = resolved_params["file_path"]
|
| 2950 |
print(f"📝 Using dataset from session: {file_path}")
|
| 2951 |
else:
|
| 2952 |
print(f"[DEBUG] No file_path in resolved_params")
|
| 2953 |
+
else:
|
| 2954 |
+
print(f"📝 User provided new file: {file_path} (ignoring session file: {resolved_params.get('file_path', 'none')})")
|
| 2955 |
|
| 2956 |
if not target_col:
|
| 2957 |
if resolved_params.get("target_col"):
|
|
|
|
| 2959 |
print(f"📝 Using target column from session: {target_col}")
|
| 2960 |
|
| 2961 |
|
| 2962 |
+
# Show session context if available (but show CURRENT file, not old one)
|
| 2963 |
if self.session.last_dataset or self.session.last_model:
|
| 2964 |
+
# 🔥 FIX: Update session's last_dataset to current file BEFORE showing context
|
| 2965 |
+
# This prevents stale session context from misleading the LLM
|
| 2966 |
+
if file_path and file_path != self.session.last_dataset:
|
| 2967 |
+
print(f"📝 Updating session dataset: {self.session.last_dataset} → {file_path}")
|
| 2968 |
+
self.session.last_dataset = file_path
|
| 2969 |
context_summary = self.session.get_context_summary()
|
| 2970 |
print(f"\n{context_summary}\n")
|
| 2971 |
|
|
|
|
| 3297 |
cleaned_recent.append(msg)
|
| 3298 |
i += 1
|
| 3299 |
|
| 3300 |
+
# 🔥 CRITICAL FIX: Remove orphaned tool messages at the start of cleaned_recent
|
| 3301 |
+
# Mistral NEVER allows 'tool' role immediately after 'user' role
|
| 3302 |
+
while cleaned_recent and get_role(cleaned_recent[0]) == 'tool':
|
| 3303 |
+
print(f"⚠️ Removed orphaned tool message at start of pruned history")
|
| 3304 |
+
cleaned_recent.pop(0)
|
| 3305 |
+
|
| 3306 |
messages = [system_msg, user_msg] + cleaned_recent
|
| 3307 |
print(f"✂️ Pruned conversation (keeping last 12 exchanges for better context preservation)")
|
| 3308 |
|
|
|
|
| 3346 |
cleaned_recent.append(msg)
|
| 3347 |
i += 1
|
| 3348 |
|
| 3349 |
+
# 🔥 CRITICAL FIX: Remove orphaned tool messages at the start of cleaned_recent
|
| 3350 |
+
# Mistral NEVER allows 'tool' role immediately after 'user' role
|
| 3351 |
+
while cleaned_recent and get_role(cleaned_recent[0]) == 'tool':
|
| 3352 |
+
print(f"⚠️ Removed orphaned tool message at start of emergency pruned history")
|
| 3353 |
+
cleaned_recent.pop(0)
|
| 3354 |
+
|
| 3355 |
messages = [system_msg, user_msg] + cleaned_recent
|
| 3356 |
print(f"⚠️ Emergency pruning (conversation > 15K tokens, keeping last 8 exchanges)")
|
| 3357 |
|
|
|
|
| 3399 |
)
|
| 3400 |
print(f"💰 Token budget: {token_count}/{self.token_manager.max_tokens} ({(token_count/self.token_manager.max_tokens*100):.1f}%)")
|
| 3401 |
|
| 3402 |
+
# 🔥 CRITICAL: Validate message order for Mistral API compliance
|
| 3403 |
+
# Mistral requires: system → user → assistant → tool (only after assistant with tool_calls) → assistant → user...
|
| 3404 |
+
# NEVER: user → tool (this causes "Unexpected role 'tool' after role 'user'" error)
|
| 3405 |
+
if self.provider in ["mistral", "groq"]:
|
| 3406 |
+
validated_messages = []
|
| 3407 |
+
for i, msg in enumerate(messages):
|
| 3408 |
+
role = get_role(msg)
|
| 3409 |
+
|
| 3410 |
+
# Check if this is a tool message after a user message
|
| 3411 |
+
if role == 'tool' and validated_messages:
|
| 3412 |
+
prev_role = get_role(validated_messages[-1])
|
| 3413 |
+
if prev_role == 'user':
|
| 3414 |
+
# Invalid! Skip this tool message
|
| 3415 |
+
print(f"⚠️ WARNING: Skipped orphaned tool message at position {i} (after user message)")
|
| 3416 |
+
continue
|
| 3417 |
+
|
| 3418 |
+
validated_messages.append(msg)
|
| 3419 |
+
|
| 3420 |
+
messages = validated_messages
|
| 3421 |
+
print(f"✅ Message order validation complete: {len(messages)} messages")
|
| 3422 |
+
|
| 3423 |
# Call LLM with function calling (provider-specific)
|
| 3424 |
if self.provider == "mistral":
|
| 3425 |
try:
|
|
|
|
| 4365 |
loop_threshold = 1 # Stop after first retry with similar code
|
| 4366 |
print(f"⚠️ Detected repeated similar code execution")
|
| 4367 |
|
| 4368 |
+
# 🔥 FIX: Check if arguments are DIFFERENT from last call
|
| 4369 |
+
# If the same tool is called with different arguments, it's NOT a loop
|
| 4370 |
+
# (e.g., generating multiple different plots is legitimate)
|
| 4371 |
+
is_same_args = False
|
| 4372 |
+
if workflow_history and workflow_history[-1]["tool"] == tool_name:
|
| 4373 |
+
last_args = workflow_history[-1].get("arguments", {})
|
| 4374 |
+
# Compare key arguments (ignore output paths which may differ)
|
| 4375 |
+
ignore_keys = {"output_path", "output_dir"}
|
| 4376 |
+
last_key_args = {k: v for k, v in last_args.items() if k not in ignore_keys}
|
| 4377 |
+
current_key_args = {k: v for k, v in tool_args.items() if k not in ignore_keys}
|
| 4378 |
+
is_same_args = (last_key_args == current_key_args)
|
| 4379 |
+
|
| 4380 |
+
# Check for loops (same tool called threshold+ times consecutively WITH SAME ARGS)
|
| 4381 |
if should_check_loops and tool_call_counter[tool_name] >= loop_threshold:
|
| 4382 |
+
# Only flag as loop if last call was same tool WITH same arguments
|
| 4383 |
+
if workflow_history and workflow_history[-1]["tool"] == tool_name and is_same_args:
|
| 4384 |
print(f"\n⚠️ LOOP DETECTED: {tool_name} called {tool_call_counter[tool_name]} times consecutively!")
|
| 4385 |
print(f" This indicates the workflow is stuck. Skipping and forcing progression.")
|
| 4386 |
print(f" Last successful file: {self._get_last_successful_file(workflow_history)}")
|
src/tools/__init__.py
CHANGED
|
@@ -5,7 +5,8 @@ from .data_profiling import (
|
|
| 5 |
profile_dataset,
|
| 6 |
detect_data_quality_issues,
|
| 7 |
analyze_correlations,
|
| 8 |
-
get_smart_summary # NEW: Enhanced data summary
|
|
|
|
| 9 |
)
|
| 10 |
|
| 11 |
from .data_cleaning import (
|
|
@@ -36,6 +37,19 @@ from .model_training import (
|
|
| 36 |
generate_model_report
|
| 37 |
)
|
| 38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
# Advanced Analysis Tools (5)
|
| 40 |
from .advanced_analysis import (
|
| 41 |
perform_eda_analysis,
|
|
@@ -90,13 +104,15 @@ from .nlp_text_analytics import (
|
|
| 90 |
perform_text_similarity
|
| 91 |
)
|
| 92 |
|
| 93 |
-
# Production/MLOps Tools (5)
|
| 94 |
from .production_mlops import (
|
| 95 |
monitor_model_drift,
|
| 96 |
explain_predictions,
|
| 97 |
generate_model_card,
|
| 98 |
perform_ab_test_analysis,
|
| 99 |
-
detect_feature_leakage
|
|
|
|
|
|
|
| 100 |
)
|
| 101 |
|
| 102 |
# Time Series Tools (3)
|
|
@@ -141,9 +157,10 @@ from .plotly_visualizations import (
|
|
| 141 |
generate_plotly_dashboard
|
| 142 |
)
|
| 143 |
|
| 144 |
-
# EDA Report Generation (
|
| 145 |
from .eda_reports import (
|
| 146 |
-
generate_ydata_profiling_report
|
|
|
|
| 147 |
)
|
| 148 |
|
| 149 |
# Code Interpreter (2) - NEW PHASE 2 - CRITICAL for True AI Agent
|
|
@@ -170,11 +187,12 @@ from .enhanced_feature_engineering import (
|
|
| 170 |
)
|
| 171 |
|
| 172 |
__all__ = [
|
| 173 |
-
# Basic Data Profiling (4) - UPDATED
|
| 174 |
"profile_dataset",
|
| 175 |
"detect_data_quality_issues",
|
| 176 |
"analyze_correlations",
|
| 177 |
"get_smart_summary", # NEW
|
|
|
|
| 178 |
|
| 179 |
# Basic Data Cleaning (3)
|
| 180 |
"clean_missing_values",
|
|
@@ -198,6 +216,17 @@ __all__ = [
|
|
| 198 |
"train_baseline_models",
|
| 199 |
"generate_model_report",
|
| 200 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 201 |
# Advanced Analysis (5)
|
| 202 |
"perform_eda_analysis",
|
| 203 |
"detect_model_issues",
|
|
@@ -238,12 +267,14 @@ __all__ = [
|
|
| 238 |
"analyze_sentiment_advanced",
|
| 239 |
"perform_text_similarity",
|
| 240 |
|
| 241 |
-
# Production/MLOps (5)
|
| 242 |
"monitor_model_drift",
|
| 243 |
"explain_predictions",
|
| 244 |
"generate_model_card",
|
| 245 |
"perform_ab_test_analysis",
|
| 246 |
"detect_feature_leakage",
|
|
|
|
|
|
|
| 247 |
|
| 248 |
# Time Series (3)
|
| 249 |
"forecast_time_series",
|
|
@@ -277,8 +308,9 @@ __all__ = [
|
|
| 277 |
"generate_interactive_time_series",
|
| 278 |
"generate_plotly_dashboard",
|
| 279 |
|
| 280 |
-
# EDA Report Generation (
|
| 281 |
"generate_ydata_profiling_report",
|
|
|
|
| 282 |
|
| 283 |
# Code Interpreter (2) - NEW PHASE 2 - CRITICAL for True AI Agent
|
| 284 |
"execute_python_code",
|
|
|
|
| 5 |
profile_dataset,
|
| 6 |
detect_data_quality_issues,
|
| 7 |
analyze_correlations,
|
| 8 |
+
get_smart_summary, # NEW: Enhanced data summary
|
| 9 |
+
detect_label_errors # NEW: cleanlab label error detection
|
| 10 |
)
|
| 11 |
|
| 12 |
from .data_cleaning import (
|
|
|
|
| 37 |
generate_model_report
|
| 38 |
)
|
| 39 |
|
| 40 |
+
# AutoGluon-Powered Training (9) - Classification, Regression, Time Series, Optimization
|
| 41 |
+
from .autogluon_training import (
|
| 42 |
+
train_with_autogluon,
|
| 43 |
+
predict_with_autogluon,
|
| 44 |
+
forecast_with_autogluon,
|
| 45 |
+
optimize_autogluon_model,
|
| 46 |
+
analyze_autogluon_model,
|
| 47 |
+
extend_autogluon_training,
|
| 48 |
+
train_multilabel_autogluon,
|
| 49 |
+
backtest_timeseries,
|
| 50 |
+
analyze_timeseries_model
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
# Advanced Analysis Tools (5)
|
| 54 |
from .advanced_analysis import (
|
| 55 |
perform_eda_analysis,
|
|
|
|
| 104 |
perform_text_similarity
|
| 105 |
)
|
| 106 |
|
| 107 |
+
# Production/MLOps Tools (5 + 2 new)
|
| 108 |
from .production_mlops import (
|
| 109 |
monitor_model_drift,
|
| 110 |
explain_predictions,
|
| 111 |
generate_model_card,
|
| 112 |
perform_ab_test_analysis,
|
| 113 |
+
detect_feature_leakage,
|
| 114 |
+
monitor_drift_evidently, # NEW: Evidently drift reports
|
| 115 |
+
explain_with_dtreeviz # NEW: Decision tree visualization
|
| 116 |
)
|
| 117 |
|
| 118 |
# Time Series Tools (3)
|
|
|
|
| 157 |
generate_plotly_dashboard
|
| 158 |
)
|
| 159 |
|
| 160 |
+
# EDA Report Generation (2) - NEW PHASE 2
|
| 161 |
from .eda_reports import (
|
| 162 |
+
generate_ydata_profiling_report,
|
| 163 |
+
generate_sweetviz_report # NEW: Sweetviz EDA with comparison
|
| 164 |
)
|
| 165 |
|
| 166 |
# Code Interpreter (2) - NEW PHASE 2 - CRITICAL for True AI Agent
|
|
|
|
| 187 |
)
|
| 188 |
|
| 189 |
__all__ = [
|
| 190 |
+
# Basic Data Profiling (4 + 1 new) - UPDATED
|
| 191 |
"profile_dataset",
|
| 192 |
"detect_data_quality_issues",
|
| 193 |
"analyze_correlations",
|
| 194 |
"get_smart_summary", # NEW
|
| 195 |
+
"detect_label_errors", # NEW: cleanlab
|
| 196 |
|
| 197 |
# Basic Data Cleaning (3)
|
| 198 |
"clean_missing_values",
|
|
|
|
| 216 |
"train_baseline_models",
|
| 217 |
"generate_model_report",
|
| 218 |
|
| 219 |
+
# AutoGluon Training (9) - NEW
|
| 220 |
+
"train_with_autogluon",
|
| 221 |
+
"predict_with_autogluon",
|
| 222 |
+
"forecast_with_autogluon",
|
| 223 |
+
"optimize_autogluon_model",
|
| 224 |
+
"analyze_autogluon_model",
|
| 225 |
+
"extend_autogluon_training",
|
| 226 |
+
"train_multilabel_autogluon",
|
| 227 |
+
"backtest_timeseries",
|
| 228 |
+
"analyze_timeseries_model",
|
| 229 |
+
|
| 230 |
# Advanced Analysis (5)
|
| 231 |
"perform_eda_analysis",
|
| 232 |
"detect_model_issues",
|
|
|
|
| 267 |
"analyze_sentiment_advanced",
|
| 268 |
"perform_text_similarity",
|
| 269 |
|
| 270 |
+
# Production/MLOps (5 + 2 new)
|
| 271 |
"monitor_model_drift",
|
| 272 |
"explain_predictions",
|
| 273 |
"generate_model_card",
|
| 274 |
"perform_ab_test_analysis",
|
| 275 |
"detect_feature_leakage",
|
| 276 |
+
"monitor_drift_evidently", # NEW: Evidently
|
| 277 |
+
"explain_with_dtreeviz", # NEW: dtreeviz
|
| 278 |
|
| 279 |
# Time Series (3)
|
| 280 |
"forecast_time_series",
|
|
|
|
| 308 |
"generate_interactive_time_series",
|
| 309 |
"generate_plotly_dashboard",
|
| 310 |
|
| 311 |
+
# EDA Report Generation (2) - NEW PHASE 2
|
| 312 |
"generate_ydata_profiling_report",
|
| 313 |
+
"generate_sweetviz_report", # NEW: Sweetviz
|
| 314 |
|
| 315 |
# Code Interpreter (2) - NEW PHASE 2 - CRITICAL for True AI Agent
|
| 316 |
"execute_python_code",
|
src/tools/advanced_insights.py
CHANGED
|
@@ -466,14 +466,16 @@ def analyze_distribution(file_path: str,
|
|
| 466 |
|
| 467 |
def perform_segment_analysis(file_path: str,
|
| 468 |
n_segments: int = 5,
|
| 469 |
-
features: Optional[List[str]] = None
|
|
|
|
| 470 |
"""
|
| 471 |
Perform cluster-based segment analysis.
|
| 472 |
|
| 473 |
Args:
|
| 474 |
file_path: Path to dataset
|
| 475 |
-
n_segments: Number of segments to create
|
| 476 |
features: Features to use for clustering (all numeric if None)
|
|
|
|
| 477 |
|
| 478 |
Returns:
|
| 479 |
Dictionary with segment analysis results
|
|
@@ -498,18 +500,44 @@ def perform_segment_analysis(file_path: str,
|
|
| 498 |
X_scaled = scaler.fit_transform(X)
|
| 499 |
|
| 500 |
# Perform clustering
|
| 501 |
-
|
| 502 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 503 |
|
| 504 |
# Add cluster labels to dataframe
|
| 505 |
df['segment'] = labels
|
| 506 |
|
| 507 |
-
# Analyze segments
|
|
|
|
| 508 |
segment_profiles = []
|
| 509 |
-
for
|
| 510 |
-
segment_data = df[df['segment'] ==
|
| 511 |
profile = {
|
| 512 |
-
"segment_id":
|
|
|
|
| 513 |
"size": len(segment_data),
|
| 514 |
"percentage": float((len(segment_data) / len(df)) * 100),
|
| 515 |
"characteristics": {}
|
|
@@ -525,20 +553,26 @@ def perform_segment_analysis(file_path: str,
|
|
| 525 |
segment_profiles.append(profile)
|
| 526 |
|
| 527 |
results = {
|
|
|
|
| 528 |
"n_segments": n_segments,
|
| 529 |
"features_used": features,
|
| 530 |
"total_samples": len(df),
|
| 531 |
"segments": segment_profiles,
|
| 532 |
"insights": [
|
| 533 |
-
f"🎯 Created {n_segments} segments from {len(df)} samples",
|
| 534 |
f"📊 Used {len(features)} features for segmentation"
|
| 535 |
]
|
| 536 |
}
|
| 537 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 538 |
# Find most distinctive features for each segment
|
| 539 |
-
for
|
| 540 |
-
|
| 541 |
-
|
| 542 |
-
|
|
|
|
| 543 |
|
| 544 |
return results
|
|
|
|
| 466 |
|
| 467 |
def perform_segment_analysis(file_path: str,
|
| 468 |
n_segments: int = 5,
|
| 469 |
+
features: Optional[List[str]] = None,
|
| 470 |
+
method: str = "kmeans") -> Dict[str, Any]:
|
| 471 |
"""
|
| 472 |
Perform cluster-based segment analysis.
|
| 473 |
|
| 474 |
Args:
|
| 475 |
file_path: Path to dataset
|
| 476 |
+
n_segments: Number of segments to create (ignored for HDBSCAN)
|
| 477 |
features: Features to use for clustering (all numeric if None)
|
| 478 |
+
method: Clustering method ('kmeans' or 'hdbscan')
|
| 479 |
|
| 480 |
Returns:
|
| 481 |
Dictionary with segment analysis results
|
|
|
|
| 500 |
X_scaled = scaler.fit_transform(X)
|
| 501 |
|
| 502 |
# Perform clustering
|
| 503 |
+
if method == "hdbscan":
|
| 504 |
+
try:
|
| 505 |
+
from sklearn.cluster import HDBSCAN as SklearnHDBSCAN
|
| 506 |
+
|
| 507 |
+
print("🔍 Using HDBSCAN for density-based segmentation...")
|
| 508 |
+
clusterer = SklearnHDBSCAN(
|
| 509 |
+
min_cluster_size=max(5, len(X) // 50),
|
| 510 |
+
min_samples=max(3, len(X) // 100),
|
| 511 |
+
cluster_selection_method='eom'
|
| 512 |
+
)
|
| 513 |
+
labels = clusterer.fit_predict(X_scaled)
|
| 514 |
+
|
| 515 |
+
# HDBSCAN assigns -1 to noise points
|
| 516 |
+
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
|
| 517 |
+
n_noise = int((labels == -1).sum())
|
| 518 |
+
n_segments = n_clusters
|
| 519 |
+
|
| 520 |
+
print(f" Found {n_clusters} clusters + {n_noise} noise points")
|
| 521 |
+
|
| 522 |
+
except ImportError:
|
| 523 |
+
print("⚠️ HDBSCAN not available (requires scikit-learn >= 1.3). Falling back to KMeans.")
|
| 524 |
+
method = "kmeans"
|
| 525 |
+
|
| 526 |
+
if method == "kmeans":
|
| 527 |
+
kmeans = KMeans(n_clusters=n_segments, random_state=42, n_init=10)
|
| 528 |
+
labels = kmeans.fit_predict(X_scaled)
|
| 529 |
|
| 530 |
# Add cluster labels to dataframe
|
| 531 |
df['segment'] = labels
|
| 532 |
|
| 533 |
+
# Analyze segments (include noise cluster -1 for HDBSCAN)
|
| 534 |
+
unique_labels = sorted(set(labels))
|
| 535 |
segment_profiles = []
|
| 536 |
+
for label in unique_labels:
|
| 537 |
+
segment_data = df[df['segment'] == label]
|
| 538 |
profile = {
|
| 539 |
+
"segment_id": int(label),
|
| 540 |
+
"label": "noise" if label == -1 else f"cluster_{label}",
|
| 541 |
"size": len(segment_data),
|
| 542 |
"percentage": float((len(segment_data) / len(df)) * 100),
|
| 543 |
"characteristics": {}
|
|
|
|
| 553 |
segment_profiles.append(profile)
|
| 554 |
|
| 555 |
results = {
|
| 556 |
+
"method": method,
|
| 557 |
"n_segments": n_segments,
|
| 558 |
"features_used": features,
|
| 559 |
"total_samples": len(df),
|
| 560 |
"segments": segment_profiles,
|
| 561 |
"insights": [
|
| 562 |
+
f"🎯 Created {n_segments} segments from {len(df)} samples using {method.upper()}",
|
| 563 |
f"📊 Used {len(features)} features for segmentation"
|
| 564 |
]
|
| 565 |
}
|
| 566 |
|
| 567 |
+
if method == "hdbscan" and n_noise > 0:
|
| 568 |
+
results["noise_points"] = n_noise
|
| 569 |
+
results["insights"].append(f"🔇 {n_noise} samples classified as noise (outliers)")
|
| 570 |
+
|
| 571 |
# Find most distinctive features for each segment
|
| 572 |
+
for profile in segment_profiles:
|
| 573 |
+
if profile["segment_id"] != -1:
|
| 574 |
+
results["insights"].append(
|
| 575 |
+
f"Segment {profile['segment_id']}: {profile['size']} samples ({profile['percentage']:.1f}%)"
|
| 576 |
+
)
|
| 577 |
|
| 578 |
return results
|
src/tools/advanced_preprocessing.py
CHANGED
|
@@ -195,6 +195,8 @@ def perform_feature_scaling(
|
|
| 195 |
- 'standard': StandardScaler (mean=0, std=1)
|
| 196 |
- 'minmax': MinMaxScaler (range 0-1)
|
| 197 |
- 'robust': RobustScaler (median, IQR - robust to outliers)
|
|
|
|
|
|
|
| 198 |
columns: List of columns to scale (None = all numeric columns)
|
| 199 |
output_path: Path to save scaled dataset
|
| 200 |
scaler_save_path: Path to save fitted scaler for future use
|
|
@@ -231,8 +233,16 @@ def perform_feature_scaling(
|
|
| 231 |
scaler = MinMaxScaler()
|
| 232 |
elif scaler_type == "robust":
|
| 233 |
scaler = RobustScaler()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 234 |
else:
|
| 235 |
-
raise ValueError(f"Unsupported scaler_type: {scaler_type}")
|
| 236 |
|
| 237 |
# Get original statistics
|
| 238 |
original_stats = {}
|
|
|
|
| 195 |
- 'standard': StandardScaler (mean=0, std=1)
|
| 196 |
- 'minmax': MinMaxScaler (range 0-1)
|
| 197 |
- 'robust': RobustScaler (median, IQR - robust to outliers)
|
| 198 |
+
- 'power': PowerTransformer (Yeo-Johnson, makes data more Gaussian)
|
| 199 |
+
- 'quantile': QuantileTransformer (uniform or normal output distribution)
|
| 200 |
columns: List of columns to scale (None = all numeric columns)
|
| 201 |
output_path: Path to save scaled dataset
|
| 202 |
scaler_save_path: Path to save fitted scaler for future use
|
|
|
|
| 233 |
scaler = MinMaxScaler()
|
| 234 |
elif scaler_type == "robust":
|
| 235 |
scaler = RobustScaler()
|
| 236 |
+
elif scaler_type == "power":
|
| 237 |
+
from sklearn.preprocessing import PowerTransformer
|
| 238 |
+
scaler = PowerTransformer(method='yeo-johnson', standardize=True)
|
| 239 |
+
print(" 📐 Using Yeo-Johnson PowerTransformer (makes data more Gaussian)")
|
| 240 |
+
elif scaler_type == "quantile":
|
| 241 |
+
from sklearn.preprocessing import QuantileTransformer
|
| 242 |
+
scaler = QuantileTransformer(output_distribution='normal', random_state=42, n_quantiles=min(1000, len(df)))
|
| 243 |
+
print(" 📐 Using QuantileTransformer (maps to normal distribution)")
|
| 244 |
else:
|
| 245 |
+
raise ValueError(f"Unsupported scaler_type: {scaler_type}. Use 'standard', 'minmax', 'robust', 'power', or 'quantile'.")
|
| 246 |
|
| 247 |
# Get original statistics
|
| 248 |
original_stats = {}
|
src/tools/advanced_training.py
CHANGED
|
@@ -73,7 +73,7 @@ def hyperparameter_tuning(
|
|
| 73 |
Args:
|
| 74 |
file_path: Path to prepared dataset
|
| 75 |
target_col: Target column name
|
| 76 |
-
model_type: Model to tune ('random_forest', 'xgboost', 'logistic', 'ridge')
|
| 77 |
task_type: 'classification', 'regression', or 'auto' (detect from target)
|
| 78 |
n_trials: Number of optimization trials (default 50, auto-reduced for large datasets)
|
| 79 |
cv_folds: Number of cross-validation folds
|
|
@@ -243,8 +243,45 @@ def hyperparameter_tuning(
|
|
| 243 |
'random_state': random_state
|
| 244 |
}
|
| 245 |
model = Ridge(**params)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 246 |
else:
|
| 247 |
-
raise ValueError(f"Unsupported model_type: {model_type}")
|
| 248 |
|
| 249 |
# Cross-validation
|
| 250 |
if task_type == "classification":
|
|
|
|
| 73 |
Args:
|
| 74 |
file_path: Path to prepared dataset
|
| 75 |
target_col: Target column name
|
| 76 |
+
model_type: Model to tune ('random_forest', 'xgboost', 'lightgbm', 'catboost', 'logistic', 'ridge')
|
| 77 |
task_type: 'classification', 'regression', or 'auto' (detect from target)
|
| 78 |
n_trials: Number of optimization trials (default 50, auto-reduced for large datasets)
|
| 79 |
cv_folds: Number of cross-validation folds
|
|
|
|
| 243 |
'random_state': random_state
|
| 244 |
}
|
| 245 |
model = Ridge(**params)
|
| 246 |
+
|
| 247 |
+
elif model_type == "lightgbm":
|
| 248 |
+
from lightgbm import LGBMClassifier, LGBMRegressor
|
| 249 |
+
params = {
|
| 250 |
+
'n_estimators': trial.suggest_int('n_estimators', 50, 500),
|
| 251 |
+
'max_depth': trial.suggest_int('max_depth', 3, 12),
|
| 252 |
+
'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.3, log=True),
|
| 253 |
+
'subsample': trial.suggest_float('subsample', 0.5, 1.0),
|
| 254 |
+
'colsample_bytree': trial.suggest_float('colsample_bytree', 0.5, 1.0),
|
| 255 |
+
'reg_alpha': trial.suggest_float('reg_alpha', 1e-8, 10.0, log=True),
|
| 256 |
+
'reg_lambda': trial.suggest_float('reg_lambda', 1e-8, 10.0, log=True),
|
| 257 |
+
'num_leaves': trial.suggest_int('num_leaves', 15, 127),
|
| 258 |
+
'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
|
| 259 |
+
'random_state': random_state,
|
| 260 |
+
'verbosity': -1
|
| 261 |
+
}
|
| 262 |
+
if task_type == "classification":
|
| 263 |
+
model = LGBMClassifier(**params)
|
| 264 |
+
else:
|
| 265 |
+
model = LGBMRegressor(**params)
|
| 266 |
+
|
| 267 |
+
elif model_type == "catboost":
|
| 268 |
+
from catboost import CatBoostClassifier, CatBoostRegressor
|
| 269 |
+
params = {
|
| 270 |
+
'iterations': trial.suggest_int('iterations', 50, 500),
|
| 271 |
+
'depth': trial.suggest_int('depth', 3, 10),
|
| 272 |
+
'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.3, log=True),
|
| 273 |
+
'l2_leaf_reg': trial.suggest_float('l2_leaf_reg', 1e-8, 10.0, log=True),
|
| 274 |
+
'bagging_temperature': trial.suggest_float('bagging_temperature', 0, 10),
|
| 275 |
+
'random_strength': trial.suggest_float('random_strength', 0, 10),
|
| 276 |
+
'random_seed': random_state,
|
| 277 |
+
'verbose': 0
|
| 278 |
+
}
|
| 279 |
+
if task_type == "classification":
|
| 280 |
+
model = CatBoostClassifier(**params)
|
| 281 |
+
else:
|
| 282 |
+
model = CatBoostRegressor(**params)
|
| 283 |
else:
|
| 284 |
+
raise ValueError(f"Unsupported model_type: {model_type}. Use 'random_forest', 'xgboost', 'lightgbm', 'catboost', 'logistic', or 'ridge'.")
|
| 285 |
|
| 286 |
# Cross-validation
|
| 287 |
if task_type == "classification":
|
src/tools/agent_tool_mapping.py
CHANGED
|
@@ -42,6 +42,12 @@ TOOL_CATEGORIES = {
|
|
| 42 |
],
|
| 43 |
"modeling": [
|
| 44 |
"train_baseline_models",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
"hyperparameter_tuning",
|
| 46 |
"perform_cross_validation",
|
| 47 |
"train_ensemble_models",
|
|
@@ -53,6 +59,9 @@ TOOL_CATEGORIES = {
|
|
| 53 |
"decompose_time_series",
|
| 54 |
"forecast_arima",
|
| 55 |
"forecast_prophet",
|
|
|
|
|
|
|
|
|
|
| 56 |
"detect_anomalies_time_series",
|
| 57 |
],
|
| 58 |
"nlp": [
|
|
|
|
| 42 |
],
|
| 43 |
"modeling": [
|
| 44 |
"train_baseline_models",
|
| 45 |
+
"train_with_autogluon",
|
| 46 |
+
"predict_with_autogluon",
|
| 47 |
+
"optimize_autogluon_model",
|
| 48 |
+
"analyze_autogluon_model",
|
| 49 |
+
"extend_autogluon_training",
|
| 50 |
+
"train_multilabel_autogluon",
|
| 51 |
"hyperparameter_tuning",
|
| 52 |
"perform_cross_validation",
|
| 53 |
"train_ensemble_models",
|
|
|
|
| 59 |
"decompose_time_series",
|
| 60 |
"forecast_arima",
|
| 61 |
"forecast_prophet",
|
| 62 |
+
"forecast_with_autogluon",
|
| 63 |
+
"backtest_timeseries",
|
| 64 |
+
"analyze_timeseries_model",
|
| 65 |
"detect_anomalies_time_series",
|
| 66 |
],
|
| 67 |
"nlp": [
|
src/tools/auto_pipeline.py
CHANGED
|
@@ -239,7 +239,7 @@ def auto_feature_selection(file_path: str,
|
|
| 239 |
target_col: Target column
|
| 240 |
task_type: 'classification', 'regression', or 'auto'
|
| 241 |
max_features: Maximum number of features to keep
|
| 242 |
-
method: 'mutual_info', 'f_test', or 'auto'
|
| 243 |
output_path: Where to save selected features
|
| 244 |
|
| 245 |
Returns:
|
|
@@ -276,6 +276,60 @@ def auto_feature_selection(file_path: str,
|
|
| 276 |
# Perform selection
|
| 277 |
n_features_to_select = min(max_features, len(numeric_features))
|
| 278 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 279 |
if method == "mutual_info":
|
| 280 |
if task_type == "classification":
|
| 281 |
selector = SelectKBest(mutual_info_classif, k=n_features_to_select)
|
|
|
|
| 239 |
target_col: Target column
|
| 240 |
task_type: 'classification', 'regression', or 'auto'
|
| 241 |
max_features: Maximum number of features to keep
|
| 242 |
+
method: 'mutual_info', 'f_test', 'boruta', or 'auto'
|
| 243 |
output_path: Where to save selected features
|
| 244 |
|
| 245 |
Returns:
|
|
|
|
| 276 |
# Perform selection
|
| 277 |
n_features_to_select = min(max_features, len(numeric_features))
|
| 278 |
|
| 279 |
+
if method == "boruta":
|
| 280 |
+
# BorutaPy - all-relevant feature selection
|
| 281 |
+
try:
|
| 282 |
+
from boruta import BorutaPy
|
| 283 |
+
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
|
| 284 |
+
|
| 285 |
+
print("🔍 Running BorutaPy all-relevant feature selection...")
|
| 286 |
+
|
| 287 |
+
if task_type == "classification":
|
| 288 |
+
rf = RandomForestClassifier(n_jobs=-1, max_depth=5, random_state=42)
|
| 289 |
+
else:
|
| 290 |
+
rf = RandomForestRegressor(n_jobs=-1, max_depth=5, random_state=42)
|
| 291 |
+
|
| 292 |
+
boruta_selector = BorutaPy(
|
| 293 |
+
rf,
|
| 294 |
+
n_estimators='auto',
|
| 295 |
+
max_iter=100,
|
| 296 |
+
random_state=42,
|
| 297 |
+
verbose=0
|
| 298 |
+
)
|
| 299 |
+
|
| 300 |
+
X_filled = X_numeric.fillna(0).values
|
| 301 |
+
boruta_selector.fit(X_filled, y.values if hasattr(y, 'values') else y)
|
| 302 |
+
|
| 303 |
+
# Get selected features
|
| 304 |
+
selected_mask = boruta_selector.support_
|
| 305 |
+
selected_features = np.array(numeric_features)[selected_mask].tolist()
|
| 306 |
+
|
| 307 |
+
# Get ranking
|
| 308 |
+
feature_scores = dict(zip(numeric_features, boruta_selector.ranking_.tolist()))
|
| 309 |
+
sorted_features = sorted(feature_scores.items(), key=lambda x: x[1])
|
| 310 |
+
|
| 311 |
+
results = {
|
| 312 |
+
"n_features_original": len(numeric_features),
|
| 313 |
+
"n_features_selected": len(selected_features),
|
| 314 |
+
"selected_features": selected_features,
|
| 315 |
+
"feature_rankings": dict(sorted_features),
|
| 316 |
+
"tentative_features": np.array(numeric_features)[boruta_selector.support_weak_].tolist(),
|
| 317 |
+
"selection_method": "boruta",
|
| 318 |
+
"task_type": task_type
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
# Save selected features + target
|
| 322 |
+
if output_path:
|
| 323 |
+
df_selected = df[selected_features + [target_col]]
|
| 324 |
+
df_selected.to_csv(output_path, index=False)
|
| 325 |
+
results["output_path"] = output_path
|
| 326 |
+
|
| 327 |
+
return results
|
| 328 |
+
|
| 329 |
+
except ImportError:
|
| 330 |
+
print("⚠️ boruta not installed. Falling back to mutual_info. Install with: pip install boruta>=0.3")
|
| 331 |
+
method = "mutual_info" if task_type == "classification" else "f_test"
|
| 332 |
+
|
| 333 |
if method == "mutual_info":
|
| 334 |
if task_type == "classification":
|
| 335 |
selector = SelectKBest(mutual_info_classif, k=n_features_to_select)
|
src/tools/autogluon_training.py
ADDED
|
@@ -0,0 +1,1480 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
AutoGluon-Powered Training Tools
|
| 3 |
+
Replaces manual model training with AutoGluon's automated ML for better accuracy,
|
| 4 |
+
automatic ensembling, and built-in handling of raw data (no pre-encoding needed).
|
| 5 |
+
|
| 6 |
+
Supports:
|
| 7 |
+
- Classification (binary + multiclass)
|
| 8 |
+
- Regression
|
| 9 |
+
- Time Series Forecasting (NEW capability)
|
| 10 |
+
|
| 11 |
+
Scalability safeguards:
|
| 12 |
+
- time_limit prevents runaway training
|
| 13 |
+
- presets control compute budget
|
| 14 |
+
- num_cpus capped to avoid hogging shared resources
|
| 15 |
+
- Memory-aware: excludes heavy models on limited RAM
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
import os
|
| 19 |
+
import json
|
| 20 |
+
import time
|
| 21 |
+
import shutil
|
| 22 |
+
import warnings
|
| 23 |
+
from typing import Dict, Any, Optional, List
|
| 24 |
+
from pathlib import Path
|
| 25 |
+
|
| 26 |
+
import pandas as pd
|
| 27 |
+
import numpy as np
|
| 28 |
+
|
| 29 |
+
warnings.filterwarnings('ignore')
|
| 30 |
+
|
| 31 |
+
# Lazy import AutoGluon to avoid slow startup
|
| 32 |
+
AUTOGLUON_TABULAR_AVAILABLE = False
|
| 33 |
+
AUTOGLUON_TIMESERIES_AVAILABLE = False
|
| 34 |
+
|
| 35 |
+
def _ensure_autogluon_tabular():
|
| 36 |
+
global AUTOGLUON_TABULAR_AVAILABLE
|
| 37 |
+
try:
|
| 38 |
+
from autogluon.tabular import TabularPredictor, TabularDataset
|
| 39 |
+
AUTOGLUON_TABULAR_AVAILABLE = True
|
| 40 |
+
return TabularPredictor, TabularDataset
|
| 41 |
+
except ImportError:
|
| 42 |
+
raise ImportError(
|
| 43 |
+
"AutoGluon tabular not installed. Run: pip install autogluon.tabular"
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
def _ensure_autogluon_timeseries():
|
| 47 |
+
global AUTOGLUON_TIMESERIES_AVAILABLE
|
| 48 |
+
try:
|
| 49 |
+
from autogluon.timeseries import TimeSeriesPredictor, TimeSeriesDataFrame
|
| 50 |
+
AUTOGLUON_TIMESERIES_AVAILABLE = True
|
| 51 |
+
return TimeSeriesPredictor, TimeSeriesDataFrame
|
| 52 |
+
except ImportError:
|
| 53 |
+
raise ImportError(
|
| 54 |
+
"AutoGluon timeseries not installed. Run: pip install autogluon.timeseries"
|
| 55 |
+
)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# ============================================================
|
| 59 |
+
# RESOURCE CONFIGURATION
|
| 60 |
+
# Adapt to deployment environment (HF Spaces, local, cloud)
|
| 61 |
+
# ============================================================
|
| 62 |
+
|
| 63 |
+
def _get_resource_config() -> Dict[str, Any]:
|
| 64 |
+
"""
|
| 65 |
+
Detect available resources and return safe training config.
|
| 66 |
+
Prevents AutoGluon from consuming too much memory/CPU on shared infra.
|
| 67 |
+
"""
|
| 68 |
+
import psutil
|
| 69 |
+
|
| 70 |
+
total_ram_gb = psutil.virtual_memory().total / (1024 ** 3)
|
| 71 |
+
cpu_count = os.cpu_count() or 2
|
| 72 |
+
|
| 73 |
+
# Conservative defaults for shared environments (HF Spaces = 16GB, 2-8 vCPU)
|
| 74 |
+
config = {
|
| 75 |
+
"num_cpus": min(cpu_count, 4), # Cap at 4 to leave room for other users
|
| 76 |
+
"num_gpus": 0, # No GPU on free HF Spaces
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
if total_ram_gb < 8:
|
| 80 |
+
config["presets"] = "medium_quality"
|
| 81 |
+
config["excluded_model_types"] = ["NN_TORCH", "FASTAI", "KNN"]
|
| 82 |
+
config["time_limit"] = 60
|
| 83 |
+
elif total_ram_gb < 16:
|
| 84 |
+
config["presets"] = "medium_quality"
|
| 85 |
+
config["excluded_model_types"] = ["NN_TORCH", "FASTAI"]
|
| 86 |
+
config["time_limit"] = 120
|
| 87 |
+
else:
|
| 88 |
+
config["presets"] = "best_quality"
|
| 89 |
+
config["excluded_model_types"] = ["NN_TORCH"] # Still skip neural nets for speed
|
| 90 |
+
config["time_limit"] = 180
|
| 91 |
+
|
| 92 |
+
return config
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
# ============================================================
|
| 96 |
+
# TABULAR: Classification + Regression
|
| 97 |
+
# ============================================================
|
| 98 |
+
|
| 99 |
+
def train_with_autogluon(
    file_path: str,
    target_col: str,
    task_type: str = "auto",
    time_limit: int = 120,
    presets: str = "medium_quality",
    eval_metric: Optional[str] = None,
    output_dir: Optional[str] = None,
    infer_limit: Optional[float] = None
) -> Dict[str, Any]:
    """
    Train ML models using AutoGluon's automated approach.

    Handles raw data directly — no need to pre-encode categoricals or impute
    missing values. Automatically trains multiple models, performs stacking,
    and returns the best ensemble.

    Supports: classification (binary/multiclass), regression.

    Args:
        file_path: Path to CSV/Parquet dataset.
        target_col: Column to predict.
        task_type: 'classification', 'regression', or 'auto' (auto-detected).
        time_limit: Max training time in seconds (default 120). The effective
            limit is capped by the host's resource-aware config.
        presets: Quality preset - 'medium_quality' (fast), 'best_quality'
            (slower, better), 'good_quality' (balanced).
        eval_metric: Metric to optimize (auto-selected if None).
            Classification: 'accuracy', 'f1', 'roc_auc', 'log_loss'
            Regression: 'rmse', 'mae', 'r2', 'mape'
        output_dir: Where to save trained model (default: ./outputs/autogluon_model).
        infer_limit: Optional seconds-per-row inference budget forwarded to fit().

    Returns:
        Dictionary with training results, leaderboard, best model info, and
        feature importance; or {"status": "error", "message": ...} on failure.
    """
    TabularPredictor, TabularDataset = _ensure_autogluon_tabular()

    start_time = time.time()
    output_dir = output_dir or "./outputs/autogluon_model"

    # ── Validate input ──
    if not Path(file_path).exists():
        return {"status": "error", "message": f"File not found: {file_path}"}

    # ── Load data ──
    print(f"\n🚀 AutoGluon Training Starting...")
    print(f" 📁 Dataset: {file_path}")
    print(f" 🎯 Target: {target_col}")
    print(f" ⏱️ Time limit: {time_limit}s")
    print(f" 📊 Presets: {presets}")

    try:
        train_data = TabularDataset(file_path)
    except Exception as e:
        return {"status": "error", "message": f"Failed to load data: {str(e)}"}

    if target_col not in train_data.columns:
        return {
            "status": "error",
            "message": f"Target column '{target_col}' not found. Available: {list(train_data.columns)}"
        }

    n_rows, n_cols = train_data.shape
    print(f" 📐 Shape: {n_rows:,} rows × {n_cols} columns")

    # ── Get resource-aware config ──
    resource_config = _get_resource_config()

    # User's requested time limit is honored only up to the host's safe ceiling
    effective_time_limit = min(time_limit, resource_config["time_limit"])
    effective_presets = presets

    # ── Auto-detect task type ──
    if task_type == "auto":
        n_unique = train_data[target_col].nunique()
        # Low cardinality or string-valued targets → classification
        if n_unique <= 20 or train_data[target_col].dtype == 'object':
            task_type = "classification"
            task_type_detail = "binary" if n_unique == 2 else "multiclass"
        else:
            task_type = "regression"
            task_type_detail = "regression"
    else:
        task_type_detail = task_type

    # ── Select eval metric ──
    if eval_metric is None:
        if task_type == "classification":
            eval_metric = "f1_weighted" if task_type_detail == "multiclass" else "f1"
        else:
            eval_metric = "root_mean_squared_error"

    print(f" 🔍 Task type: {task_type_detail}")
    print(f" 📏 Eval metric: {eval_metric}")
    print(f" 🔧 Excluded models: {resource_config.get('excluded_model_types', [])}")

    # ── Clean output directory (AutoGluon needs fresh dir) ──
    if Path(output_dir).exists():
        shutil.rmtree(output_dir, ignore_errors=True)

    # BUG FIX: TabularPredictor only accepts problem_type in
    # {'binary', 'multiclass', 'regression', 'quantile'}. The previous code
    # passed "classification" after auto-detection, which raises a ValueError
    # inside AutoGluon. Pass the detailed type when valid, otherwise None so
    # AutoGluon infers it from the label column.
    valid_problem_types = {"binary", "multiclass", "regression", "quantile"}
    problem_type = task_type_detail if task_type_detail in valid_problem_types else None

    # ── Train ──
    try:
        predictor = TabularPredictor(
            label=target_col,
            eval_metric=eval_metric,
            path=output_dir,
            problem_type=problem_type
        )

        fit_kwargs = dict(
            train_data=train_data,
            time_limit=effective_time_limit,
            presets=effective_presets,
            excluded_model_types=resource_config.get("excluded_model_types", []),
            num_cpus=resource_config["num_cpus"],
            num_gpus=resource_config["num_gpus"],
            verbosity=1
        )
        if infer_limit is not None:
            fit_kwargs["infer_limit"] = infer_limit

        predictor.fit(**fit_kwargs)
    except Exception as e:
        return {"status": "error", "message": f"Training failed: {str(e)}"}

    elapsed = time.time() - start_time

    # ── Extract results ──
    leaderboard = predictor.leaderboard(silent=True)

    # Convert leaderboard to JSON-serializable records (top 10)
    leaderboard_data = []
    for _, row in leaderboard.head(10).iterrows():
        entry = {
            "model": str(row.get("model", "")),
            "score_val": round(float(row.get("score_val", 0)), 4),
            "fit_time": round(float(row.get("fit_time", 0)), 1),
            "pred_time_val": round(float(row.get("pred_time_val", 0)), 3),
        }
        if "stack_level" in row:
            entry["stack_level"] = int(row["stack_level"])
        leaderboard_data.append(entry)

    # Best model info
    best_model = predictor.model_best
    best_score = float(leaderboard.iloc[0]["score_val"]) if len(leaderboard) > 0 else None

    # Feature importance (top 20) — best-effort: can fail on some model types
    feature_importance_data = []
    try:
        fi = predictor.feature_importance(train_data, silent=True)
        for feat, row in fi.head(20).iterrows():
            feature_importance_data.append({
                "feature": str(feat),
                "importance": round(float(row.get("importance", 0)), 4),
                "p_value": round(float(row.get("p_value", 1)), 4) if "p_value" in row else None
            })
    except Exception:
        pass

    n_models = len(leaderboard)

    results = {
        "status": "success",
        "task_type": task_type_detail,
        "eval_metric": eval_metric,
        "best_model": best_model,
        "best_score": best_score,
        "n_models_trained": n_models,
        "n_rows": n_rows,
        "n_features": n_cols - 1,
        "training_time_seconds": round(elapsed, 1),
        "time_limit_used": effective_time_limit,
        "presets": effective_presets,
        "leaderboard": leaderboard_data,
        "feature_importance": feature_importance_data,
        "model_path": output_dir,
        "output_path": output_dir,
    }

    # ── Print summary ──
    print(f"\n{'='*60}")
    print(f"✅ AUTOGLUON TRAINING COMPLETE")
    print(f"{'='*60}")
    print(f"📊 Models trained: {n_models}")
    print(f"🏆 Best model: {best_model}")
    # `is not None` so a legitimate score of 0.0 is still reported
    if best_score is not None:
        print(f"📈 Best {eval_metric}: {best_score:.4f}")
    print(f"⏱️ Total time: {elapsed:.1f}s")
    print(f"💾 Model saved: {output_dir}")
    if leaderboard_data:
        print(f"\n📋 Top 5 Leaderboard:")
        for i, entry in enumerate(leaderboard_data[:5], 1):
            print(f" {i}. {entry['model']}: {entry['score_val']:.4f} (fit: {entry['fit_time']:.1f}s)")
    if feature_importance_data:
        print(f"\n🔑 Top 5 Features:")
        for fi_entry in feature_importance_data[:5]:
            print(f" • {fi_entry['feature']}: {fi_entry['importance']:.4f}")
    print(f"{'='*60}\n")

    return results
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def predict_with_autogluon(
    model_path: str,
    data_path: str,
    output_path: Optional[str] = None
) -> Dict[str, Any]:
    """
    Make predictions using a trained AutoGluon model.

    Loads a previously saved predictor, scores a new dataset, writes the
    input rows plus a ``prediction`` column to CSV, and (for classifiers)
    includes a sample of class probabilities.

    Args:
        model_path: Path to saved AutoGluon model directory.
        data_path: Path to new data for prediction.
        output_path: Path to save predictions CSV
            (default: ./outputs/autogluon_predictions.csv).

    Returns:
        Dict with prediction count, a 10-row sample, output path, the model
        used, and probability info; or {"status": "error", ...} on failure.
    """
    TabularPredictor, TabularDataset = _ensure_autogluon_tabular()

    # Fail fast with a specific message for each missing path
    for label, candidate in (("Model", model_path), ("Data", data_path)):
        if not Path(candidate).exists():
            return {"status": "error", "message": f"{label} not found: {candidate}"}

    try:
        predictor = TabularPredictor.load(model_path)
        new_data = TabularDataset(data_path)

        preds = predictor.predict(new_data)

        dest = output_path or "./outputs/autogluon_predictions.csv"
        Path(dest).parent.mkdir(parents=True, exist_ok=True)

        enriched = new_data.copy()
        enriched["prediction"] = preds.values
        enriched.to_csv(dest, index=False)

        # Class probabilities exist only for classification models; treat a
        # failure (e.g. regression predictor) as "no probabilities available".
        proba_info = None
        try:
            proba = predictor.predict_proba(new_data)
            proba_info = {
                "columns": list(proba.columns),
                "sample": proba.head(5).to_dict()
            }
        except Exception:
            pass

        return {
            "status": "success",
            "n_predictions": len(preds),
            "prediction_sample": preds.head(10).tolist(),
            "output_path": dest,
            "model_used": predictor.model_best,
            "probabilities": proba_info
        }
    except Exception as e:
        return {"status": "error", "message": f"Prediction failed: {str(e)}"}
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
# ============================================================
|
| 364 |
+
# TIME SERIES FORECASTING
|
| 365 |
+
# ============================================================
|
| 366 |
+
|
| 367 |
+
def forecast_with_autogluon(
|
| 368 |
+
file_path: str,
|
| 369 |
+
target_col: str,
|
| 370 |
+
time_col: str,
|
| 371 |
+
forecast_horizon: int = 30,
|
| 372 |
+
id_col: Optional[str] = None,
|
| 373 |
+
freq: Optional[str] = None,
|
| 374 |
+
time_limit: int = 120,
|
| 375 |
+
presets: str = "medium_quality",
|
| 376 |
+
output_path: Optional[str] = None,
|
| 377 |
+
static_features_path: Optional[str] = None,
|
| 378 |
+
known_covariates_cols: Optional[List[str]] = None,
|
| 379 |
+
holiday_country: Optional[str] = None,
|
| 380 |
+
fill_missing: bool = True,
|
| 381 |
+
models: Optional[List[str]] = None,
|
| 382 |
+
quantile_levels: Optional[List[float]] = None
|
| 383 |
+
) -> Dict[str, Any]:
|
| 384 |
+
"""
|
| 385 |
+
Forecast time series using AutoGluon's TimeSeriesPredictor.
|
| 386 |
+
|
| 387 |
+
Supports multiple forecasting models automatically: DeepAR, ETS, ARIMA, Theta,
|
| 388 |
+
Chronos (foundation model), and statistical ensembles.
|
| 389 |
+
Enhanced with covariates, holiday features, model selection, and quantile forecasting.
|
| 390 |
+
|
| 391 |
+
Args:
|
| 392 |
+
file_path: Path to time series CSV/Parquet
|
| 393 |
+
target_col: Column with values to forecast
|
| 394 |
+
time_col: Column with timestamps/dates
|
| 395 |
+
forecast_horizon: Number of future periods to predict
|
| 396 |
+
id_col: Column identifying different series (for multi-series)
|
| 397 |
+
freq: Frequency string ('D'=daily, 'h'=hourly, 'MS'=monthly, 'W'=weekly)
|
| 398 |
+
time_limit: Max training time in seconds
|
| 399 |
+
presets: 'fast_training', 'medium_quality', 'best_quality', or 'chronos_tiny'
|
| 400 |
+
output_path: Path to save forecast CSV
|
| 401 |
+
static_features_path: CSV with per-series metadata (one row per series)
|
| 402 |
+
known_covariates_cols: Columns with future-known values (holidays, promotions)
|
| 403 |
+
holiday_country: Country code for auto holiday features (e.g. 'US', 'UK', 'IN')
|
| 404 |
+
fill_missing: Whether to auto-fill missing values in time series
|
| 405 |
+
models: Specific models to train (e.g. ['ETS', 'DeepAR', 'AutoARIMA'])
|
| 406 |
+
quantile_levels: Quantile levels for probabilistic forecasts (e.g. [0.1, 0.5, 0.9])
|
| 407 |
+
|
| 408 |
+
Returns:
|
| 409 |
+
Dictionary with forecasts, model performance, and leaderboard
|
| 410 |
+
"""
|
| 411 |
+
TimeSeriesPredictor, TimeSeriesDataFrame = _ensure_autogluon_timeseries()
|
| 412 |
+
|
| 413 |
+
start_time = time.time()
|
| 414 |
+
output_dir = "./outputs/autogluon_ts_model"
|
| 415 |
+
output_path = output_path or "./outputs/autogluon_forecast.csv"
|
| 416 |
+
|
| 417 |
+
# ── Validate ──
|
| 418 |
+
if not Path(file_path).exists():
|
| 419 |
+
return {"status": "error", "message": f"File not found: {file_path}"}
|
| 420 |
+
|
| 421 |
+
print(f"\n🚀 AutoGluon Time Series Forecasting...")
|
| 422 |
+
print(f" 📁 Dataset: {file_path}")
|
| 423 |
+
print(f" 🎯 Target: {target_col}")
|
| 424 |
+
print(f" 📅 Time column: {time_col}")
|
| 425 |
+
print(f" 🔮 Forecast horizon: {forecast_horizon} periods")
|
| 426 |
+
|
| 427 |
+
# ── Load and prepare data ──
|
| 428 |
+
try:
|
| 429 |
+
df = pd.read_csv(file_path)
|
| 430 |
+
except Exception:
|
| 431 |
+
try:
|
| 432 |
+
df = pd.read_parquet(file_path)
|
| 433 |
+
except Exception as e:
|
| 434 |
+
return {"status": "error", "message": f"Failed to load data: {str(e)}"}
|
| 435 |
+
|
| 436 |
+
if target_col not in df.columns:
|
| 437 |
+
return {
|
| 438 |
+
"status": "error",
|
| 439 |
+
"message": f"Target column '{target_col}' not found. Available: {list(df.columns)}"
|
| 440 |
+
}
|
| 441 |
+
if time_col not in df.columns:
|
| 442 |
+
return {
|
| 443 |
+
"status": "error",
|
| 444 |
+
"message": f"Time column '{time_col}' not found. Available: {list(df.columns)}"
|
| 445 |
+
}
|
| 446 |
+
|
| 447 |
+
# Parse datetime
|
| 448 |
+
df[time_col] = pd.to_datetime(df[time_col])
|
| 449 |
+
df = df.sort_values(time_col)
|
| 450 |
+
|
| 451 |
+
# If no id_col, create a dummy one (single series)
|
| 452 |
+
if id_col is None or id_col not in df.columns:
|
| 453 |
+
id_col = "__series_id"
|
| 454 |
+
df[id_col] = "series_0"
|
| 455 |
+
|
| 456 |
+
# Auto-detect frequency if not provided
|
| 457 |
+
if freq is None:
|
| 458 |
+
time_diffs = df[time_col].diff().dropna()
|
| 459 |
+
median_diff = time_diffs.median()
|
| 460 |
+
if median_diff <= pd.Timedelta(hours=2):
|
| 461 |
+
freq = "h"
|
| 462 |
+
elif median_diff <= pd.Timedelta(days=1.5):
|
| 463 |
+
freq = "D"
|
| 464 |
+
elif median_diff <= pd.Timedelta(days=8):
|
| 465 |
+
freq = "W"
|
| 466 |
+
elif median_diff <= pd.Timedelta(days=35):
|
| 467 |
+
freq = "MS"
|
| 468 |
+
else:
|
| 469 |
+
freq = "D" # Default
|
| 470 |
+
|
| 471 |
+
print(f" 📊 Frequency: {freq}")
|
| 472 |
+
print(f" 📐 Shape: {df.shape[0]:,} rows")
|
| 473 |
+
|
| 474 |
+
# ── Add holiday features (#29) ──
|
| 475 |
+
if holiday_country:
|
| 476 |
+
try:
|
| 477 |
+
import holidays as holidays_lib
|
| 478 |
+
country_holidays = holidays_lib.country_holidays(holiday_country)
|
| 479 |
+
df['is_holiday'] = df[time_col].dt.date.apply(
|
| 480 |
+
lambda d: 1 if d in country_holidays else 0
|
| 481 |
+
).astype(float)
|
| 482 |
+
if known_covariates_cols is None:
|
| 483 |
+
known_covariates_cols = []
|
| 484 |
+
if 'is_holiday' not in known_covariates_cols:
|
| 485 |
+
known_covariates_cols.append('is_holiday')
|
| 486 |
+
print(f" 🎄 Holiday features added for: {holiday_country}")
|
| 487 |
+
except ImportError:
|
| 488 |
+
print(f" ⚠️ 'holidays' package not installed. Skipping holiday features.")
|
| 489 |
+
except Exception as e:
|
| 490 |
+
print(f" ⚠️ Could not add holiday features: {e}")
|
| 491 |
+
|
| 492 |
+
# ── Convert to TimeSeriesDataFrame ──
|
| 493 |
+
try:
|
| 494 |
+
ts_df = TimeSeriesDataFrame.from_data_frame(
|
| 495 |
+
df,
|
| 496 |
+
id_column=id_col,
|
| 497 |
+
timestamp_column=time_col
|
| 498 |
+
)
|
| 499 |
+
except Exception as e:
|
| 500 |
+
return {"status": "error", "message": f"Failed to create time series: {str(e)}"}
|
| 501 |
+
|
| 502 |
+
# ── Attach static features (#26) ──
|
| 503 |
+
if static_features_path and Path(static_features_path).exists():
|
| 504 |
+
try:
|
| 505 |
+
static_df = pd.read_csv(static_features_path)
|
| 506 |
+
ts_df.static_features = static_df
|
| 507 |
+
print(f" 📌 Static features loaded: {list(static_df.columns)}")
|
| 508 |
+
except Exception as e:
|
| 509 |
+
print(f" ⚠️ Could not load static features: {e}")
|
| 510 |
+
|
| 511 |
+
# ── Fill missing values (#36) ──
|
| 512 |
+
if fill_missing:
|
| 513 |
+
try:
|
| 514 |
+
ts_df = ts_df.fill_missing_values()
|
| 515 |
+
print(f" 🔧 Missing values filled")
|
| 516 |
+
except Exception:
|
| 517 |
+
pass
|
| 518 |
+
|
| 519 |
+
# ── Clean output dir ──
|
| 520 |
+
if Path(output_dir).exists():
|
| 521 |
+
shutil.rmtree(output_dir, ignore_errors=True)
|
| 522 |
+
|
| 523 |
+
# ── Get resource config ──
|
| 524 |
+
resource_config = _get_resource_config()
|
| 525 |
+
effective_time_limit = min(time_limit, resource_config["time_limit"])
|
| 526 |
+
|
| 527 |
+
# ── Train forecasting models ──
|
| 528 |
+
try:
|
| 529 |
+
predictor_kwargs = dict(
|
| 530 |
+
target=target_col,
|
| 531 |
+
prediction_length=forecast_horizon,
|
| 532 |
+
path=output_dir,
|
| 533 |
+
freq=freq
|
| 534 |
+
)
|
| 535 |
+
if known_covariates_cols:
|
| 536 |
+
predictor_kwargs["known_covariates_names"] = known_covariates_cols
|
| 537 |
+
if quantile_levels:
|
| 538 |
+
predictor_kwargs["quantile_levels"] = quantile_levels
|
| 539 |
+
|
| 540 |
+
predictor = TimeSeriesPredictor(**predictor_kwargs)
|
| 541 |
+
|
| 542 |
+
ts_fit_kwargs = dict(
|
| 543 |
+
train_data=ts_df,
|
| 544 |
+
time_limit=effective_time_limit,
|
| 545 |
+
presets=presets,
|
| 546 |
+
)
|
| 547 |
+
if models:
|
| 548 |
+
ts_fit_kwargs["hyperparameters"] = {m: {} for m in models}
|
| 549 |
+
|
| 550 |
+
predictor.fit(**ts_fit_kwargs)
|
| 551 |
+
except Exception as e:
|
| 552 |
+
return {"status": "error", "message": f"Time series training failed: {str(e)}"}
|
| 553 |
+
|
| 554 |
+
elapsed = time.time() - start_time
|
| 555 |
+
|
| 556 |
+
# ── Generate forecasts ──
|
| 557 |
+
try:
|
| 558 |
+
predict_kwargs = {}
|
| 559 |
+
if known_covariates_cols:
|
| 560 |
+
try:
|
| 561 |
+
future_known = predictor.make_future_data_frame(ts_df)
|
| 562 |
+
if holiday_country:
|
| 563 |
+
import holidays as holidays_lib
|
| 564 |
+
country_holidays = holidays_lib.country_holidays(holiday_country)
|
| 565 |
+
dates = future_known.index.get_level_values('timestamp')
|
| 566 |
+
future_known['is_holiday'] = [
|
| 567 |
+
1.0 if d.date() in country_holidays else 0.0 for d in dates
|
| 568 |
+
]
|
| 569 |
+
predict_kwargs["known_covariates"] = future_known
|
| 570 |
+
except Exception:
|
| 571 |
+
pass
|
| 572 |
+
forecasts = predictor.predict(ts_df, **predict_kwargs)
|
| 573 |
+
except Exception as e:
|
| 574 |
+
return {"status": "error", "message": f"Forecasting failed: {str(e)}"}
|
| 575 |
+
|
| 576 |
+
# ── Leaderboard ──
|
| 577 |
+
leaderboard = predictor.leaderboard(silent=True)
|
| 578 |
+
leaderboard_data = []
|
| 579 |
+
for _, row in leaderboard.head(10).iterrows():
|
| 580 |
+
leaderboard_data.append({
|
| 581 |
+
"model": str(row.get("model", "")),
|
| 582 |
+
"score_val": round(float(row.get("score_val", 0)), 4),
|
| 583 |
+
"fit_time": round(float(row.get("fit_time", 0)), 1),
|
| 584 |
+
})
|
| 585 |
+
|
| 586 |
+
best_model = predictor.model_best if hasattr(predictor, 'model_best') else leaderboard_data[0]["model"] if leaderboard_data else "unknown"
|
| 587 |
+
best_score = leaderboard_data[0]["score_val"] if leaderboard_data else None
|
| 588 |
+
|
| 589 |
+
# ── Save forecasts ──
|
| 590 |
+
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
|
| 591 |
+
try:
|
| 592 |
+
forecast_df = forecasts.reset_index()
|
| 593 |
+
forecast_df.to_csv(output_path, index=False)
|
| 594 |
+
except Exception:
|
| 595 |
+
output_path = output_path # Keep path but note it may not have saved
|
| 596 |
+
|
| 597 |
+
# ── Forecast summary ──
|
| 598 |
+
forecast_summary = {}
|
| 599 |
+
try:
|
| 600 |
+
mean_col = "mean" if "mean" in forecasts.columns else forecasts.columns[0]
|
| 601 |
+
forecast_values = forecasts[mean_col].values
|
| 602 |
+
forecast_summary = {
|
| 603 |
+
"mean_forecast": round(float(np.mean(forecast_values)), 2),
|
| 604 |
+
"min_forecast": round(float(np.min(forecast_values)), 2),
|
| 605 |
+
"max_forecast": round(float(np.max(forecast_values)), 2),
|
| 606 |
+
"forecast_std": round(float(np.std(forecast_values)), 2),
|
| 607 |
+
}
|
| 608 |
+
except Exception:
|
| 609 |
+
pass
|
| 610 |
+
|
| 611 |
+
results = {
|
| 612 |
+
"status": "success",
|
| 613 |
+
"task_type": "time_series_forecasting",
|
| 614 |
+
"target_col": target_col,
|
| 615 |
+
"time_col": time_col,
|
| 616 |
+
"forecast_horizon": forecast_horizon,
|
| 617 |
+
"frequency": freq,
|
| 618 |
+
"n_series": df[id_col].nunique() if id_col != "__series_id" else 1,
|
| 619 |
+
"n_data_points": len(df),
|
| 620 |
+
"best_model": best_model,
|
| 621 |
+
"best_score": best_score,
|
| 622 |
+
"n_models_trained": len(leaderboard),
|
| 623 |
+
"training_time_seconds": round(elapsed, 1),
|
| 624 |
+
"leaderboard": leaderboard_data,
|
| 625 |
+
"forecast_summary": forecast_summary,
|
| 626 |
+
"output_path": output_path,
|
| 627 |
+
"model_path": output_dir,
|
| 628 |
+
}
|
| 629 |
+
|
| 630 |
+
# ── Print summary ──
|
| 631 |
+
print(f"\n{'='*60}")
|
| 632 |
+
print(f"✅ TIME SERIES FORECASTING COMPLETE")
|
| 633 |
+
print(f"{'='*60}")
|
| 634 |
+
print(f"📊 Models trained: {len(leaderboard)}")
|
| 635 |
+
print(f"🏆 Best model: {best_model}")
|
| 636 |
+
print(f"📈 Best score: {best_score}")
|
| 637 |
+
print(f"🔮 Forecast: {forecast_horizon} periods ahead")
|
| 638 |
+
if forecast_summary:
|
| 639 |
+
print(f"📉 Forecast range: {forecast_summary.get('min_forecast')} to {forecast_summary.get('max_forecast')}")
|
| 640 |
+
print(f"⏱️ Total time: {elapsed:.1f}s")
|
| 641 |
+
print(f"💾 Forecasts saved: {output_path}")
|
| 642 |
+
if leaderboard_data:
|
| 643 |
+
print(f"\n📋 Leaderboard:")
|
| 644 |
+
for i, entry in enumerate(leaderboard_data[:5], 1):
|
| 645 |
+
print(f" {i}. {entry['model']}: {entry['score_val']:.4f}")
|
| 646 |
+
print(f"{'='*60}\n")
|
| 647 |
+
|
| 648 |
+
return results
|
| 649 |
+
|
| 650 |
+
|
| 651 |
+
# ============================================================
|
| 652 |
+
# POST-TRAINING OPTIMIZATION (#1, #2, #6, #8, #9, #24)
|
| 653 |
+
# ============================================================
|
| 654 |
+
|
| 655 |
+
def optimize_autogluon_model(
    model_path: str,
    operation: str,
    data_path: Optional[str] = None,
    metric: Optional[str] = None,
    models_to_delete: Optional[List[str]] = None,
    output_dir: Optional[str] = None
) -> Dict[str, Any]:
    """
    Post-training optimization on a trained AutoGluon model.

    Operations:
    - refit_full: Re-train best models on 100% data (no held-out fold) for deployment
    - distill: Compress ensemble into a single lighter model via knowledge distillation
    - calibrate_threshold: Optimize binary classification threshold for best F1/precision/recall
    - deploy_optimize: Strip training artifacts for minimal deployment footprint
    - delete_models: Remove specific models to free resources

    Args:
        model_path: Path to saved AutoGluon model directory
        operation: One of 'refit_full', 'distill', 'calibrate_threshold', 'deploy_optimize', 'delete_models'
        data_path: Path to dataset (required for distill, calibrate_threshold)
        metric: Metric to optimize for calibrate_threshold: 'f1', 'balanced_accuracy', 'precision', 'recall'
        models_to_delete: List of model names to delete (for delete_models operation)
        output_dir: Directory for optimized model output (for deploy_optimize)

    Returns:
        Dictionary with optimization results. Always contains a 'status' key
        ('success' or 'error'); never raises to the caller.
    """
    # Lazy import of autogluon.tabular via the module-level helper
    # (keeps autogluon optional until an AutoGluon tool is actually used).
    TabularPredictor, TabularDataset = _ensure_autogluon_tabular()

    if not Path(model_path).exists():
        return {"status": "error", "message": f"Model not found: {model_path}"}

    try:
        predictor = TabularPredictor.load(model_path)
    except Exception as e:
        return {"status": "error", "message": f"Failed to load model: {str(e)}"}

    print(f"\n🔧 AutoGluon Model Optimization: {operation}")
    print(f" 📁 Model: {model_path}")

    # All operations share one try/except so any AutoGluon failure is
    # converted into a structured error dict instead of propagating.
    try:
        if operation == "refit_full":
            # refit_full() re-trains validated models on the full dataset;
            # returns a mapping {original_model_name: refit_model_name}.
            refit_map = predictor.refit_full()
            refit_models = list(refit_map.values())
            new_leaderboard = predictor.leaderboard(silent=True)

            # Serialize top-10 leaderboard rows into JSON-safe primitives.
            leaderboard_data = []
            for _, row in new_leaderboard.head(10).iterrows():
                leaderboard_data.append({
                    "model": str(row.get("model", "")),
                    "score_val": round(float(row.get("score_val", 0)), 4),
                })

            print(f" ✅ Models refit on 100% data: {refit_models}")
            return {
                "status": "success",
                "operation": "refit_full",
                "message": "Models re-trained on 100% data (no held-out folds) for deployment",
                "refit_models": refit_models,
                # NOTE(review): read AFTER refit_full(), so this may already
                # point at a refit model rather than the pre-refit best — confirm.
                "original_best": predictor.model_best,
                "leaderboard": leaderboard_data,
                "model_path": model_path
            }

        elif operation == "distill":
            # Distillation re-trains student models against the ensemble's
            # soft predictions; needs the original training data.
            if not data_path or not Path(data_path).exists():
                return {"status": "error", "message": "data_path required for distillation"}

            train_data = TabularDataset(data_path)
            resource_config = _get_resource_config()

            # 'spunge' is AutoGluon's data-augmentation method for distillation.
            distilled = predictor.distill(
                train_data=train_data,
                time_limit=resource_config["time_limit"],
                augment_method='spunge'
            )

            new_leaderboard = predictor.leaderboard(silent=True)
            leaderboard_data = []
            for _, row in new_leaderboard.head(10).iterrows():
                leaderboard_data.append({
                    "model": str(row.get("model", "")),
                    "score_val": round(float(row.get("score_val", 0)), 4),
                })

            print(f" ✅ Ensemble distilled into: {distilled}")
            return {
                "status": "success",
                "operation": "distill",
                "message": "Ensemble distilled into lighter model(s) via knowledge distillation",
                "distilled_models": distilled,
                "best_model": predictor.model_best,
                "leaderboard": leaderboard_data,
                "model_path": model_path
            }

        elif operation == "calibrate_threshold":
            if not data_path or not Path(data_path).exists():
                return {"status": "error", "message": "data_path required for threshold calibration"}

            # Decision-threshold calibration is only meaningful for binary tasks.
            if predictor.problem_type != 'binary':
                return {"status": "error", "message": "Threshold calibration only works for binary classification"}

            test_data = TabularDataset(data_path)
            metric = metric or "f1"  # default optimization target

            # Returns the threshold maximizing `metric` and the score achieved.
            threshold, score = predictor.calibrate_decision_threshold(
                data=test_data,
                metric=metric
            )

            print(f" ✅ Optimal threshold: {threshold:.4f} ({metric}={score:.4f})")
            return {
                "status": "success",
                "operation": "calibrate_threshold",
                "optimal_threshold": round(float(threshold), 4),
                "score_at_threshold": round(float(score), 4),
                "metric": metric,
                "message": f"Optimal threshold: {threshold:.4f} (default was 0.5), {metric}={score:.4f}",
                "model_path": model_path
            }

        elif operation == "deploy_optimize":
            output_dir = output_dir or model_path + "_deploy"

            # Size of the full training directory (MB) before optimization.
            size_before = sum(
                f.stat().st_size for f in Path(model_path).rglob('*') if f.is_file()
            ) / (1024 * 1024)

            # clone_for_deployment copies only inference-required artifacts;
            # save_space() further strips the clone in place.
            deploy_path = predictor.clone_for_deployment(output_dir)

            deploy_predictor = TabularPredictor.load(deploy_path)
            deploy_predictor.save_space()

            size_after = sum(
                f.stat().st_size for f in Path(deploy_path).rglob('*') if f.is_file()
            ) / (1024 * 1024)

            print(f" ✅ Optimized: {size_before:.1f}MB → {size_after:.1f}MB")
            return {
                "status": "success",
                "operation": "deploy_optimize",
                # max(..., 0.01) guards against division by zero for a tiny/empty dir.
                "message": f"Model optimized for deployment: {size_before:.1f}MB → {size_after:.1f}MB ({(1-size_after/max(size_before,0.01))*100:.0f}% reduction)",
                "size_before_mb": round(size_before, 1),
                "size_after_mb": round(size_after, 1),
                "deploy_path": str(deploy_path),
                "best_model": deploy_predictor.model_best
            }

        elif operation == "delete_models":
            if not models_to_delete:
                return {"status": "error", "message": "models_to_delete list required"}

            # Count before/after to report how many were actually removed
            # (AutoGluon may silently skip unknown names — dry_run=False commits).
            before_count = len(predictor.model_names())
            predictor.delete_models(models_to_delete=models_to_delete, dry_run=False)
            after_count = len(predictor.model_names())

            print(f" ✅ Deleted {before_count - after_count} models")
            return {
                "status": "success",
                "operation": "delete_models",
                "message": f"Deleted {before_count - after_count} models ({before_count} → {after_count})",
                "remaining_models": predictor.model_names(),
                "best_model": predictor.model_best,
                "model_path": model_path
            }

        else:
            # Unrecognized operation name.
            return {
                "status": "error",
                "message": f"Unknown operation '{operation}'. Choose: refit_full, distill, calibrate_threshold, deploy_optimize, delete_models"
            }

    except Exception as e:
        return {"status": "error", "message": f"Optimization failed: {str(e)}"}
|
| 832 |
+
|
| 833 |
+
|
| 834 |
+
# ============================================================
|
| 835 |
+
# MODEL ANALYSIS & INSPECTION (#19 + extended leaderboard)
|
| 836 |
+
# ============================================================
|
| 837 |
+
|
| 838 |
+
def analyze_autogluon_model(
    model_path: str,
    data_path: Optional[str] = None,
    operation: str = "summary"
) -> Dict[str, Any]:
    """
    Inspect and analyze a trained AutoGluon model.

    Operations:
    - summary: Extended leaderboard with detailed model info (stack levels, memory, etc.)
    - transform_features: Returns the internally transformed feature matrix
    - info: Comprehensive model metadata and training summary

    Args:
        model_path: Path to saved AutoGluon model directory
        data_path: Path to dataset (required for transform_features)
        operation: One of 'summary', 'transform_features', 'info'

    Returns:
        Dictionary with analysis results. Always contains a 'status' key;
        errors are reported in-band rather than raised.
    """
    # Lazy import of autogluon.tabular (module-level helper).
    TabularPredictor, TabularDataset = _ensure_autogluon_tabular()

    if not Path(model_path).exists():
        return {"status": "error", "message": f"Model not found: {model_path}"}

    try:
        predictor = TabularPredictor.load(model_path)
    except Exception as e:
        return {"status": "error", "message": f"Failed to load model: {str(e)}"}

    try:
        if operation == "summary":
            # extra_info=True adds columns like stack level, memory, ancestors.
            leaderboard = predictor.leaderboard(extra_info=True, silent=True)

            # Flatten every leaderboard row to JSON-safe primitives; values
            # that can't be cast to float fall back to their string form.
            leaderboard_data = []
            for _, row in leaderboard.iterrows():
                entry = {"model": str(row.get("model", ""))}
                for col in leaderboard.columns:
                    if col != "model":
                        val = row[col]
                        try:
                            entry[str(col)] = round(float(val), 4) if isinstance(val, (int, float, np.floating)) else str(val)
                        except (ValueError, TypeError):
                            entry[str(col)] = str(val)
                leaderboard_data.append(entry)

            return {
                "status": "success",
                "operation": "summary",
                "best_model": predictor.model_best,
                "problem_type": predictor.problem_type,
                "eval_metric": str(predictor.eval_metric),
                "n_models": len(leaderboard),
                "model_names": predictor.model_names(),
                "leaderboard": leaderboard_data
            }

        elif operation == "transform_features":
            if not data_path or not Path(data_path).exists():
                return {"status": "error", "message": "data_path required for transform_features"}

            data = TabularDataset(data_path)
            # Applies the predictor's internal feature-engineering pipeline.
            transformed = predictor.transform_features(data)

            # Persist the engineered matrix so downstream tools can consume it.
            output_path = "./outputs/autogluon_transformed_features.csv"
            Path(output_path).parent.mkdir(parents=True, exist_ok=True)
            transformed.to_csv(output_path, index=False)

            return {
                "status": "success",
                "operation": "transform_features",
                "original_shape": list(data.shape),
                "transformed_shape": list(transformed.shape),
                # Column lists are truncated (20 / 30) to keep the payload small.
                "original_columns": list(data.columns[:20]),
                "transformed_columns": list(transformed.columns[:30]),
                "output_path": output_path,
                "message": f"Features transformed: {data.shape[1]} original → {transformed.shape[1]} engineered"
            }

        elif operation == "info":
            info = predictor.info()

            # predictor.info() can contain non-JSON-serializable objects;
            # probe each value with json.dumps and stringify the failures.
            safe_info = {}
            for key, val in info.items():
                try:
                    json.dumps(val)
                    safe_info[key] = val
                except (TypeError, ValueError):
                    safe_info[key] = str(val)

            return {
                "status": "success",
                "operation": "info",
                "model_info": safe_info
            }

        else:
            # Unrecognized operation name.
            return {
                "status": "error",
                "message": f"Unknown operation '{operation}'. Choose: summary, transform_features, info"
            }

    except Exception as e:
        return {"status": "error", "message": f"Analysis failed: {str(e)}"}
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
# ============================================================
|
| 946 |
+
# INCREMENTAL TRAINING (#3, #5)
|
| 947 |
+
# ============================================================
|
| 948 |
+
|
| 949 |
+
def extend_autogluon_training(
    model_path: str,
    operation: str = "fit_extra",
    data_path: Optional[str] = None,
    time_limit: int = 60,
    hyperparameters: Optional[Dict] = None
) -> Dict[str, Any]:
    """
    Add models or re-fit ensemble on an existing AutoGluon predictor.

    Operations:
    - fit_extra: Train additional models/hyperparameters without retraining from scratch
    - fit_weighted_ensemble: Re-fit the weighted ensemble layer on existing base models

    Args:
        model_path: Path to saved AutoGluon model directory
        operation: 'fit_extra' or 'fit_weighted_ensemble'
        data_path: Path to training data (required for fit_extra)
        time_limit: Additional training time in seconds
        hyperparameters: Model hyperparameters dict for fit_extra.
                         e.g. {"GBM": {"num_boost_round": 500}, "RF": {}}

    Returns:
        Dictionary with updated model info ('status' key is always present).
    """
    # Lazy import; TabularDataset is unused here but returned by the helper.
    TabularPredictor, TabularDataset = _ensure_autogluon_tabular()

    if not Path(model_path).exists():
        return {"status": "error", "message": f"Model not found: {model_path}"}

    try:
        predictor = TabularPredictor.load(model_path)
    except Exception as e:
        return {"status": "error", "message": f"Failed to load model: {str(e)}"}

    # Snapshot the model list so newly added models can be diffed afterwards.
    before_models = predictor.model_names()
    print(f"\n🔧 Extending AutoGluon Model: {operation}")
    print(f" 📁 Model: {model_path}")
    print(f" 📊 Current models: {len(before_models)}")

    try:
        if operation == "fit_extra":
            # NOTE(review): data_path is validated but never passed to
            # fit_extra below — presumably fit_extra reuses the training
            # data cached inside the predictor; confirm against AutoGluon docs.
            if not data_path or not Path(data_path).exists():
                return {"status": "error", "message": "data_path required for fit_extra"}

            resource_config = _get_resource_config()

            # Default hyperparameter grid: two LightGBM variants and two
            # RandomForest criteria, each with a distinguishing name suffix.
            hp = hyperparameters or {
                "GBM": [
                    {"extra_trees": True, "ag_args": {"name_suffix": "XT"}},
                    {"num_boost_round": 500},
                ],
                "RF": [
                    {"criterion": "gini", "ag_args": {"name_suffix": "Gini"}},
                    {"criterion": "entropy", "ag_args": {"name_suffix": "Entr"}},
                ],
            }

            # Caller's time_limit is capped by the environment's resource budget;
            # GPUs are explicitly disabled.
            predictor.fit_extra(
                hyperparameters=hp,
                time_limit=min(time_limit, resource_config["time_limit"]),
                num_cpus=resource_config["num_cpus"],
                num_gpus=0
            )

        elif operation == "fit_weighted_ensemble":
            # Re-fit only the weighted-ensemble layer over existing base models.
            predictor.fit_weighted_ensemble()

        else:
            return {
                "status": "error",
                "message": f"Unknown operation '{operation}'. Choose: fit_extra, fit_weighted_ensemble"
            }

        after_models = predictor.model_names()
        leaderboard = predictor.leaderboard(silent=True)

        # JSON-safe top-10 leaderboard summary.
        leaderboard_data = []
        for _, row in leaderboard.head(10).iterrows():
            leaderboard_data.append({
                "model": str(row.get("model", "")),
                "score_val": round(float(row.get("score_val", 0)), 4),
                "fit_time": round(float(row.get("fit_time", 0)), 1),
            })

        # Models present now but not before this call.
        new_models = [m for m in after_models if m not in before_models]

        print(f" ✅ New models added: {len(new_models)}")
        print(f" 🏆 Best model: {predictor.model_best}")

        return {
            "status": "success",
            "operation": operation,
            "models_before": len(before_models),
            "models_after": len(after_models),
            "new_models": new_models,
            "best_model": predictor.model_best,
            "leaderboard": leaderboard_data,
            "model_path": model_path
        }

    except Exception as e:
        return {"status": "error", "message": f"Extension failed: {str(e)}"}
|
| 1052 |
+
|
| 1053 |
+
|
| 1054 |
+
# ============================================================
|
| 1055 |
+
# MULTI-LABEL PREDICTION (#14)
|
| 1056 |
+
# ============================================================
|
| 1057 |
+
|
| 1058 |
+
def train_multilabel_autogluon(
    file_path: str,
    target_cols: List[str],
    time_limit: int = 120,
    presets: str = "medium_quality",
    output_dir: Optional[str] = None
) -> Dict[str, Any]:
    """
    Train multi-label prediction using AutoGluon's MultilabelPredictor.
    Predicts multiple target columns simultaneously by training separate
    TabularPredictors per label with shared feature engineering.

    Args:
        file_path: Path to CSV/Parquet dataset
        target_cols: List of columns to predict (e.g. ['label1', 'label2', 'label3'])
        time_limit: Max training time per label in seconds
        presets: Quality preset
        output_dir: Where to save trained model

    Returns:
        Dictionary with per-label results and overall performance
        ('status' key is always present; errors reported in-band).
    """
    # MultilabelPredictor may not exist in older autogluon releases,
    # so the import itself is guarded.
    try:
        from autogluon.tabular import TabularDataset, MultilabelPredictor
    except ImportError:
        return {
            "status": "error",
            "message": "MultilabelPredictor not available. Ensure autogluon.tabular>=1.2 is installed."
        }

    start_time = time.time()
    output_dir = output_dir or "./outputs/autogluon_multilabel"

    if not Path(file_path).exists():
        return {"status": "error", "message": f"File not found: {file_path}"}

    try:
        data = TabularDataset(file_path)
    except Exception as e:
        return {"status": "error", "message": f"Failed to load data: {str(e)}"}

    # Validate that every requested target column exists before training.
    missing_cols = [c for c in target_cols if c not in data.columns]
    if missing_cols:
        return {
            "status": "error",
            "message": f"Target columns not found: {missing_cols}. Available: {list(data.columns)}"
        }

    print(f"\n🚀 AutoGluon Multi-Label Training...")
    print(f" 📁 Dataset: {file_path}")
    print(f" 🎯 Targets: {target_cols}")
    print(f" 📐 Shape: {data.shape[0]:,} rows × {data.shape[1]} columns")

    # Cap training time by the environment's resource budget.
    resource_config = _get_resource_config()
    effective_time_limit = min(time_limit, resource_config["time_limit"])

    # Remove any stale model directory so training starts from a clean slate.
    if Path(output_dir).exists():
        shutil.rmtree(output_dir, ignore_errors=True)

    try:
        multi_predictor = MultilabelPredictor(
            labels=target_cols,
            path=output_dir
        )

        multi_predictor.fit(
            train_data=data,
            time_limit=effective_time_limit,
            presets=presets
        )
    except Exception as e:
        return {"status": "error", "message": f"Multi-label training failed: {str(e)}"}

    elapsed = time.time() - start_time

    # Collect per-label best model/score from each underlying TabularPredictor;
    # any introspection failure is recorded per-label rather than aborting.
    per_label_results = {}
    for label in target_cols:
        try:
            label_predictor = multi_predictor.get_predictor(label)
            lb = label_predictor.leaderboard(silent=True)
            per_label_results[label] = {
                "best_model": label_predictor.model_best,
                "best_score": round(float(lb.iloc[0]["score_val"]), 4) if len(lb) > 0 else None,
                "n_models": len(lb),
                "problem_type": label_predictor.problem_type
            }
        except Exception:
            per_label_results[label] = {"error": "Could not retrieve results"}

    # Human-readable console summary.
    print(f"\n{'='*60}")
    print(f"✅ MULTI-LABEL TRAINING COMPLETE")
    print(f"{'='*60}")
    for label, result in per_label_results.items():
        score = result.get('best_score', 'N/A')
        model = result.get('best_model', 'N/A')
        print(f" 🎯 {label}: {model} (score: {score})")
    print(f" ⏱️ Total time: {elapsed:.1f}s")
    print(f"{'='*60}\n")

    return {
        "status": "success",
        "task_type": "multilabel",
        "n_labels": len(target_cols),
        "labels": target_cols,
        "per_label_results": per_label_results,
        "training_time_seconds": round(elapsed, 1),
        "model_path": output_dir,
        "output_path": output_dir
    }
|
| 1167 |
+
|
| 1168 |
+
|
| 1169 |
+
# ============================================================
|
| 1170 |
+
# TIME SERIES BACKTESTING (#33)
|
| 1171 |
+
# ============================================================
|
| 1172 |
+
|
| 1173 |
+
def backtest_timeseries(
    file_path: str,
    target_col: str,
    time_col: str,
    forecast_horizon: int = 30,
    id_col: Optional[str] = None,
    freq: Optional[str] = None,
    num_val_windows: int = 3,
    time_limit: int = 120,
    presets: str = "medium_quality",
    output_path: Optional[str] = None
) -> Dict[str, Any]:
    """
    Backtest time series models using multiple validation windows.

    Trains models with multi-window cross-validation for robust performance
    estimates. More reliable than a single train/test split.

    Args:
        file_path: Path to time series CSV/Parquet
        target_col: Column with values to forecast
        time_col: Column with timestamps/dates
        forecast_horizon: Periods to predict per window
        id_col: Column identifying different series
        freq: Frequency string ('D', 'h', 'W', 'MS')
        num_val_windows: Number of backtesting windows (default: 3)
        time_limit: Max training time in seconds
        presets: Quality preset
        output_path: Path to save backtest predictions CSV

    Returns:
        Dictionary with per-window evaluation and aggregate metrics
        ('status' key is always present; errors reported in-band).
    """
    # Lazy import of autogluon.timeseries via the module-level helper.
    TimeSeriesPredictor, TimeSeriesDataFrame = _ensure_autogluon_timeseries()

    start_time = time.time()
    output_dir = "./outputs/autogluon_ts_backtest"
    output_path = output_path or "./outputs/autogluon_backtest.csv"

    if not Path(file_path).exists():
        return {"status": "error", "message": f"File not found: {file_path}"}

    print(f"\n📊 Time Series Backtesting ({num_val_windows} windows)...")
    print(f" 📁 Dataset: {file_path}")
    print(f" 🎯 Target: {target_col}")
    print(f" 🔮 Horizon: {forecast_horizon} periods × {num_val_windows} windows")

    # Load data: CSV first, fall back to Parquet on any read error.
    try:
        df = pd.read_csv(file_path)
    except Exception:
        try:
            df = pd.read_parquet(file_path)
        except Exception as e:
            return {"status": "error", "message": f"Failed to load data: {str(e)}"}

    if target_col not in df.columns or time_col not in df.columns:
        return {"status": "error", "message": f"Columns not found. Available: {list(df.columns)}"}

    # Chronological order is required for a valid time series.
    df[time_col] = pd.to_datetime(df[time_col])
    df = df.sort_values(time_col)

    # Single-series fallback: synthesize one constant series id when no
    # (valid) id column was supplied.
    if id_col is None or id_col not in df.columns:
        id_col_name = "__series_id"
        df[id_col_name] = "series_0"
    else:
        id_col_name = id_col

    # Auto-detect frequency from the median timestamp gap when not given;
    # thresholds map roughly to hourly / daily / weekly / month-start,
    # defaulting to daily for anything coarser.
    if freq is None:
        time_diffs = df[time_col].diff().dropna()
        median_diff = time_diffs.median()
        if median_diff <= pd.Timedelta(hours=2):
            freq = "h"
        elif median_diff <= pd.Timedelta(days=1.5):
            freq = "D"
        elif median_diff <= pd.Timedelta(days=8):
            freq = "W"
        elif median_diff <= pd.Timedelta(days=35):
            freq = "MS"
        else:
            freq = "D"

    try:
        ts_df = TimeSeriesDataFrame.from_data_frame(
            df, id_column=id_col_name, timestamp_column=time_col
        )
    except Exception as e:
        return {"status": "error", "message": f"Failed to create time series: {str(e)}"}

    # Remove any stale model directory before training.
    if Path(output_dir).exists():
        shutil.rmtree(output_dir, ignore_errors=True)

    resource_config = _get_resource_config()

    try:
        predictor = TimeSeriesPredictor(
            target=target_col,
            prediction_length=forecast_horizon,
            path=output_dir,
            freq=freq
        )

        # num_val_windows drives AutoGluon's multi-window backtest validation;
        # time_limit is capped by the environment's resource budget.
        predictor.fit(
            train_data=ts_df,
            time_limit=min(time_limit, resource_config["time_limit"]),
            presets=presets,
            num_val_windows=num_val_windows
        )
    except Exception as e:
        return {"status": "error", "message": f"Backtest training failed: {str(e)}"}

    elapsed = time.time() - start_time

    # Get backtest predictions — best-effort: if unavailable, the CSV is
    # simply not written and the summary below still returns.
    # NOTE(review): backtest_predictions() availability depends on the
    # installed autogluon.timeseries version — confirm.
    try:
        bt_preds = predictor.backtest_predictions()
        Path(output_path).parent.mkdir(parents=True, exist_ok=True)
        bt_df = bt_preds.reset_index()
        bt_df.to_csv(output_path, index=False)
    except Exception:
        bt_preds = None

    # Leaderboard (JSON-safe top-10 summary).
    leaderboard = predictor.leaderboard(silent=True)
    leaderboard_data = []
    for _, row in leaderboard.head(10).iterrows():
        leaderboard_data.append({
            "model": str(row.get("model", "")),
            "score_val": round(float(row.get("score_val", 0)), 4),
            "fit_time": round(float(row.get("fit_time", 0)), 1),
        })

    best_model = predictor.model_best if hasattr(predictor, 'model_best') else "unknown"
    best_score = leaderboard_data[0]["score_val"] if leaderboard_data else None

    print(f"\n{'='*60}")
    print(f"✅ BACKTESTING COMPLETE ({num_val_windows} windows)")
    print(f"{'='*60}")
    print(f"🏆 Best: {best_model} (score: {best_score})")
    print(f"⏱️ Time: {elapsed:.1f}s")
    print(f"{'='*60}\n")

    return {
        "status": "success",
        "task_type": "backtesting",
        "num_val_windows": num_val_windows,
        "forecast_horizon": forecast_horizon,
        "best_model": best_model,
        "best_score": best_score,
        "n_models_trained": len(leaderboard),
        "training_time_seconds": round(elapsed, 1),
        "leaderboard": leaderboard_data,
        "output_path": output_path,
        "model_path": output_dir
    }
|
| 1329 |
+
|
| 1330 |
+
|
| 1331 |
+
# ============================================================
|
| 1332 |
+
# TIME SERIES ANALYSIS (#34, #35, #37)
|
| 1333 |
+
# ============================================================
|
| 1334 |
+
|
| 1335 |
+
def analyze_timeseries_model(
    model_path: str,
    data_path: str,
    time_col: str,
    id_col: Optional[str] = None,
    operation: str = "feature_importance",
    output_path: Optional[str] = None
) -> Dict[str, Any]:
    """
    Analyze a trained AutoGluon time series model.

    Operations:
    - feature_importance: Permutation importance of covariates
    - plot: Generate forecast vs actuals visualization
    - make_future_dataframe: Generate future timestamp skeleton for prediction

    Args:
        model_path: Path to saved AutoGluon TimeSeriesPredictor
        data_path: Path to time series data
        time_col: Column with timestamps/dates
        id_col: Column identifying different series
        operation: One of 'feature_importance', 'plot', 'make_future_dataframe'
        output_path: Path to save output

    Returns:
        Dictionary with analysis results (always has a 'status' key; never raises)
    """
    TimeSeriesPredictor, TimeSeriesDataFrame = _ensure_autogluon_timeseries()

    if not Path(model_path).exists():
        return {"status": "error", "message": f"Model not found: {model_path}"}
    if not Path(data_path).exists():
        return {"status": "error", "message": f"Data not found: {data_path}"}

    try:
        predictor = TimeSeriesPredictor.load(model_path)
    except Exception as e:
        return {"status": "error", "message": f"Failed to load model: {str(e)}"}

    # Reconstruct TimeSeriesDataFrame from the raw file
    try:
        df = pd.read_csv(data_path)
        df[time_col] = pd.to_datetime(df[time_col])
        df = df.sort_values(time_col)

        # Single-series data gets a synthetic id column so AutoGluon accepts it
        if id_col is None or id_col not in df.columns:
            id_col_name = "__series_id"
            df[id_col_name] = "series_0"
        else:
            id_col_name = id_col

        ts_df = TimeSeriesDataFrame.from_data_frame(
            df, id_column=id_col_name, timestamp_column=time_col
        )
    except Exception as e:
        return {"status": "error", "message": f"Failed to create time series data: {str(e)}"}

    try:
        if operation == "feature_importance":
            fi = predictor.feature_importance(ts_df)

            # 🔧 FIX: AutoGluon versions differ in return type here — a pd.Series
            # used to fall through and yield an empty feature list. Normalize to
            # a DataFrame so both return shapes are handled.
            if isinstance(fi, pd.Series):
                fi = fi.to_frame(name="importance")

            fi_data = []
            if isinstance(fi, pd.DataFrame):
                for feat in fi.index:
                    row_data = {"feature": str(feat)}
                    for col in fi.columns:
                        try:
                            row_data[str(col)] = round(float(fi.loc[feat, col]), 4)
                        except (TypeError, ValueError):
                            # Non-numeric cells (e.g. model names) pass through as text
                            row_data[str(col)] = str(fi.loc[feat, col])
                    fi_data.append(row_data)

            return {
                "status": "success",
                "operation": "feature_importance",
                "features": fi_data,
                "model_path": model_path,
                "message": f"Feature importance computed for {len(fi_data)} features"
            }

        elif operation == "plot":
            output_path = output_path or "./outputs/plots/ts_forecast_plot.png"
            Path(output_path).parent.mkdir(parents=True, exist_ok=True)

            # Headless backend: this runs server-side with no display
            import matplotlib
            matplotlib.use('Agg')
            import matplotlib.pyplot as plt

            predictions = predictor.predict(ts_df)

            try:
                predictor.plot(ts_df, predictions, quantile_levels=[0.1, 0.9])
                plt.savefig(output_path, dpi=150, bbox_inches='tight')
                plt.close()
            except Exception:
                # Fallback: manual plot of up to 3 series (actuals + mean forecast)
                fig, ax = plt.subplots(figsize=(12, 6))
                target = predictor.target

                for item_id in list(ts_df.item_ids)[:3]:
                    actual = ts_df.loc[item_id][target].tail(100)
                    ax.plot(actual.index, actual.values, label=f'Actual ({item_id})', linewidth=1.5)

                    if item_id in predictions.item_ids:
                        pred = predictions.loc[item_id]
                        mean_col = "mean" if "mean" in pred.columns else pred.columns[0]
                        ax.plot(pred.index, pred[mean_col].values, '--', label=f'Forecast ({item_id})', linewidth=1.5)

                ax.set_title(f'Time Series Forecast - {predictor.model_best}')
                ax.legend()
                ax.grid(True, alpha=0.3)
                plt.tight_layout()
                plt.savefig(output_path, dpi=150, bbox_inches='tight')
                plt.close()

            return {
                "status": "success",
                "operation": "plot",
                "output_path": output_path,
                "message": f"Forecast plot saved to {output_path}"
            }

        elif operation == "make_future_dataframe":
            output_path = output_path or "./outputs/future_dataframe.csv"
            Path(output_path).parent.mkdir(parents=True, exist_ok=True)

            future_df = predictor.make_future_data_frame(ts_df)
            future_df.reset_index().to_csv(output_path, index=False)

            return {
                "status": "success",
                "operation": "make_future_dataframe",
                "shape": list(future_df.shape),
                "columns": list(future_df.columns) if hasattr(future_df, 'columns') else [],
                "output_path": output_path,
                "message": f"Future dataframe generated: {len(future_df)} rows"
            }

        else:
            return {
                "status": "error",
                "message": f"Unknown operation '{operation}'. Choose: feature_importance, plot, make_future_dataframe"
            }

    except Exception as e:
        return {"status": "error", "message": f"Analysis failed: {str(e)}"}
|
src/tools/business_intelligence.py
CHANGED
|
@@ -649,8 +649,69 @@ def detect_causal_relationships(
|
|
| 649 |
result["statistical_significance"] = float(p_value)
|
| 650 |
result["causal_effect"] = float(uplift)
|
| 651 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 652 |
else:
|
| 653 |
-
raise ValueError(f"Unknown method '{method}'. Use 'granger', 'propensity', or '
|
| 654 |
|
| 655 |
print(f"✅ Causal analysis complete!")
|
| 656 |
if result.get("causal_effect") is not None:
|
|
|
|
| 649 |
result["statistical_significance"] = float(p_value)
|
| 650 |
result["causal_effect"] = float(uplift)
|
| 651 |
|
| 652 |
+
elif method == "dowhy":
|
| 653 |
+
# DoWhy causal inference - formal causal graph approach
|
| 654 |
+
try:
|
| 655 |
+
import dowhy
|
| 656 |
+
from dowhy import CausalModel
|
| 657 |
+
except ImportError:
|
| 658 |
+
raise ValueError("dowhy not installed. Install with: pip install dowhy>=0.11")
|
| 659 |
+
|
| 660 |
+
print(" Building DoWhy causal model...")
|
| 661 |
+
|
| 662 |
+
df = data.to_pandas()
|
| 663 |
+
|
| 664 |
+
# Build causal model
|
| 665 |
+
# Construct a simple causal graph: covariates -> treatment -> outcome
|
| 666 |
+
if covariates:
|
| 667 |
+
graph_dot = f'digraph {{ {treatment_column} -> {outcome_column};'
|
| 668 |
+
for cov in covariates:
|
| 669 |
+
graph_dot += f' {cov} -> {treatment_column}; {cov} -> {outcome_column};'
|
| 670 |
+
graph_dot += ' }'
|
| 671 |
+
else:
|
| 672 |
+
graph_dot = f'digraph {{ {treatment_column} -> {outcome_column}; }}'
|
| 673 |
+
|
| 674 |
+
model = CausalModel(
|
| 675 |
+
data=df,
|
| 676 |
+
treatment=treatment_column,
|
| 677 |
+
outcome=outcome_column,
|
| 678 |
+
common_causes=covariates,
|
| 679 |
+
graph=graph_dot
|
| 680 |
+
)
|
| 681 |
+
|
| 682 |
+
# Identify causal effect
|
| 683 |
+
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
|
| 684 |
+
|
| 685 |
+
# Estimate using linear regression (lightweight)
|
| 686 |
+
estimate = model.estimate_effect(
|
| 687 |
+
identified_estimand,
|
| 688 |
+
method_name="backdoor.linear_regression"
|
| 689 |
+
)
|
| 690 |
+
|
| 691 |
+
# Refutation test (placebo treatment)
|
| 692 |
+
try:
|
| 693 |
+
refutation = model.refute_estimate(
|
| 694 |
+
identified_estimand,
|
| 695 |
+
estimate,
|
| 696 |
+
method_name="placebo_treatment_refuter",
|
| 697 |
+
placebo_type="permute",
|
| 698 |
+
num_simulations=20
|
| 699 |
+
)
|
| 700 |
+
refutation_result = {
|
| 701 |
+
"new_effect": float(refutation.new_effect) if hasattr(refutation, 'new_effect') else None,
|
| 702 |
+
"p_value": float(refutation.refutation_result.get('p_value', 1.0)) if hasattr(refutation, 'refutation_result') and isinstance(refutation.refutation_result, dict) else None
|
| 703 |
+
}
|
| 704 |
+
except Exception:
|
| 705 |
+
refutation_result = {"note": "Refutation test could not be completed"}
|
| 706 |
+
|
| 707 |
+
result["causal_effect"] = float(estimate.value)
|
| 708 |
+
result["estimand"] = str(identified_estimand)
|
| 709 |
+
result["estimation_method"] = "backdoor.linear_regression"
|
| 710 |
+
result["refutation"] = refutation_result
|
| 711 |
+
result["statistical_significance"] = None # DoWhy uses refutation instead
|
| 712 |
+
|
| 713 |
else:
|
| 714 |
+
raise ValueError(f"Unknown method '{method}'. Use 'granger', 'propensity', 'uplift', or 'dowhy'")
|
| 715 |
|
| 716 |
print(f"✅ Causal analysis complete!")
|
| 717 |
if result.get("causal_effect") is not None:
|
src/tools/code_interpreter.py
CHANGED
|
@@ -177,8 +177,10 @@ except Exception as e:
|
|
| 177 |
|
| 178 |
# Track existing files BEFORE execution to detect new files
|
| 179 |
existing_files = set()
|
|
|
|
|
|
|
| 180 |
if allow_file_operations:
|
| 181 |
-
for output_dir in
|
| 182 |
if os.path.exists(output_dir):
|
| 183 |
for file_path in Path(output_dir).resolve().glob('**/*'):
|
| 184 |
if file_path.is_file():
|
|
@@ -229,9 +231,11 @@ except Exception as e:
|
|
| 229 |
|
| 230 |
# Success! Find NEWLY generated files (not existing before execution)
|
| 231 |
generated_files = []
|
|
|
|
|
|
|
| 232 |
if allow_file_operations:
|
| 233 |
cwd = Path.cwd()
|
| 234 |
-
for output_dir in
|
| 235 |
if os.path.exists(output_dir):
|
| 236 |
abs_output_dir = Path(output_dir).resolve()
|
| 237 |
for file_path in abs_output_dir.glob('**/*'):
|
|
|
|
| 177 |
|
| 178 |
# Track existing files BEFORE execution to detect new files
|
| 179 |
existing_files = set()
|
| 180 |
+
# 🔥 FIX: Also scan /tmp/data_science_agent/ since LLM often saves files there
|
| 181 |
+
scan_dirs = ['./outputs/code', './outputs/data', './outputs/plots', '/tmp/data_science_agent']
|
| 182 |
if allow_file_operations:
|
| 183 |
+
for output_dir in scan_dirs:
|
| 184 |
if os.path.exists(output_dir):
|
| 185 |
for file_path in Path(output_dir).resolve().glob('**/*'):
|
| 186 |
if file_path.is_file():
|
|
|
|
| 231 |
|
| 232 |
# Success! Find NEWLY generated files (not existing before execution)
|
| 233 |
generated_files = []
|
| 234 |
+
# 🔥 FIX: Also scan /tmp/data_science_agent/ for files created by LLM code
|
| 235 |
+
scan_dirs = ['./outputs/code', './outputs/data', './outputs/plots', '/tmp/data_science_agent']
|
| 236 |
if allow_file_operations:
|
| 237 |
cwd = Path.cwd()
|
| 238 |
+
for output_dir in scan_dirs:
|
| 239 |
if os.path.exists(output_dir):
|
| 240 |
abs_output_dir = Path(output_dir).resolve()
|
| 241 |
for file_path in abs_output_dir.glob('**/*'):
|
src/tools/data_cleaning.py
CHANGED
|
@@ -124,10 +124,81 @@ def clean_missing_values(file_path: str, strategy,
|
|
| 124 |
strategy = strategy_dict
|
| 125 |
print(f"🔧 Applying '{list(strategy_dict.values())[0] if strategy_dict else strategy}' strategy to {len(strategy_dict)} columns with missing values")
|
| 126 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 127 |
else:
|
| 128 |
return {
|
| 129 |
"success": False,
|
| 130 |
-
"error": f"Invalid strategy '{strategy}'. Use 'auto', 'median', 'mean', 'mode', 'forward_fill', 'drop', or provide a dictionary.",
|
| 131 |
"error_type": "ValueError"
|
| 132 |
}
|
| 133 |
|
|
|
|
| 124 |
strategy = strategy_dict
|
| 125 |
print(f"🔧 Applying '{list(strategy_dict.values())[0] if strategy_dict else strategy}' strategy to {len(strategy_dict)} columns with missing values")
|
| 126 |
|
| 127 |
+
elif strategy in ["iterative", "mice"]:
|
| 128 |
+
# MICE / Iterative Imputation using sklearn IterativeImputer
|
| 129 |
+
# This handles ALL numeric columns at once (multivariate imputation)
|
| 130 |
+
print(f"🔧 Applying Iterative (MICE) imputation to numeric columns...")
|
| 131 |
+
try:
|
| 132 |
+
from sklearn.experimental import enable_iterative_imputer # noqa: F401
|
| 133 |
+
from sklearn.impute import IterativeImputer
|
| 134 |
+
from sklearn.linear_model import BayesianRidge
|
| 135 |
+
import pandas as pd
|
| 136 |
+
|
| 137 |
+
# Identify numeric columns with missing values
|
| 138 |
+
numeric_cols_with_nulls = [
|
| 139 |
+
col for col in numeric_cols if df[col].null_count() > 0
|
| 140 |
+
]
|
| 141 |
+
|
| 142 |
+
if not numeric_cols_with_nulls:
|
| 143 |
+
print(" ℹ️ No numeric columns with missing values for MICE imputation")
|
| 144 |
+
else:
|
| 145 |
+
# Convert numeric columns to pandas for IterativeImputer
|
| 146 |
+
df_pd = df.select(numeric_cols).to_pandas()
|
| 147 |
+
|
| 148 |
+
# Fit and transform
|
| 149 |
+
imputer = IterativeImputer(
|
| 150 |
+
estimator=BayesianRidge(),
|
| 151 |
+
max_iter=10,
|
| 152 |
+
random_state=42,
|
| 153 |
+
missing_values=float('nan')
|
| 154 |
+
)
|
| 155 |
+
imputed_data = imputer.fit_transform(df_pd)
|
| 156 |
+
|
| 157 |
+
# Replace columns back in Polars DataFrame
|
| 158 |
+
for i, col_name in enumerate(numeric_cols):
|
| 159 |
+
df = df.with_columns(
|
| 160 |
+
pl.Series(col_name, imputed_data[:, i])
|
| 161 |
+
)
|
| 162 |
+
|
| 163 |
+
for col_name in numeric_cols_with_nulls:
|
| 164 |
+
report["columns_processed"][col_name] = {
|
| 165 |
+
"status": "success",
|
| 166 |
+
"strategy": "iterative_mice",
|
| 167 |
+
"nulls_before": int(df[col_name].null_count()), # Should be 0 now
|
| 168 |
+
"nulls_after": 0
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
print(f" ✅ MICE imputed {len(numeric_cols_with_nulls)} numeric columns using {len(numeric_cols)} features")
|
| 172 |
+
|
| 173 |
+
# Handle remaining non-numeric columns with mode
|
| 174 |
+
for col in df.columns:
|
| 175 |
+
if df[col].null_count() > 0 and col not in numeric_cols:
|
| 176 |
+
mode_val = df[col].drop_nulls().mode().first()
|
| 177 |
+
if mode_val is not None:
|
| 178 |
+
df = df.with_columns(
|
| 179 |
+
pl.col(col).fill_null(mode_val).alias(col)
|
| 180 |
+
)
|
| 181 |
+
report["columns_processed"][col] = {
|
| 182 |
+
"status": "success",
|
| 183 |
+
"strategy": "mode (non-numeric fallback)",
|
| 184 |
+
"nulls_before": int(df[col].null_count()),
|
| 185 |
+
"nulls_after": 0
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
except ImportError:
|
| 189 |
+
return {
|
| 190 |
+
"success": False,
|
| 191 |
+
"error": "IterativeImputer requires scikit-learn >= 1.4. Install with: pip install scikit-learn>=1.4",
|
| 192 |
+
"error_type": "MissingDependency"
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
# Skip per-column processing for MICE (already handled above)
|
| 196 |
+
strategy = {}
|
| 197 |
+
|
| 198 |
else:
|
| 199 |
return {
|
| 200 |
"success": False,
|
| 201 |
+
"error": f"Invalid strategy '{strategy}'. Use 'auto', 'median', 'mean', 'mode', 'forward_fill', 'drop', 'iterative', 'mice', or provide a dictionary.",
|
| 202 |
"error_type": "ValueError"
|
| 203 |
}
|
| 204 |
|
src/tools/data_profiling.py
CHANGED
|
@@ -486,3 +486,108 @@ def analyze_correlations(file_path: str, target: Optional[str] = None) -> Dict[s
|
|
| 486 |
}
|
| 487 |
|
| 488 |
return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 486 |
}
|
| 487 |
|
| 488 |
return result
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
def detect_label_errors(
    file_path: str,
    target_col: str,
    features: Optional[List[str]] = None,
    n_folds: int = 5,
    output_path: Optional[str] = None
) -> Dict[str, Any]:
    """
    Detect potential label errors in a classification dataset using cleanlab.

    Uses confident learning to find mislabeled examples by:
    1. Training cross-validated classifiers
    2. Computing out-of-sample predicted probabilities
    3. Identifying labels that disagree with model predictions

    Args:
        file_path: Path to dataset
        target_col: Target/label column name
        features: Feature columns to use (None = all numeric)
        n_folds: Number of cross-validation folds
        output_path: Optional path to save flagged rows

    Returns:
        Dictionary with label error analysis results
    """
    try:
        from cleanlab.classification import CleanLearning
    except ImportError:
        return {
            'status': 'error',
            'message': 'cleanlab not installed. Install with: pip install cleanlab>=2.6'
        }

    from sklearn.linear_model import LogisticRegression
    from sklearn.preprocessing import LabelEncoder

    validate_file_exists(file_path)
    validate_file_format(file_path)

    df = load_dataframe(file_path)
    validate_dataframe(df)
    validate_column_exists(df, target_col)

    print(f"🔍 Detecting label errors in '{target_col}' using cleanlab...")

    # Get features (default: every numeric column except the target)
    if features is None:
        features = get_numeric_columns(df)
    features = [f for f in features if f != target_col]

    if not features:
        return {'status': 'error', 'message': 'No numeric features found for label error detection'}

    # Convert to pandas/numpy for sklearn/cleanlab
    df_pd = df.to_pandas()
    X = df_pd[features].fillna(0).values
    y_raw = df_pd[target_col].values

    # Encode labels to contiguous integers (cleanlab requirement)
    le = LabelEncoder()
    y = le.fit_transform(y_raw)

    # 🔧 FIX: 'multi_class' was deprecated in scikit-learn 1.5 and removed in 1.7,
    # so passing multi_class='auto' now fails. 'auto' was the default behaviour
    # anyway, so simply omit the argument.
    cl = CleanLearning(
        clf=LogisticRegression(max_iter=500, solver='lbfgs'),
        cv_n_folds=n_folds
    )

    label_issues = cl.find_label_issues(X, y)

    # Cast to plain int up front so every downstream use (and JSON
    # serialization of the result dict) sees native Python types.
    n_issues = int(label_issues['is_label_issue'].sum())
    issue_indices = label_issues[label_issues['is_label_issue']].index.tolist()

    # Get details for flagged rows
    flagged_rows = []
    for idx in issue_indices[:50]:  # Limit to top 50
        flagged_rows.append({
            'row_index': int(idx),
            'current_label': str(y_raw[idx]),
            'suggested_label': str(le.inverse_transform([label_issues.loc[idx, 'predicted_label']])[0]) if 'predicted_label' in label_issues.columns else 'unknown',
            'confidence': float(1 - label_issues.loc[idx, 'label_quality']) if 'label_quality' in label_issues.columns else None
        })

    print(f" 🚨 Found {n_issues} potential label errors ({n_issues/len(y)*100:.1f}%)")

    # Save flagged rows
    if output_path and issue_indices:
        flagged_df = df_pd.iloc[issue_indices]
        flagged_df.to_csv(output_path, index=False)
        print(f" 💾 Flagged rows saved to: {output_path}")

    return {
        'status': 'success',
        'total_samples': len(y),
        'label_errors_found': n_issues,
        'error_percentage': round(n_issues / len(y) * 100, 2),
        'flagged_rows': flagged_rows,
        'n_classes': len(le.classes_),
        'classes': le.classes_.tolist(),
        'output_path': output_path,
        'recommendation': f'Review {n_issues} flagged samples for potential mislabeling' if n_issues > 0 else 'No label errors detected'
    }
|
src/tools/eda_reports.py
CHANGED
|
@@ -161,3 +161,100 @@ def generate_ydata_profiling_report(
|
|
| 161 |
"error": f"Failed to generate ydata-profiling report: {str(e)}",
|
| 162 |
"error_type": type(e).__name__
|
| 163 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 161 |
"error": f"Failed to generate ydata-profiling report: {str(e)}",
|
| 162 |
"error_type": type(e).__name__
|
| 163 |
}
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def generate_sweetviz_report(
    file_path: str,
    target_col: Optional[str] = None,
    compare_file_path: Optional[str] = None,
    output_path: str = "./outputs/reports/sweetviz_report.html",
    title: str = "Sweetviz EDA Report"
) -> Dict[str, Any]:
    """
    Build an interactive Sweetviz EDA report, optionally comparing two datasets.

    Sweetviz produces a self-contained HTML dashboard covering per-feature
    distributions, target associations (when a target column is supplied),
    train-vs-test comparison (when a second dataset is supplied), and
    numeric/categorical correlation summaries.

    Args:
        file_path: Path to the dataset CSV file
        target_col: Optional target column for supervised analysis
        compare_file_path: Optional second dataset for comparison (e.g., test set)
        output_path: Where to save the HTML report
        title: Title for the report

    Returns:
        Dict with success status and report path
    """
    try:
        import sweetviz as sv
        import pandas as pd
    except ImportError:
        return {
            "success": False,
            "error": "sweetviz not installed. Install with: pip install sweetviz>=2.3",
            "error_type": "MissingDependency"
        }

    try:
        # Load the primary dataset (only CSV and Parquet are supported)
        if file_path.endswith('.csv'):
            frame = pd.read_csv(file_path)
        elif file_path.endswith('.parquet'):
            frame = pd.read_parquet(file_path)
        else:
            raise ValueError(f"Unsupported file format: {file_path}")

        # Make sure the report directory exists before writing
        report_dir = os.path.dirname(output_path) or "./outputs/reports"
        os.makedirs(report_dir, exist_ok=True)

        has_target = bool(target_col) and target_col in frame.columns

        if compare_file_path:
            # Comparison mode: load the second dataset (train-vs-test style)
            if compare_file_path.endswith('.csv'):
                other = pd.read_csv(compare_file_path)
            else:
                other = pd.read_parquet(compare_file_path)

            print(f"📊 Generating Sweetviz comparison report...")
            if has_target:
                report = sv.compare([frame, "Dataset 1"], [other, "Dataset 2"], target_feat=target_col)
            else:
                report = sv.compare([frame, "Dataset 1"], [other, "Dataset 2"])
        else:
            # Single-dataset analysis
            print(f"📊 Generating Sweetviz EDA report...")
            report = sv.analyze(frame, target_feat=target_col) if has_target else sv.analyze(frame)

        # open_browser=False keeps headless/server environments happy
        report.show_html(output_path, open_browser=False)

        print(f"✅ Sweetviz report saved to: {output_path}")

        return {
            "success": True,
            "report_path": output_path,
            "message": f"✅ Sweetviz report generated at: {output_path}",
            "statistics": {
                "rows": len(frame),
                "columns": len(frame.columns),
                "target_column": target_col,
                "comparison_mode": compare_file_path is not None
            }
        }

    except Exception as e:
        return {
            "success": False,
            "error": f"Failed to generate Sweetviz report: {str(e)}",
            "error_type": type(e).__name__
        }
|
src/tools/feature_engineering.py
CHANGED
|
@@ -242,7 +242,7 @@ def encode_categorical(file_path: str, method: str = "auto", columns: Optional[L
|
|
| 242 |
# Create new column with frequencies
|
| 243 |
new_col_name = f"{col}_freq"
|
| 244 |
df = df.with_columns(
|
| 245 |
-
pl.col(col).
|
| 246 |
)
|
| 247 |
|
| 248 |
# Drop original column
|
|
@@ -274,7 +274,7 @@ def encode_categorical(file_path: str, method: str = "auto", columns: Optional[L
|
|
| 274 |
# Create new column with target encoding
|
| 275 |
new_col_name = f"{col}_target_enc"
|
| 276 |
df = df.with_columns(
|
| 277 |
-
pl.col(col).
|
| 278 |
)
|
| 279 |
|
| 280 |
# Drop original column
|
|
|
|
| 242 |
# Create new column with frequencies
|
| 243 |
new_col_name = f"{col}_freq"
|
| 244 |
df = df.with_columns(
|
| 245 |
+
pl.col(col).replace_strict(freq_map, default=0.0).alias(new_col_name)
|
| 246 |
)
|
| 247 |
|
| 248 |
# Drop original column
|
|
|
|
| 274 |
# Create new column with target encoding
|
| 275 |
new_col_name = f"{col}_target_enc"
|
| 276 |
df = df.with_columns(
|
| 277 |
+
pl.col(col).replace_strict(target_map, default=global_mean).alias(new_col_name)
|
| 278 |
)
|
| 279 |
|
| 280 |
# Drop original column
|
src/tools/nlp_text_analytics.py
CHANGED
|
@@ -483,20 +483,48 @@ def analyze_sentiment_advanced(
|
|
| 483 |
result["emotions"] = None
|
| 484 |
|
| 485 |
else:
|
| 486 |
-
#
|
| 487 |
-
|
| 488 |
-
|
| 489 |
-
|
| 490 |
-
|
| 491 |
-
|
| 492 |
-
|
| 493 |
-
|
| 494 |
-
|
| 495 |
-
|
| 496 |
-
|
| 497 |
-
|
| 498 |
-
|
| 499 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 500 |
|
| 501 |
# Aspect-based sentiment
|
| 502 |
if aspects:
|
|
|
|
| 483 |
result["emotions"] = None
|
| 484 |
|
| 485 |
else:
|
| 486 |
+
# Check if method is 'vader' - use vaderSentiment
|
| 487 |
+
if method == "vader":
|
| 488 |
+
try:
|
| 489 |
+
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
|
| 490 |
+
print(" Using VADER for sentiment analysis...")
|
| 491 |
+
|
| 492 |
+
analyzer = SentimentIntensityAnalyzer()
|
| 493 |
+
sentiments = []
|
| 494 |
+
for text in texts_clean:
|
| 495 |
+
scores = analyzer.polarity_scores(text)
|
| 496 |
+
label = "POSITIVE" if scores['compound'] > 0.05 else "NEGATIVE" if scores['compound'] < -0.05 else "NEUTRAL"
|
| 497 |
+
sentiments.append({
|
| 498 |
+
"compound": scores['compound'],
|
| 499 |
+
"positive": scores['pos'],
|
| 500 |
+
"negative": scores['neg'],
|
| 501 |
+
"neutral": scores['neu'],
|
| 502 |
+
"label": label,
|
| 503 |
+
"text": text[:100]
|
| 504 |
+
})
|
| 505 |
+
|
| 506 |
+
result["sentiments"] = sentiments
|
| 507 |
+
|
| 508 |
+
except ImportError:
|
| 509 |
+
print("⚠️ vaderSentiment not installed. Falling back to TextBlob.")
|
| 510 |
+
print(" Install with: pip install vaderSentiment>=3.3")
|
| 511 |
+
method = "textblob"
|
| 512 |
+
|
| 513 |
+
if method in ["textblob", "transformer"]:
|
| 514 |
+
# Fallback to TextBlob
|
| 515 |
+
print(" Using TextBlob for sentiment analysis...")
|
| 516 |
+
|
| 517 |
+
sentiments = []
|
| 518 |
+
for text in texts_clean:
|
| 519 |
+
blob = TextBlob(text)
|
| 520 |
+
sentiments.append({
|
| 521 |
+
"polarity": blob.sentiment.polarity,
|
| 522 |
+
"subjectivity": blob.sentiment.subjectivity,
|
| 523 |
+
"label": "POSITIVE" if blob.sentiment.polarity > 0 else "NEGATIVE" if blob.sentiment.polarity < 0 else "NEUTRAL",
|
| 524 |
+
"text": text[:100]
|
| 525 |
+
})
|
| 526 |
+
|
| 527 |
+
result["sentiments"] = sentiments
|
| 528 |
|
| 529 |
# Aspect-based sentiment
|
| 530 |
if aspects:
|
src/tools/production_mlops.py
CHANGED
|
@@ -659,3 +659,194 @@ def detect_feature_leakage(
|
|
| 659 |
'total_issues': total_issues,
|
| 660 |
'recommendation': 'Review and remove suspicious features before training' if total_issues > 0 else 'No obvious leakage detected'
|
| 661 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 659 |
'total_issues': total_issues,
|
| 660 |
'recommendation': 'Review and remove suspicious features before training' if total_issues > 0 else 'No obvious leakage detected'
|
| 661 |
}
|
| 662 |
+
|
| 663 |
+
|
| 664 |
+
def monitor_drift_evidently(
|
| 665 |
+
reference_data_path: str,
|
| 666 |
+
current_data_path: str,
|
| 667 |
+
output_path: Optional[str] = None
|
| 668 |
+
) -> Dict[str, Any]:
|
| 669 |
+
"""
|
| 670 |
+
Generate a comprehensive data drift report using Evidently AI.
|
| 671 |
+
|
| 672 |
+
Evidently provides production-grade drift detection with:
|
| 673 |
+
- Statistical tests per feature (KS, Chi-squared, Jensen-Shannon)
|
| 674 |
+
- Data quality metrics
|
| 675 |
+
- Interactive HTML dashboard
|
| 676 |
+
|
| 677 |
+
Args:
|
| 678 |
+
reference_data_path: Path to training/reference dataset
|
| 679 |
+
current_data_path: Path to production/current dataset
|
| 680 |
+
output_path: Path to save HTML drift report
|
| 681 |
+
|
| 682 |
+
Returns:
|
| 683 |
+
Dictionary with drift metrics and report path
|
| 684 |
+
"""
|
| 685 |
+
try:
|
| 686 |
+
from evidently.report import Report
|
| 687 |
+
from evidently.metric_preset import DataDriftPreset, DataQualityPreset
|
| 688 |
+
except ImportError:
|
| 689 |
+
return {
|
| 690 |
+
'status': 'error',
|
| 691 |
+
'message': 'evidently not installed. Install with: pip install evidently>=0.4'
|
| 692 |
+
}
|
| 693 |
+
|
| 694 |
+
import pandas as pd_ev
|
| 695 |
+
|
| 696 |
+
validate_file_exists(reference_data_path)
|
| 697 |
+
validate_file_exists(current_data_path)
|
| 698 |
+
|
| 699 |
+
# Load data as pandas (evidently requires pandas)
|
| 700 |
+
ref_df = load_dataframe(reference_data_path).to_pandas()
|
| 701 |
+
curr_df = load_dataframe(current_data_path).to_pandas()
|
| 702 |
+
|
| 703 |
+
print("🔍 Generating Evidently drift report...")
|
| 704 |
+
|
| 705 |
+
# Create drift report
|
| 706 |
+
report = Report(metrics=[
|
| 707 |
+
DataDriftPreset(),
|
| 708 |
+
DataQualityPreset()
|
| 709 |
+
])
|
| 710 |
+
|
| 711 |
+
report.run(reference_data=ref_df, current_data=curr_df)
|
| 712 |
+
|
| 713 |
+
# Save HTML report
|
| 714 |
+
if output_path is None:
|
| 715 |
+
output_path = "./outputs/reports/evidently_drift_report.html"
|
| 716 |
+
|
| 717 |
+
os.makedirs(os.path.dirname(output_path), exist_ok=True)
|
| 718 |
+
report.save_html(output_path)
|
| 719 |
+
|
| 720 |
+
# Extract results as dict
|
| 721 |
+
report_dict = report.as_dict()
|
| 722 |
+
|
| 723 |
+
# Parse drift results
|
| 724 |
+
drift_metrics = report_dict.get('metrics', [])
|
| 725 |
+
|
| 726 |
+
drifted_features = []
|
| 727 |
+
total_features = 0
|
| 728 |
+
for metric in drift_metrics:
|
| 729 |
+
result_data = metric.get('result', {})
|
| 730 |
+
if 'drift_by_columns' in result_data:
|
| 731 |
+
for col_name, col_data in result_data['drift_by_columns'].items():
|
| 732 |
+
total_features += 1
|
| 733 |
+
if col_data.get('drift_detected', False):
|
| 734 |
+
drifted_features.append(col_name)
|
| 735 |
+
|
| 736 |
+
print(f"✅ Evidently report saved to: {output_path}")
|
| 737 |
+
print(f" 📊 {len(drifted_features)}/{total_features} features with drift detected")
|
| 738 |
+
|
| 739 |
+
return {
|
| 740 |
+
'status': 'success',
|
| 741 |
+
'report_path': output_path,
|
| 742 |
+
'total_features_analyzed': total_features,
|
| 743 |
+
'drifted_features': drifted_features,
|
| 744 |
+
'n_drifted': len(drifted_features),
|
| 745 |
+
'recommendation': 'Retrain model' if drifted_features else 'No significant drift detected'
|
| 746 |
+
}
|
| 747 |
+
|
| 748 |
+
|
| 749 |
+
def explain_with_dtreeviz(
|
| 750 |
+
model_path: str,
|
| 751 |
+
data_path: str,
|
| 752 |
+
target_col: str,
|
| 753 |
+
feature_names: Optional[List[str]] = None,
|
| 754 |
+
instance_index: int = 0,
|
| 755 |
+
output_path: Optional[str] = None
|
| 756 |
+
) -> Dict[str, Any]:
|
| 757 |
+
"""
|
| 758 |
+
Generate tree visualization using dtreeviz for tree-based models.
|
| 759 |
+
|
| 760 |
+
Creates publication-quality decision tree visualizations showing:
|
| 761 |
+
- Decision path for individual predictions
|
| 762 |
+
- Feature distributions at each node
|
| 763 |
+
- Split thresholds with data histograms
|
| 764 |
+
|
| 765 |
+
Args:
|
| 766 |
+
model_path: Path to trained tree-based model (.pkl)
|
| 767 |
+
data_path: Path to dataset
|
| 768 |
+
target_col: Target column name
|
| 769 |
+
feature_names: List of feature names (auto-detected if None)
|
| 770 |
+
instance_index: Index of instance to trace through tree
|
| 771 |
+
output_path: Path to save SVG visualization
|
| 772 |
+
|
| 773 |
+
Returns:
|
| 774 |
+
Dictionary with visualization path and tree info
|
| 775 |
+
"""
|
| 776 |
+
try:
|
| 777 |
+
import dtreeviz
|
| 778 |
+
except ImportError:
|
| 779 |
+
return {
|
| 780 |
+
'status': 'error',
|
| 781 |
+
'message': 'dtreeviz not installed. Install with: pip install dtreeviz>=2.2'
|
| 782 |
+
}
|
| 783 |
+
|
| 784 |
+
validate_file_exists(model_path)
|
| 785 |
+
validate_file_exists(data_path)
|
| 786 |
+
|
| 787 |
+
model = joblib.load(model_path)
|
| 788 |
+
df = load_dataframe(data_path)
|
| 789 |
+
validate_dataframe(df)
|
| 790 |
+
|
| 791 |
+
# Prepare data
|
| 792 |
+
if target_col in df.columns:
|
| 793 |
+
X = df.drop(target_col).to_pandas()
|
| 794 |
+
y = df[target_col].to_pandas()
|
| 795 |
+
else:
|
| 796 |
+
X = df.to_pandas()
|
| 797 |
+
y = None
|
| 798 |
+
|
| 799 |
+
if feature_names is None:
|
| 800 |
+
feature_names = X.columns.tolist()
|
| 801 |
+
|
| 802 |
+
print(f"🌳 Generating dtreeviz visualization...")
|
| 803 |
+
|
| 804 |
+
if output_path is None:
|
| 805 |
+
output_path = "./outputs/reports/dtreeviz_tree.svg"
|
| 806 |
+
|
| 807 |
+
os.makedirs(os.path.dirname(output_path), exist_ok=True)
|
| 808 |
+
|
| 809 |
+
try:
|
| 810 |
+
# Check if model is a tree-based model
|
| 811 |
+
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
|
| 812 |
+
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
|
| 813 |
+
|
| 814 |
+
# For ensemble models, use the first estimator
|
| 815 |
+
tree_model = model
|
| 816 |
+
if hasattr(model, 'estimators_'):
|
| 817 |
+
tree_model = model.estimators_[0]
|
| 818 |
+
print(" 📌 Using first estimator from ensemble for visualization")
|
| 819 |
+
|
| 820 |
+
# Determine task type
|
| 821 |
+
is_classifier = hasattr(model, 'predict_proba')
|
| 822 |
+
|
| 823 |
+
# Create visualization
|
| 824 |
+
viz_model = dtreeviz.model(
|
| 825 |
+
tree_model,
|
| 826 |
+
X_train=X,
|
| 827 |
+
y_train=y,
|
| 828 |
+
feature_names=feature_names,
|
| 829 |
+
target_name=target_col,
|
| 830 |
+
class_names=list(map(str, sorted(y.unique()))) if is_classifier and y is not None else None
|
| 831 |
+
)
|
| 832 |
+
|
| 833 |
+
# Generate tree visualization
|
| 834 |
+
v = viz_model.view(x=X.iloc[instance_index])
|
| 835 |
+
v.save(output_path)
|
| 836 |
+
|
| 837 |
+
print(f"✅ Tree visualization saved to: {output_path}")
|
| 838 |
+
|
| 839 |
+
return {
|
| 840 |
+
'status': 'success',
|
| 841 |
+
'visualization_path': output_path,
|
| 842 |
+
'model_type': type(model).__name__,
|
| 843 |
+
'n_features': len(feature_names),
|
| 844 |
+
'instance_explained': instance_index,
|
| 845 |
+
'tree_depth': tree_model.get_depth() if hasattr(tree_model, 'get_depth') else 'unknown'
|
| 846 |
+
}
|
| 847 |
+
|
| 848 |
+
except Exception as e:
|
| 849 |
+
return {
|
| 850 |
+
'status': 'error',
|
| 851 |
+
'message': f'dtreeviz visualization failed: {str(e)}. Ensure model is tree-based (DecisionTree, RandomForest, XGBoost).'
|
| 852 |
+
}
|
src/tools/time_series.py
CHANGED
|
@@ -46,7 +46,7 @@ def forecast_time_series(
|
|
| 46 |
time_col: Time/date column name
|
| 47 |
target_col: Target variable to forecast
|
| 48 |
forecast_horizon: Number of periods to forecast ahead
|
| 49 |
-
method: Forecasting method ('arima', 'sarima', 'prophet', 'exponential_smoothing')
|
| 50 |
seasonal_period: Seasonal period (e.g., 7 for weekly, 12 for monthly)
|
| 51 |
output_path: Path to save forecast results
|
| 52 |
|
|
@@ -108,6 +108,57 @@ def forecast_time_series(
|
|
| 108 |
}
|
| 109 |
}
|
| 110 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
elif method == "arima":
|
| 112 |
# ARIMA model
|
| 113 |
ts_data = df_pd.set_index(time_col)[target_col]
|
|
|
|
| 46 |
time_col: Time/date column name
|
| 47 |
target_col: Target variable to forecast
|
| 48 |
forecast_horizon: Number of periods to forecast ahead
|
| 49 |
+
method: Forecasting method ('arima', 'auto_arima', 'sarima', 'prophet', 'exponential_smoothing')
|
| 50 |
seasonal_period: Seasonal period (e.g., 7 for weekly, 12 for monthly)
|
| 51 |
output_path: Path to save forecast results
|
| 52 |
|
|
|
|
| 108 |
}
|
| 109 |
}
|
| 110 |
|
| 111 |
+
elif method == "auto_arima":
|
| 112 |
+
# Auto ARIMA using pmdarima - automatically finds best (p,d,q) order
|
| 113 |
+
try:
|
| 114 |
+
import pmdarima as pm
|
| 115 |
+
except ImportError:
|
| 116 |
+
return {
|
| 117 |
+
'status': 'error',
|
| 118 |
+
'message': 'pmdarima not installed. Install with: pip install pmdarima>=2.0'
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
ts_data = df_pd.set_index(time_col)[target_col]
|
| 122 |
+
|
| 123 |
+
print("🔧 Running auto_arima to find optimal ARIMA order...")
|
| 124 |
+
auto_model = pm.auto_arima(
|
| 125 |
+
ts_data,
|
| 126 |
+
seasonal=bool(seasonal_period),
|
| 127 |
+
m=seasonal_period or 1,
|
| 128 |
+
stepwise=True,
|
| 129 |
+
suppress_warnings=True,
|
| 130 |
+
error_action='ignore',
|
| 131 |
+
max_p=5, max_q=5, max_d=2,
|
| 132 |
+
max_P=2, max_Q=2, max_D=1,
|
| 133 |
+
trace=False
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
# Forecast
|
| 137 |
+
forecast_vals, conf_int = auto_model.predict(
|
| 138 |
+
n_periods=forecast_horizon,
|
| 139 |
+
return_conf_int=True
|
| 140 |
+
)
|
| 141 |
+
forecast_index = pd.date_range(start=ts_data.index[-1], periods=forecast_horizon+1, freq='D')[1:]
|
| 142 |
+
|
| 143 |
+
result = {
|
| 144 |
+
'method': 'auto_arima',
|
| 145 |
+
'order': str(auto_model.order),
|
| 146 |
+
'seasonal_order': str(auto_model.seasonal_order) if seasonal_period else None,
|
| 147 |
+
'forecast': [
|
| 148 |
+
{
|
| 149 |
+
'date': str(date),
|
| 150 |
+
'value': float(val),
|
| 151 |
+
'lower_ci': float(ci[0]),
|
| 152 |
+
'upper_ci': float(ci[1])
|
| 153 |
+
}
|
| 154 |
+
for date, val, ci in zip(forecast_index, forecast_vals, conf_int)
|
| 155 |
+
],
|
| 156 |
+
'aic': float(auto_model.aic()),
|
| 157 |
+
'bic': float(auto_model.bic()),
|
| 158 |
+
'model_summary': str(auto_model.summary())
|
| 159 |
+
}
|
| 160 |
+
print(f" ✅ Best order: {auto_model.order} | AIC: {auto_model.aic():.2f}")
|
| 161 |
+
|
| 162 |
elif method == "arima":
|
| 163 |
# ARIMA model
|
| 164 |
ts_data = df_pd.set_index(time_col)[target_col]
|
src/tools/tools_registry.py
CHANGED
|
@@ -1067,6 +1067,40 @@ TOOLS = [
|
|
| 1067 |
}
|
| 1068 |
}
|
| 1069 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1070 |
|
| 1071 |
# ============================================
|
| 1072 |
# TIME SERIES (3)
|
|
@@ -1449,6 +1483,55 @@ TOOLS = [
|
|
| 1449 |
}
|
| 1450 |
}
|
| 1451 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1452 |
# ========================================
|
| 1453 |
# CODE INTERPRETER - THE GAME CHANGER 🚀
|
| 1454 |
# ========================================
|
|
@@ -1632,6 +1715,390 @@ TOOLS = [
|
|
| 1632 |
"required": ["project_id", "query"]
|
| 1633 |
}
|
| 1634 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1635 |
}
|
| 1636 |
]
|
| 1637 |
|
|
|
|
| 1067 |
}
|
| 1068 |
}
|
| 1069 |
},
|
| 1070 |
+
{
|
| 1071 |
+
"type": "function",
|
| 1072 |
+
"function": {
|
| 1073 |
+
"name": "monitor_drift_evidently",
|
| 1074 |
+
"description": "Generate comprehensive data drift report using Evidently AI. Provides statistical tests per feature, data quality metrics, and interactive HTML dashboard.",
|
| 1075 |
+
"parameters": {
|
| 1076 |
+
"type": "object",
|
| 1077 |
+
"properties": {
|
| 1078 |
+
"reference_data_path": {"type": "string", "description": "Path to training/reference dataset"},
|
| 1079 |
+
"current_data_path": {"type": "string", "description": "Path to production/current dataset"},
|
| 1080 |
+
"output_path": {"type": "string", "description": "Path to save HTML drift report"}
|
| 1081 |
+
},
|
| 1082 |
+
"required": ["reference_data_path", "current_data_path"]
|
| 1083 |
+
}
|
| 1084 |
+
}
|
| 1085 |
+
},
|
| 1086 |
+
{
|
| 1087 |
+
"type": "function",
|
| 1088 |
+
"function": {
|
| 1089 |
+
"name": "explain_with_dtreeviz",
|
| 1090 |
+
"description": "Generate publication-quality decision tree visualizations using dtreeviz. Shows decision path, feature distributions at each node, and split thresholds.",
|
| 1091 |
+
"parameters": {
|
| 1092 |
+
"type": "object",
|
| 1093 |
+
"properties": {
|
| 1094 |
+
"model_path": {"type": "string", "description": "Path to trained tree-based model (.pkl)"},
|
| 1095 |
+
"data_path": {"type": "string", "description": "Path to dataset"},
|
| 1096 |
+
"target_col": {"type": "string", "description": "Target column name"},
|
| 1097 |
+
"instance_index": {"type": "integer", "description": "Index of instance to trace through tree (default: 0)"},
|
| 1098 |
+
"output_path": {"type": "string", "description": "Path to save SVG visualization"}
|
| 1099 |
+
},
|
| 1100 |
+
"required": ["model_path", "data_path", "target_col"]
|
| 1101 |
+
}
|
| 1102 |
+
}
|
| 1103 |
+
},
|
| 1104 |
|
| 1105 |
# ============================================
|
| 1106 |
# TIME SERIES (3)
|
|
|
|
| 1483 |
}
|
| 1484 |
}
|
| 1485 |
},
|
| 1486 |
+
{
|
| 1487 |
+
"type": "function",
|
| 1488 |
+
"function": {
|
| 1489 |
+
"name": "generate_sweetviz_report",
|
| 1490 |
+
"description": "Generate interactive EDA report using Sweetviz. Provides feature-by-feature analysis, target associations, and dataset comparison. Great for train vs test comparison.",
|
| 1491 |
+
"parameters": {
|
| 1492 |
+
"type": "object",
|
| 1493 |
+
"properties": {
|
| 1494 |
+
"file_path": {"type": "string", "description": "Path to the dataset CSV/Parquet file"},
|
| 1495 |
+
"target_col": {"type": "string", "description": "Optional target column for supervised analysis"},
|
| 1496 |
+
"compare_file_path": {"type": "string", "description": "Optional second dataset for comparison (e.g., test set)"},
|
| 1497 |
+
"output_path": {"type": "string", "description": "Where to save HTML report (default: ./outputs/reports/sweetviz_report.html)"}
|
| 1498 |
+
},
|
| 1499 |
+
"required": ["file_path"]
|
| 1500 |
+
}
|
| 1501 |
+
}
|
| 1502 |
+
},
|
| 1503 |
+
{
|
| 1504 |
+
"type": "function",
|
| 1505 |
+
"function": {
|
| 1506 |
+
"name": "detect_label_errors",
|
| 1507 |
+
"description": "Detect potential label errors in classification datasets using cleanlab. Uses confident learning to find mislabeled examples by cross-validating classifiers and identifying disagreements.",
|
| 1508 |
+
"parameters": {
|
| 1509 |
+
"type": "object",
|
| 1510 |
+
"properties": {
|
| 1511 |
+
"file_path": {"type": "string", "description": "Path to classification dataset"},
|
| 1512 |
+
"target_col": {"type": "string", "description": "Target/label column name"},
|
| 1513 |
+
"features": {"type": "array", "items": {"type": "string"}, "description": "Feature columns (None = all numeric)"},
|
| 1514 |
+
"output_path": {"type": "string", "description": "Path to save flagged rows"}
|
| 1515 |
+
},
|
| 1516 |
+
"required": ["file_path", "target_col"]
|
| 1517 |
+
}
|
| 1518 |
+
}
|
| 1519 |
+
},
|
| 1520 |
+
{
|
| 1521 |
+
"type": "function",
|
| 1522 |
+
"function": {
|
| 1523 |
+
"name": "validate_schema_pandera",
|
| 1524 |
+
"description": "Validate a DataFrame against a pandera schema. Check column types, nullability, value ranges, and custom constraints.",
|
| 1525 |
+
"parameters": {
|
| 1526 |
+
"type": "object",
|
| 1527 |
+
"properties": {
|
| 1528 |
+
"file_path": {"type": "string", "description": "Path to dataset to validate"},
|
| 1529 |
+
"schema_config": {"type": "object", "description": "Schema configuration with column definitions"}
|
| 1530 |
+
},
|
| 1531 |
+
"required": ["file_path", "schema_config"]
|
| 1532 |
+
}
|
| 1533 |
+
}
|
| 1534 |
+
},
|
| 1535 |
# ========================================
|
| 1536 |
# CODE INTERPRETER - THE GAME CHANGER 🚀
|
| 1537 |
# ========================================
|
|
|
|
| 1715 |
"required": ["project_id", "query"]
|
| 1716 |
}
|
| 1717 |
}
|
| 1718 |
+
},
|
| 1719 |
+
|
| 1720 |
+
# ============================================
|
| 1721 |
+
# AUTOGLUON TRAINING (3) - AutoML at Scale
|
| 1722 |
+
# ============================================
|
| 1723 |
+
{
|
| 1724 |
+
"type": "function",
|
| 1725 |
+
"function": {
|
| 1726 |
+
"name": "train_with_autogluon",
|
| 1727 |
+
"description": "Train ML models using AutoGluon AutoML. Automatically trains and ensembles 10+ models (LightGBM, XGBoost, CatBoost, RandomForest, etc.) with stacking. Handles raw data directly - no need to manually encode categoricals or impute missing values. Supports classification (binary/multiclass) and regression. Use this instead of train_baseline_models for best performance.",
|
| 1728 |
+
"parameters": {
|
| 1729 |
+
"type": "object",
|
| 1730 |
+
"properties": {
|
| 1731 |
+
"file_path": {
|
| 1732 |
+
"type": "string",
|
| 1733 |
+
"description": "Path to CSV/Parquet dataset"
|
| 1734 |
+
},
|
| 1735 |
+
"target_col": {
|
| 1736 |
+
"type": "string",
|
| 1737 |
+
"description": "Column to predict"
|
| 1738 |
+
},
|
| 1739 |
+
"task_type": {
|
| 1740 |
+
"type": "string",
|
| 1741 |
+
"enum": ["classification", "regression", "auto"],
|
| 1742 |
+
"description": "Type of ML task. 'auto' to detect automatically."
|
| 1743 |
+
},
|
| 1744 |
+
"time_limit": {
|
| 1745 |
+
"type": "integer",
|
| 1746 |
+
"description": "Max training time in seconds (default: 120). Higher = better models."
|
| 1747 |
+
},
|
| 1748 |
+
"presets": {
|
| 1749 |
+
"type": "string",
|
| 1750 |
+
"enum": ["medium_quality", "good_quality", "best_quality"],
|
| 1751 |
+
"description": "Quality preset. medium_quality=fast, best_quality=slower but better."
|
| 1752 |
+
},
|
| 1753 |
+
"eval_metric": {
|
| 1754 |
+
"type": "string",
|
| 1755 |
+
"description": "Metric to optimize. Classification: 'accuracy','f1','roc_auc'. Regression: 'rmse','mae','r2'. Auto-selected if None."
|
| 1756 |
+
},
|
| 1757 |
+
"output_dir": {
|
| 1758 |
+
"type": "string",
|
| 1759 |
+
"description": "Directory to save trained model (default: ./outputs/autogluon_model)"
|
| 1760 |
+
},
|
| 1761 |
+
"infer_limit": {
|
| 1762 |
+
"type": "number",
|
| 1763 |
+
"description": "Max inference time per row in seconds. Only models meeting this speed constraint are kept. E.g. 0.01 = 10ms/row."
|
| 1764 |
+
}
|
| 1765 |
+
},
|
| 1766 |
+
"required": ["file_path", "target_col"]
|
| 1767 |
+
}
|
| 1768 |
+
}
|
| 1769 |
+
},
|
| 1770 |
+
{
|
| 1771 |
+
"type": "function",
|
| 1772 |
+
"function": {
|
| 1773 |
+
"name": "predict_with_autogluon",
|
| 1774 |
+
"description": "Make predictions on new data using a trained AutoGluon model. Returns predictions and probability scores for classification tasks.",
|
| 1775 |
+
"parameters": {
|
| 1776 |
+
"type": "object",
|
| 1777 |
+
"properties": {
|
| 1778 |
+
"model_path": {
|
| 1779 |
+
"type": "string",
|
| 1780 |
+
"description": "Path to saved AutoGluon model directory"
|
| 1781 |
+
},
|
| 1782 |
+
"data_path": {
|
| 1783 |
+
"type": "string",
|
| 1784 |
+
"description": "Path to new data CSV/Parquet for prediction"
|
| 1785 |
+
},
|
| 1786 |
+
"output_path": {
|
| 1787 |
+
"type": "string",
|
| 1788 |
+
"description": "Path to save predictions CSV"
|
| 1789 |
+
}
|
| 1790 |
+
},
|
| 1791 |
+
"required": ["model_path", "data_path"]
|
| 1792 |
+
}
|
| 1793 |
+
}
|
| 1794 |
+
},
|
| 1795 |
+
{
|
| 1796 |
+
"type": "function",
|
| 1797 |
+
"function": {
|
| 1798 |
+
"name": "forecast_with_autogluon",
|
| 1799 |
+
"description": "Forecast time series using AutoGluon TimeSeriesPredictor. Trains and ensembles multiple models including DeepAR, ETS, ARIMA, Theta, and Chronos. Supports covariates, holiday features, model selection, and probabilistic forecasts. Much more powerful than basic ARIMA/Prophet.",
|
| 1800 |
+
"parameters": {
|
| 1801 |
+
"type": "object",
|
| 1802 |
+
"properties": {
|
| 1803 |
+
"file_path": {
|
| 1804 |
+
"type": "string",
|
| 1805 |
+
"description": "Path to time series CSV/Parquet"
|
| 1806 |
+
},
|
| 1807 |
+
"target_col": {
|
| 1808 |
+
"type": "string",
|
| 1809 |
+
"description": "Column with values to forecast"
|
| 1810 |
+
},
|
| 1811 |
+
"time_col": {
|
| 1812 |
+
"type": "string",
|
| 1813 |
+
"description": "Column with timestamps/dates"
|
| 1814 |
+
},
|
| 1815 |
+
"forecast_horizon": {
|
| 1816 |
+
"type": "integer",
|
| 1817 |
+
"description": "Number of future periods to predict (default: 30)"
|
| 1818 |
+
},
|
| 1819 |
+
"id_col": {
|
| 1820 |
+
"type": "string",
|
| 1821 |
+
"description": "Column identifying different series (for multi-series forecasting)"
|
| 1822 |
+
},
|
| 1823 |
+
"freq": {
|
| 1824 |
+
"type": "string",
|
| 1825 |
+
"description": "Frequency: 'D'=daily, 'h'=hourly, 'W'=weekly, 'MS'=monthly. Auto-detected if omitted."
|
| 1826 |
+
},
|
| 1827 |
+
"time_limit": {
|
| 1828 |
+
"type": "integer",
|
| 1829 |
+
"description": "Max training time in seconds (default: 120)"
|
| 1830 |
+
},
|
| 1831 |
+
"presets": {
|
| 1832 |
+
"type": "string",
|
| 1833 |
+
"enum": ["fast_training", "medium_quality", "best_quality"],
|
| 1834 |
+
"description": "Quality preset for forecasting models"
|
| 1835 |
+
},
|
| 1836 |
+
"output_path": {
|
| 1837 |
+
"type": "string",
|
| 1838 |
+
"description": "Path to save forecast CSV"
|
| 1839 |
+
},
|
| 1840 |
+
"static_features_path": {
|
| 1841 |
+
"type": "string",
|
| 1842 |
+
"description": "CSV with per-series metadata (one row per series). Improves cross-series learning."
|
| 1843 |
+
},
|
| 1844 |
+
"known_covariates_cols": {
|
| 1845 |
+
"type": "array",
|
| 1846 |
+
"items": {"type": "string"},
|
| 1847 |
+
"description": "Columns with future-known values (holidays, promotions, day_of_week)"
|
| 1848 |
+
},
|
| 1849 |
+
"holiday_country": {
|
| 1850 |
+
"type": "string",
|
| 1851 |
+
"description": "Country code for auto holiday features: 'US', 'UK', 'IN', 'DE', etc."
|
| 1852 |
+
},
|
| 1853 |
+
"fill_missing": {
|
| 1854 |
+
"type": "boolean",
|
| 1855 |
+
"description": "Auto-fill missing values in time series (default: true)"
|
| 1856 |
+
},
|
| 1857 |
+
"models": {
|
| 1858 |
+
"type": "array",
|
| 1859 |
+
"items": {"type": "string"},
|
| 1860 |
+
"description": "Specific models to train: 'ETS', 'AutoARIMA', 'Theta', 'DeepAR', 'PatchTST', 'DLinear', 'TFT', 'SeasonalNaive'"
|
| 1861 |
+
},
|
| 1862 |
+
"quantile_levels": {
|
| 1863 |
+
"type": "array",
|
| 1864 |
+
"items": {"type": "number"},
|
| 1865 |
+
"description": "Quantile levels for probabilistic forecasts. E.g. [0.1, 0.5, 0.9] for 10th/50th/90th percentile."
|
| 1866 |
+
}
|
| 1867 |
+
},
|
| 1868 |
+
"required": ["file_path", "target_col", "time_col"]
|
| 1869 |
+
}
|
| 1870 |
+
}
|
| 1871 |
+
},
|
| 1872 |
+
|
| 1873 |
+
# ============================================
|
| 1874 |
+
# AUTOGLUON ADVANCED (6) - Post-Training, Analysis, Multi-Label, Backtesting
|
| 1875 |
+
# ============================================
|
| 1876 |
+
{
|
| 1877 |
+
"type": "function",
|
| 1878 |
+
"function": {
|
| 1879 |
+
"name": "optimize_autogluon_model",
|
| 1880 |
+
"description": "Post-training optimization on a trained AutoGluon model. Operations: refit_full (re-train on 100% data for deployment), distill (compress ensemble into single model), calibrate_threshold (optimize binary classification threshold), deploy_optimize (strip artifacts for minimal deployment), delete_models (remove specific models to free resources).",
|
| 1881 |
+
"parameters": {
|
| 1882 |
+
"type": "object",
|
| 1883 |
+
"properties": {
|
| 1884 |
+
"model_path": {
|
| 1885 |
+
"type": "string",
|
| 1886 |
+
"description": "Path to saved AutoGluon model directory"
|
| 1887 |
+
},
|
| 1888 |
+
"operation": {
|
| 1889 |
+
"type": "string",
|
| 1890 |
+
"enum": ["refit_full", "distill", "calibrate_threshold", "deploy_optimize", "delete_models"],
|
| 1891 |
+
"description": "Optimization operation to perform"
|
| 1892 |
+
},
|
| 1893 |
+
"data_path": {
|
| 1894 |
+
"type": "string",
|
| 1895 |
+
"description": "Path to dataset (required for distill, calibrate_threshold)"
|
| 1896 |
+
},
|
| 1897 |
+
"metric": {
|
| 1898 |
+
"type": "string",
|
| 1899 |
+
"enum": ["f1", "balanced_accuracy", "precision", "recall"],
|
| 1900 |
+
"description": "Metric for calibrate_threshold optimization"
|
| 1901 |
+
},
|
| 1902 |
+
"models_to_delete": {
|
| 1903 |
+
"type": "array",
|
| 1904 |
+
"items": {"type": "string"},
|
| 1905 |
+
"description": "Model names to delete (for delete_models operation)"
|
| 1906 |
+
},
|
| 1907 |
+
"output_dir": {
|
| 1908 |
+
"type": "string",
|
| 1909 |
+
"description": "Output directory for deploy_optimize"
|
| 1910 |
+
}
|
| 1911 |
+
},
|
| 1912 |
+
"required": ["model_path", "operation"]
|
| 1913 |
+
}
|
| 1914 |
+
}
|
| 1915 |
+
},
|
| 1916 |
+
{
|
| 1917 |
+
"type": "function",
|
| 1918 |
+
"function": {
|
| 1919 |
+
"name": "analyze_autogluon_model",
|
| 1920 |
+
"description": "Inspect and analyze a trained AutoGluon model. Operations: summary (extended leaderboard with stack levels, memory, inference speed), transform_features (get internally transformed feature matrix), info (comprehensive model metadata and training summary).",
|
| 1921 |
+
"parameters": {
|
| 1922 |
+
"type": "object",
|
| 1923 |
+
"properties": {
|
| 1924 |
+
"model_path": {
|
| 1925 |
+
"type": "string",
|
| 1926 |
+
"description": "Path to saved AutoGluon model directory"
|
| 1927 |
+
},
|
| 1928 |
+
"data_path": {
|
| 1929 |
+
"type": "string",
|
| 1930 |
+
"description": "Path to dataset (required for transform_features)"
|
| 1931 |
+
},
|
| 1932 |
+
"operation": {
|
| 1933 |
+
"type": "string",
|
| 1934 |
+
"enum": ["summary", "transform_features", "info"],
|
| 1935 |
+
"description": "Analysis operation to perform"
|
| 1936 |
+
}
|
| 1937 |
+
},
|
| 1938 |
+
"required": ["model_path"]
|
| 1939 |
+
}
|
| 1940 |
+
}
|
| 1941 |
+
},
|
| 1942 |
+
{
|
| 1943 |
+
"type": "function",
|
| 1944 |
+
"function": {
|
| 1945 |
+
"name": "extend_autogluon_training",
|
| 1946 |
+
"description": "Add models or re-fit ensemble on an existing AutoGluon predictor without retraining from scratch. Operations: fit_extra (train additional models/hyperparameters), fit_weighted_ensemble (re-fit ensemble weights on existing base models).",
|
| 1947 |
+
"parameters": {
|
| 1948 |
+
"type": "object",
|
| 1949 |
+
"properties": {
|
| 1950 |
+
"model_path": {
|
| 1951 |
+
"type": "string",
|
| 1952 |
+
"description": "Path to saved AutoGluon model directory"
|
| 1953 |
+
},
|
| 1954 |
+
"operation": {
|
| 1955 |
+
"type": "string",
|
| 1956 |
+
"enum": ["fit_extra", "fit_weighted_ensemble"],
|
| 1957 |
+
"description": "Extension operation to perform"
|
| 1958 |
+
},
|
| 1959 |
+
"data_path": {
|
| 1960 |
+
"type": "string",
|
| 1961 |
+
"description": "Path to training data (required for fit_extra)"
|
| 1962 |
+
},
|
| 1963 |
+
"time_limit": {
|
| 1964 |
+
"type": "integer",
|
| 1965 |
+
"description": "Additional training time in seconds (default: 60)"
|
| 1966 |
+
},
|
| 1967 |
+
"hyperparameters": {
|
| 1968 |
+
"type": "object",
|
| 1969 |
+
"description": "Model hyperparameters dict. E.g. {\"GBM\": {\"num_boost_round\": 500}, \"RF\": {}}"
|
| 1970 |
+
}
|
| 1971 |
+
},
|
| 1972 |
+
"required": ["model_path"]
|
| 1973 |
+
}
|
| 1974 |
+
}
|
| 1975 |
+
},
|
| 1976 |
+
{
|
| 1977 |
+
"type": "function",
|
| 1978 |
+
"function": {
|
| 1979 |
+
"name": "train_multilabel_autogluon",
|
| 1980 |
+
"description": "Train multi-label prediction model. Predicts multiple target columns simultaneously by training separate AutoGluon TabularPredictors per label with shared feature engineering. Use when dataset has multiple columns to predict.",
|
| 1981 |
+
"parameters": {
|
| 1982 |
+
"type": "object",
|
| 1983 |
+
"properties": {
|
| 1984 |
+
"file_path": {
|
| 1985 |
+
"type": "string",
|
| 1986 |
+
"description": "Path to CSV/Parquet dataset"
|
| 1987 |
+
},
|
| 1988 |
+
"target_cols": {
|
| 1989 |
+
"type": "array",
|
| 1990 |
+
"items": {"type": "string"},
|
| 1991 |
+
"description": "List of columns to predict (e.g. ['label1', 'label2'])"
|
| 1992 |
+
},
|
| 1993 |
+
"time_limit": {
|
| 1994 |
+
"type": "integer",
|
| 1995 |
+
"description": "Max training time per label in seconds (default: 120)"
|
| 1996 |
+
},
|
| 1997 |
+
"presets": {
|
| 1998 |
+
"type": "string",
|
| 1999 |
+
"enum": ["medium_quality", "good_quality", "best_quality"],
|
| 2000 |
+
"description": "Quality preset"
|
| 2001 |
+
},
|
| 2002 |
+
"output_dir": {
|
| 2003 |
+
"type": "string",
|
| 2004 |
+
"description": "Where to save trained model"
|
| 2005 |
+
}
|
| 2006 |
+
},
|
| 2007 |
+
"required": ["file_path", "target_cols"]
|
| 2008 |
+
}
|
| 2009 |
+
}
|
| 2010 |
+
},
|
| 2011 |
+
{
|
| 2012 |
+
"type": "function",
|
| 2013 |
+
"function": {
|
| 2014 |
+
"name": "backtest_timeseries",
|
| 2015 |
+
"description": "Backtest time series models using multiple validation windows. More robust performance estimation than single train/test split. Trains models with multi-window cross-validation and returns per-window evaluation.",
|
| 2016 |
+
"parameters": {
|
| 2017 |
+
"type": "object",
|
| 2018 |
+
"properties": {
|
| 2019 |
+
"file_path": {
|
| 2020 |
+
"type": "string",
|
| 2021 |
+
"description": "Path to time series CSV/Parquet"
|
| 2022 |
+
},
|
| 2023 |
+
"target_col": {
|
| 2024 |
+
"type": "string",
|
| 2025 |
+
"description": "Column with values to forecast"
|
| 2026 |
+
},
|
| 2027 |
+
"time_col": {
|
| 2028 |
+
"type": "string",
|
| 2029 |
+
"description": "Column with timestamps/dates"
|
| 2030 |
+
},
|
| 2031 |
+
"forecast_horizon": {
|
| 2032 |
+
"type": "integer",
|
| 2033 |
+
"description": "Periods to predict per window (default: 30)"
|
| 2034 |
+
},
|
| 2035 |
+
"id_col": {
|
| 2036 |
+
"type": "string",
|
| 2037 |
+
"description": "Column identifying different series"
|
| 2038 |
+
},
|
| 2039 |
+
"freq": {
|
| 2040 |
+
"type": "string",
|
| 2041 |
+
"description": "Frequency string"
|
| 2042 |
+
},
|
| 2043 |
+
"num_val_windows": {
|
| 2044 |
+
"type": "integer",
|
| 2045 |
+
"description": "Number of backtesting windows (default: 3)"
|
| 2046 |
+
},
|
| 2047 |
+
"time_limit": {
|
| 2048 |
+
"type": "integer",
|
| 2049 |
+
"description": "Max training time in seconds"
|
| 2050 |
+
},
|
| 2051 |
+
"presets": {
|
| 2052 |
+
"type": "string",
|
| 2053 |
+
"enum": ["fast_training", "medium_quality", "best_quality"],
|
| 2054 |
+
"description": "Quality preset"
|
| 2055 |
+
},
|
| 2056 |
+
"output_path": {
|
| 2057 |
+
"type": "string",
|
| 2058 |
+
"description": "Path to save backtest predictions CSV"
|
| 2059 |
+
}
|
| 2060 |
+
},
|
| 2061 |
+
"required": ["file_path", "target_col", "time_col"]
|
| 2062 |
+
}
|
| 2063 |
+
}
|
| 2064 |
+
},
|
| 2065 |
+
{
|
| 2066 |
+
"type": "function",
|
| 2067 |
+
"function": {
|
| 2068 |
+
"name": "analyze_timeseries_model",
|
| 2069 |
+
"description": "Analyze a trained AutoGluon time series model. Operations: feature_importance (permutation importance of covariates), plot (forecast vs actuals visualization), make_future_dataframe (generate future timestamp skeleton for prediction with covariates).",
|
| 2070 |
+
"parameters": {
|
| 2071 |
+
"type": "object",
|
| 2072 |
+
"properties": {
|
| 2073 |
+
"model_path": {
|
| 2074 |
+
"type": "string",
|
| 2075 |
+
"description": "Path to saved AutoGluon TimeSeriesPredictor"
|
| 2076 |
+
},
|
| 2077 |
+
"data_path": {
|
| 2078 |
+
"type": "string",
|
| 2079 |
+
"description": "Path to time series data"
|
| 2080 |
+
},
|
| 2081 |
+
"time_col": {
|
| 2082 |
+
"type": "string",
|
| 2083 |
+
"description": "Column with timestamps/dates"
|
| 2084 |
+
},
|
| 2085 |
+
"id_col": {
|
| 2086 |
+
"type": "string",
|
| 2087 |
+
"description": "Column identifying different series"
|
| 2088 |
+
},
|
| 2089 |
+
"operation": {
|
| 2090 |
+
"type": "string",
|
| 2091 |
+
"enum": ["feature_importance", "plot", "make_future_dataframe"],
|
| 2092 |
+
"description": "Analysis operation to perform"
|
| 2093 |
+
},
|
| 2094 |
+
"output_path": {
|
| 2095 |
+
"type": "string",
|
| 2096 |
+
"description": "Path to save output (plot image or CSV)"
|
| 2097 |
+
}
|
| 2098 |
+
},
|
| 2099 |
+
"required": ["model_path", "data_path", "time_col"]
|
| 2100 |
+
}
|
| 2101 |
+
}
|
| 2102 |
}
|
| 2103 |
]
|
| 2104 |
|
src/tools/visualization_engine.py
CHANGED
|
@@ -181,7 +181,7 @@ def generate_data_quality_plots(file_path: str, output_dir: str) -> Dict[str, An
|
|
| 181 |
return {"plot_paths": plots, "figures": figures, "n_plots": len(plots)}
|
| 182 |
|
| 183 |
|
| 184 |
-
def generate_eda_plots(file_path: str, target_col: Optional[str], output_dir: str) -> Dict[str, Any]:
|
| 185 |
"""Generate exploratory data analysis plots using Matplotlib."""
|
| 186 |
df = load_dataframe(file_path).to_pandas()
|
| 187 |
plots = []
|
|
|
|
| 181 |
return {"plot_paths": plots, "figures": figures, "n_plots": len(plots)}
|
| 182 |
|
| 183 |
|
| 184 |
+
def generate_eda_plots(file_path: str, target_col: Optional[str] = None, output_dir: str = "./outputs/plots/eda") -> Dict[str, Any]:
|
| 185 |
"""Generate exploratory data analysis plots using Matplotlib."""
|
| 186 |
df = load_dataframe(file_path).to_pandas()
|
| 187 |
plots = []
|
src/utils/schema_extraction.py
CHANGED
|
@@ -22,13 +22,27 @@ def extract_schema_local(file_path: str, sample_rows: int = 5) -> Dict[str, Any]
|
|
| 22 |
try:
|
| 23 |
# Read with Polars (faster than pandas)
|
| 24 |
if file_path.endswith('.csv'):
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
elif file_path.endswith('.parquet'):
|
| 27 |
df = pl.read_parquet(file_path)
|
| 28 |
else:
|
| 29 |
# Fallback to pandas
|
| 30 |
import pandas as pd
|
| 31 |
-
pdf = pd.read_csv(file_path)
|
| 32 |
df = pl.from_pandas(pdf)
|
| 33 |
|
| 34 |
# Basic metadata
|
|
|
|
| 22 |
try:
|
| 23 |
# Read with Polars (faster than pandas)
|
| 24 |
if file_path.endswith('.csv'):
|
| 25 |
+
# 🔥 FIX: Use infer_schema_length and ignore_errors to handle mixed-type columns
|
| 26 |
+
# This prevents failures like: could not parse `835.159865` as dtype `i64`
|
| 27 |
+
try:
|
| 28 |
+
df = pl.read_csv(file_path, infer_schema_length=10000, ignore_errors=True)
|
| 29 |
+
except Exception:
|
| 30 |
+
# Final fallback: read everything as strings, then let Polars infer
|
| 31 |
+
try:
|
| 32 |
+
import pandas as pd
|
| 33 |
+
pdf = pd.read_csv(file_path, low_memory=False)
|
| 34 |
+
df = pl.from_pandas(pdf)
|
| 35 |
+
except Exception as e2:
|
| 36 |
+
return {
|
| 37 |
+
'error': f"Failed to read CSV: {str(e2)}",
|
| 38 |
+
'file_path': file_path
|
| 39 |
+
}
|
| 40 |
elif file_path.endswith('.parquet'):
|
| 41 |
df = pl.read_parquet(file_path)
|
| 42 |
else:
|
| 43 |
# Fallback to pandas
|
| 44 |
import pandas as pd
|
| 45 |
+
pdf = pd.read_csv(file_path, low_memory=False)
|
| 46 |
df = pl.from_pandas(pdf)
|
| 47 |
|
| 48 |
# Basic metadata
|
src/utils/validation.py
CHANGED
|
@@ -268,3 +268,110 @@ def validate_strategy_config(strategy: Dict[str, Any],
|
|
| 268 |
raise ValidationError(
|
| 269 |
f"Missing required strategy keys: {', '.join(missing)}"
|
| 270 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 268 |
raise ValidationError(
|
| 269 |
f"Missing required strategy keys: {', '.join(missing)}"
|
| 270 |
)
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def validate_schema_pandera(
    df: pl.DataFrame,
    schema_config: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Validate a DataFrame against a pandera schema.

    Schema config format:
    {
        "columns": {
            "age": {"dtype": "int", "nullable": False, "checks": {"ge": 0, "le": 150}},
            "name": {"dtype": "str", "nullable": False},
            "salary": {"dtype": "float", "nullable": True, "checks": {"ge": 0}}
        },
        "coerce": True
    }

    Args:
        df: Polars DataFrame to validate
        schema_config: Dictionary defining the expected schema

    Returns:
        Dictionary with validation results and any errors found.
        On success: {'status': 'success', 'valid': True/False, ...}.
        If pandera is not installed: {'status': 'error', 'message': ...}.
    """
    try:
        import pandera as pa
        import pandas as pd
    except ImportError:
        return {
            'status': 'error',
            'message': 'pandera not installed. Install with: pip install pandera>=0.18'
        }

    columns_config = schema_config.get("columns", {})
    coerce = schema_config.get("coerce", True)

    # Map config dtype strings to pandera dtypes.
    # NOTE(review): the pa.Int/pa.String style aliases are deprecated in
    # newer pandera releases in favor of builtin/engine dtypes -- confirm
    # against the pinned pandera version in requirements.txt.
    dtype_map = {
        "int": pa.Int,
        "float": pa.Float,
        "str": pa.String,
        "bool": pa.Bool,
        "datetime": pa.DateTime,
    }

    # Supported declarative checks; unrecognized check names are silently skipped.
    check_map = {
        "ge": lambda v: pa.Check.ge(v),
        "le": lambda v: pa.Check.le(v),
        "gt": lambda v: pa.Check.gt(v),
        "lt": lambda v: pa.Check.lt(v),
        "in_range": lambda v: pa.Check.in_range(v[0], v[1]),
        "isin": lambda v: pa.Check.isin(v),
        "str_matches": lambda v: pa.Check.str_matches(v),
        "str_length": lambda v: pa.Check.str_length(max_value=v),
    }

    # Build pandera schema from config
    schema_columns = {}
    for col_name, col_config in columns_config.items():
        col_dtype = dtype_map.get(col_config.get("dtype", ""), None)
        nullable = col_config.get("nullable", True)
        checks_config = col_config.get("checks", {})

        checks = [
            check_map[check_name](check_val)
            for check_name, check_val in checks_config.items()
            if check_name in check_map
        ]

        schema_columns[col_name] = pa.Column(
            dtype=col_dtype,
            nullable=nullable,
            checks=checks if checks else None,
            coerce=coerce
        )

    schema = pa.DataFrameSchema(columns=schema_columns, coerce=coerce)

    # Convert Polars to Pandas for pandera validation
    df_pd = df.to_pandas()

    try:
        # lazy=True collects all failures into a single SchemaErrors exception
        # instead of raising on the first one.
        schema.validate(df_pd, lazy=True)
        return {
            'status': 'success',
            'valid': True,
            'message': 'DataFrame passed all schema validations',
            'columns_validated': list(columns_config.keys())
        }
    except pa.errors.SchemaErrors as err:
        errors = []
        for _, row in err.failure_cases.iterrows():
            # BUG FIX: the 'index' column of failure_cases can be NaN for
            # dataframe-level failures; the previous
            # `int(row.get('index', -1)) if row.get('index') is not None`
            # both looked the value up twice and raised ValueError on
            # int(NaN). Look it up once and guard with pd.isna.
            raw_index = row.get('index')
            if raw_index is None or pd.isna(raw_index):
                idx = None
            else:
                idx = int(raw_index)
            errors.append({
                'column': str(row.get('column', '')),
                'check': str(row.get('check', '')),
                'failure_case': str(row.get('failure_case', '')),
                'index': idx
            })

        return {
            'status': 'success',
            'valid': False,
            'message': f'Schema validation failed with {len(errors)} errors',
            'errors': errors[:50],  # Limit to 50 errors
            'total_errors': len(errors),
            'columns_validated': list(columns_config.keys())
        }
|
test_data/sample.csv
DELETED
|
@@ -1,16 +0,0 @@
|
|
| 1 |
-
age,income,score,purchased
|
| 2 |
-
25,50000,75,1
|
| 3 |
-
30,60000,82,1
|
| 4 |
-
22,45000,68,0
|
| 5 |
-
35,75000,88,1
|
| 6 |
-
28,55000,79,1
|
| 7 |
-
40,90000,92,1
|
| 8 |
-
23,42000,65,0
|
| 9 |
-
32,68000,85,1
|
| 10 |
-
27,52000,76,1
|
| 11 |
-
38,82000,90,1
|
| 12 |
-
24,48000,71,0
|
| 13 |
-
31,65000,84,1
|
| 14 |
-
26,51000,77,1
|
| 15 |
-
29,58000,80,1
|
| 16 |
-
33,72000,87,1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|