arturovaine committed on
Commit 4cfac3c · 0 Parent(s):

Initial commit: mcp-civil-compliance

README.md ADDED
@@ -0,0 +1,85 @@
+ ---
+ title: MCP Civil Compliance Checker
+ emoji: 🏗️
+ colorFrom: blue
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 5.24.0
+ app_file: app.py
+ pinned: false
+ tags:
+   - building-mcp-track-enterprise
+   - mcp
+   - model-context-protocol
+   - hackathon
+ ---
+
+ # MCP Civil Compliance Checker
+
+ A building-code compliance MCP server with IFC parsing and rule-based validation.
+
+ ## 🎯 MCP 1st Birthday Hackathon Submission
+
+ **Track:** Building MCP Track (Enterprise)
+
+ ## 📖 About
+
+ This project is an MCP (Model Context Protocol) server for building-code compliance checking: it parses IFC models, extracts key metrics (height, floor area, storeys), and validates them against configurable rules.
+
+ ## 🚀 Features
+
+ ### MCP Server Tools
+
+ The server (see `server/app.py`) registers the following tools:
+
+ - `parse_ifc`: Parses an IFC file to extract metrics (project name, storeys, height, floor area).
+ - `evaluate_rules`: Evaluates extracted building metrics against a YAML rule set.
+ - `query_standards`: Queries indexed building standards using RAG.
+ - `parse_ifc_cloud`: Parses an IFC file remotely on Modal cloud compute.
+
+ ## 🛠️ Technical Stack
+
+ - **Framework:** FastAPI + MCP Protocol
+ - **UI:** Gradio 5.24.0
+ - **Language:** Python 3.10+
+
+ ## 📦 Installation
+
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ ## 🎮 Usage
+
+ ### As a Gradio App (This Space)
+
+ Simply use the interface above to interact with the MCP tools through a user-friendly UI.
+
+ ### As an MCP Server
+
+ ```bash
+ python server/app.py
+ ```
+
+ The MCP server starts on the configured port (8003 by default) and exposes the following endpoints:
+ - `GET /tools` - List all available MCP tools
+ - `POST /invoke` - Invoke a specific tool
+ - `GET /health` - Health check
+
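+ For example, with the server running locally, the endpoints can be exercised from Python using only the standard library (the metric and rule values below are illustrative):
+
+ ```python
+ import json
+ import urllib.request
+
+ BASE = "http://localhost:8003"
+
+ # List the registered tools
+ with urllib.request.urlopen(f"{BASE}/tools") as resp:
+     print([t["name"] for t in json.load(resp)])
+
+ # Invoke the rule evaluator
+ payload = {
+     "tool_name": "evaluate_rules",
+     "arguments": {
+         "metrics": {"building_height": 18.5, "total_floor_area": 120.0},
+         "rules_yaml": "max_height: 20.0\nmin_area: 50.0",
+     },
+ }
+ req = urllib.request.Request(
+     f"{BASE}/invoke",
+     data=json.dumps(payload).encode(),
+     headers={"Content-Type": "application/json"},
+ )
+ with urllib.request.urlopen(req) as resp:
+     print(json.load(resp))
+ ```
+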
+ ## 📝 MCP Integration
+
+ This server can be integrated with MCP-compatible clients like:
+ - Claude Desktop
+ - Cursor
+ - Other MCP-enabled applications
+
+ ## 🎥 Demo Video
+
+ [Demo video link will be added here]
+
+ ## 👥 Team
+
+ [Team member information]
+
+ ## 📄 License
+
+ [License information]
+
+ ## 🙏 Acknowledgments
+
+ Built for the MCP 1st Birthday Hackathon (November 14-30, 2025)
app.py ADDED
@@ -0,0 +1,15 @@
+ #!/usr/bin/env python3
+ """
+ HuggingFace Space entry point for mcp-civil-compliance
+ """
+ import sys
+ import os
+
+ # Add current directory to path
+ sys.path.insert(0, os.path.dirname(__file__))
+
+ # Import and launch the Gradio app
+ from gradio_app import demo
+
+ if __name__ == "__main__":
+     demo.launch()
gradio_app.py ADDED
@@ -0,0 +1,80 @@
1
+ import gradio as gr
2
+ import sys
3
+ import os
4
+ import json
+ from datetime import date
5
+
6
+ # Add current directory to path
7
+ sys.path.insert(0, os.path.dirname(__file__))
8
+
9
+ from shared_ui import components
10
+ from shared_assets import storage
11
+ from server.parser import ifc_parser
12
+ from server.rules_engine import engine
13
+ from shared_report import report
14
+
15
+ PROJECT_NAME = "civil_compliance"
16
+
17
+ DEFAULT_RULES = """
18
+ max_height: 20.0
19
+ min_area: 50.0
20
+ """
21
+
22
+ def process_pipeline(file_obj, rules_text):
23
+ if file_obj is None:
24
+ return "Please upload a file.", None, None
25
+
26
+ # 1. Save Asset
27
+ saved_path = storage.save_file(PROJECT_NAME, file_obj.name)
28
+
29
+ # 2. Parse
30
+ try:
31
+ metrics = ifc_parser.parse_model(saved_path)
32
+ except Exception as e:
33
+ return f"Parsing Error: {e}", None, None
34
+
35
+ # 3. Validate
36
+ try:
37
+ rules = engine.load_rules(rules_text)
38
+ validation = engine.evaluate(metrics, rules)
39
+ except Exception as e:
40
+ return f"Validation Error: {e}", None, None
41
+
42
+ # 4. Generate Report
43
+ report_data = {
44
+ "project_name": metrics.get("project_name", "Unknown"),
45
+ "date": "2023-10-27", # Dynamic date in real app
46
+ "summary": "Passed" if validation["passed"] else "Failed",
47
+ "validation_details": json.dumps(validation["checks"], indent=2)
48
+ }
49
+
50
+ report_path = saved_path + "_report.html"
51
+ report.create_report("compliance", report_data, report_path)
52
+
53
+ # Format output for UI
54
+ result_summary = f"**Status:** {'✅ Passed' if validation['passed'] else '❌ Failed'}\n\n"
55
+ result_summary += f"**Metrics:**\n{json.dumps(metrics, indent=2)}"
56
+
57
+ return result_summary, validation["checks"], report_path
58
+
59
+ with gr.Blocks(css=components.get_shared_css_path(), theme=gr.themes.Base()) as demo:
60
+ components.create_header("Civil Compliance Checker", "Validate IFC models against building codes.")
61
+
62
+ with gr.Row():
63
+ with gr.Column(scale=1):
64
+ file_input = components.create_file_uploader(label="Upload IFC Model", file_types=[".ifc"])
65
+ rules_input = gr.Code(value=DEFAULT_RULES, language="yaml", label="Validation Rules (YAML)")
66
+ validate_btn = gr.Button("Validate Design", variant="primary", elem_classes=["primary"])
67
+
68
+ with gr.Column(scale=2):
69
+ summary_output = gr.Markdown(label="Summary")
70
+ details_table = components.create_result_table(headers=["rule", "limit", "actual", "passed", "message"])
71
+ report_file = gr.File(label="Download Report")
72
+
73
+ validate_btn.click(
74
+ process_pipeline,
75
+ inputs=[file_input, rules_input],
76
+ outputs=[summary_output, details_table, report_file]
77
+ )
78
+
79
+ if __name__ == "__main__":
80
+ demo.launch(server_port=7860)
requirements.txt ADDED
@@ -0,0 +1 @@
+ fastapi
+ uvicorn
+ pydantic
+ gradio>=5.0.0
+ markdown2
+ ifcopenshell
+ pyyaml
+ llama-index
+ modal
+ qdrant-client
server/__init__.py ADDED
File without changes
server/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (198 Bytes).
server/__pycache__/modal_parser.cpython-312.pyc ADDED
Binary file (3.98 kB).
server/__pycache__/rules_engine.cpython-312.pyc ADDED
Binary file (2.76 kB).
server/app.py ADDED
@@ -0,0 +1,93 @@
1
+ import sys
2
+ import os
3
+ import logging
4
+
5
+ # Ensure we can import shared modules and project modules
6
+ project_root = os.path.join(os.path.dirname(__file__), "../../../")
7
+ sys.path.insert(0, project_root)
8
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
9
+
10
+ from shared_mcp.app import app
11
+ from shared_mcp.registry import registry
12
+ from shared_mcp.models import ToolSchema
13
+ from server.parser import ifc_parser
14
+ from server.rules_engine import engine
15
+ from server.rag.standards_index import rag_engine
16
+ from server import modal_parser
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+ # Register Tools
21
+
22
+ # 1. Parse IFC
23
+ registry.register(
24
+ ToolSchema(
25
+ name="parse_ifc",
26
+ description="Parses an IFC file to extract metrics.",
27
+ input_schema={
28
+ "type": "object",
29
+ "properties": {
30
+ "file_path": {"type": "string", "description": "Absolute path to the IFC file"}
31
+ },
32
+ "required": ["file_path"]
33
+ }
34
+ ),
35
+ ifc_parser.parse_model
36
+ )
37
+
38
+ # 2. Evaluate Rules
39
+ def evaluate_rules_wrapper(metrics: dict, rules_yaml: str):
40
+ rules = engine.load_rules(rules_yaml)
41
+ return engine.evaluate(metrics, rules)
42
+
43
+ registry.register(
44
+ ToolSchema(
45
+ name="evaluate_rules",
46
+ description="Evaluates building metrics against a set of rules.",
47
+ input_schema={
48
+ "type": "object",
49
+ "properties": {
50
+ "metrics": {"type": "object", "description": "Metrics dictionary"},
51
+ "rules_yaml": {"type": "string", "description": "Rules in YAML format"}
52
+ },
53
+ "required": ["metrics", "rules_yaml"]
54
+ }
55
+ ),
56
+ evaluate_rules_wrapper
57
+ )
58
+
59
+ # 3. Query Standards (RAG)
60
+ registry.register(
61
+ ToolSchema(
62
+ name="query_standards",
63
+ description="Queries building standards using RAG.",
64
+ input_schema={
65
+ "type": "object",
66
+ "properties": {
67
+ "query": {"type": "string", "description": "Question about standards"}
68
+ },
69
+ "required": ["query"]
70
+ }
71
+ ),
72
+ rag_engine.query
73
+ )
74
+
75
+ # 4. Parse IFC (Cloud/Modal)
76
+ registry.register(
77
+ ToolSchema(
78
+ name="parse_ifc_cloud",
79
+ description="Parses an IFC file using Modal cloud compute.",
80
+ input_schema={
81
+ "type": "object",
82
+ "properties": {
83
+ "file_path": {"type": "string", "description": "Absolute path to the IFC file"}
84
+ },
85
+ "required": ["file_path"]
86
+ }
87
+ ),
88
+ modal_parser.parse_model_remote
89
+ )
90
+
91
+ if __name__ == "__main__":
92
+ import uvicorn
93
+ uvicorn.run(app, host="0.0.0.0", port=8003)
server/modal_parser.py ADDED
@@ -0,0 +1,96 @@
1
+ import modal
2
+ import logging
3
+ import os
4
+ import sys
5
+
6
+ # Define the image with dependencies
7
+ image = modal.Image.debian_slim().pip_install("ifcopenshell")
8
+
9
+ app = modal.App("mcp-civil-parser")
10
+
11
+ @app.function(image=image)
12
+ def parse_ifc_cloud(file_content: bytes, filename: str):
13
+ """
14
+ Parses an IFC file in the cloud using Modal.
15
+ """
16
+ import ifcopenshell
17
+ import ifcopenshell.util.element
18
+ import os
19
+
20
+ # Save to temp file
21
+ temp_path = f"/tmp/{filename}"
22
+ with open(temp_path, "wb") as f:
23
+ f.write(file_content)
24
+
25
+ try:
26
+ model = ifcopenshell.open(temp_path)
27
+ except Exception as e:
28
+ return {"error": str(e)}
29
+
30
+ metrics = {
31
+ "filename": filename,
32
+ "project_name": "Unknown",
33
+ "total_floor_area": 0.0,
34
+ "building_height": 0.0,
35
+ "num_storeys": 0,
36
+ "source": "Modal Cloud"
37
+ }
38
+
39
+ # Extract Project Name
40
+ project = model.by_type("IfcProject")
41
+ if project:
42
+ metrics["project_name"] = project[0].Name
43
+
44
+ # Extract Storeys and Height
45
+ storeys = model.by_type("IfcBuildingStorey")
46
+ metrics["num_storeys"] = len(storeys)
47
+
48
+ min_elev = float('inf')
49
+ max_elev = float('-inf')
50
+
51
+ for storey in storeys:
52
+ elevation = storey.Elevation
53
+ if elevation is not None:
54
+ min_elev = min(min_elev, elevation)
55
+ max_elev = max(max_elev, elevation)
56
+
57
+ if min_elev != float('inf') and max_elev != float('-inf'):
58
+ metrics["building_height"] = max_elev - min_elev
59
+
60
+ # Extract Floor Area
61
+ total_area = 0.0
62
+ spaces = model.by_type("IfcSpace")
63
+ for space in spaces:
64
+ psets = ifcopenshell.util.element.get_psets(space)
65
+ for pset_name, pset_data in psets.items():
66
+ if "Area" in pset_data:
67
+ total_area += float(pset_data["Area"])
68
+ elif "GrossFloorArea" in pset_data:
69
+ total_area += float(pset_data["GrossFloorArea"])
70
+ elif "NetFloorArea" in pset_data:
71
+ total_area += float(pset_data["NetFloorArea"])
72
+
73
+ metrics["total_floor_area"] = total_area
74
+
75
+ return metrics
76
+
77
+ def parse_model_remote(file_path: str):
78
+ """
79
+ Wrapper to call the Modal function.
80
+ """
81
+ if not os.path.exists(file_path):
82
+ raise FileNotFoundError(f"File not found: {file_path}")
83
+
84
+ with open(file_path, "rb") as f:
85
+ content = f.read()
86
+
87
+ filename = os.path.basename(file_path)
88
+
89
+ # In a real app, we would use `with app.run():` or `f.remote()`
90
+ # For the hackathon demo, we'll try to invoke it if configured.
91
+ try:
92
+ with app.run():
93
+ return parse_ifc_cloud.remote(content, filename)
94
+ except Exception as e:
95
+ # Fallback or error
96
+ raise ValueError(f"Modal execution failed (Did you run 'modal token set'?): {e}")
server/parser/__pycache__/ifc_parser.cpython-312.pyc ADDED
Binary file (2.82 kB).
server/parser/ifc_parser.py ADDED
@@ -0,0 +1,71 @@
1
+ import ifcopenshell
2
+ import ifcopenshell.util.element
3
+ import logging
4
+ from typing import Dict, Any, Optional
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
+ def parse_model(file_path: str) -> Dict[str, Any]:
9
+ """
10
+ Parses an IFC file to extract basic building metrics.
11
+
12
+ Args:
13
+ file_path: Absolute path to the IFC file.
14
+
15
+ Returns:
16
+ Dictionary containing extracted metrics (floor_area, height, etc.).
17
+ """
18
+ try:
19
+ model = ifcopenshell.open(file_path)
20
+ except Exception as e:
21
+ logger.error(f"Failed to open IFC file {file_path}: {e}")
22
+ raise ValueError(f"Could not open IFC file: {e}")
23
+
24
+ metrics = {
25
+ "filename": file_path.split("/")[-1],
26
+ "project_name": "Unknown",
27
+ "total_floor_area": 0.0,
28
+ "building_height": 0.0,
29
+ "num_storeys": 0,
30
+ "units": "meters" # Default assumption, should parse from project
31
+ }
32
+
33
+ # Extract Project Name
34
+ project = model.by_type("IfcProject")
35
+ if project:
36
+ metrics["project_name"] = project[0].Name
37
+
38
+ # Extract Storeys and Height
39
+ storeys = model.by_type("IfcBuildingStorey")
40
+ metrics["num_storeys"] = len(storeys)
41
+
42
+ min_elev = float('inf')
43
+ max_elev = float('-inf')
44
+
45
+ for storey in storeys:
46
+ elevation = storey.Elevation
47
+ if elevation is not None:
48
+ min_elev = min(min_elev, elevation)
49
+ max_elev = max(max_elev, elevation)
50
+
51
+ if min_elev != float('inf') and max_elev != float('-inf'):
52
+ metrics["building_height"] = max_elev - min_elev
53
+
54
+ # Extract Floor Area (Simplified estimation based on slabs or spaces)
55
+ # This is a heuristic; real implementation would need precise geometry
56
+ total_area = 0.0
57
+ spaces = model.by_type("IfcSpace")
58
+ for space in spaces:
59
+ # Try to get area from property sets
60
+ psets = ifcopenshell.util.element.get_psets(space)
61
+ for pset_name, pset_data in psets.items():
62
+ if "Area" in pset_data:
63
+ total_area += float(pset_data["Area"])
64
+ elif "GrossFloorArea" in pset_data:
65
+ total_area += float(pset_data["GrossFloorArea"])
66
+ elif "NetFloorArea" in pset_data:
67
+ total_area += float(pset_data["NetFloorArea"])
68
+
69
+ metrics["total_floor_area"] = total_area
70
+
71
+ return metrics
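A quick usage sketch of the parser above (the model path is illustrative; the keys shown are the ones `parse_model` populates):

```python
from server.parser import ifc_parser

# Returns a plain dict of metrics extracted from the IFC model
metrics = ifc_parser.parse_model("/path/to/model.ifc")
print(metrics["project_name"], metrics["num_storeys"],
      metrics["building_height"], metrics["total_floor_area"])
```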
server/rag/__pycache__/standards_index.cpython-312.pyc ADDED
Binary file (3.92 kB).
server/rag/standards_index.py ADDED
@@ -0,0 +1,54 @@
1
+ import logging
2
+ import os
3
+ from typing import List
4
+ from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Document
5
+ from llama_index.core.node_parser import SentenceSplitter
6
+
7
+ logger = logging.getLogger(__name__)
8
+
9
+ class StandardsIndex:
10
+ def __init__(self, data_dir: str):
11
+ self.data_dir = data_dir
12
+ self.index = None
13
+ self._initialize_index()
14
+
15
+ def _initialize_index(self):
16
+ """Initializes the VectorStoreIndex from documents in data_dir."""
17
+ if not os.path.exists(self.data_dir):
18
+ os.makedirs(self.data_dir, exist_ok=True)
19
+ # Create a dummy standard if empty
20
+ with open(os.path.join(self.data_dir, "sample_code.txt"), "w") as f:
21
+ f.write("Standard 101: Max building height is 20 meters. Min floor area is 50 sqm.")
22
+
23
+ # Check for OpenAI Key
24
+ if not os.environ.get("OPENAI_API_KEY"):
25
+ logger.warning("OPENAI_API_KEY not found. RAG will be disabled/mocked.")
26
+ return
27
+
28
+ try:
29
+ documents = SimpleDirectoryReader(self.data_dir).load_data()
30
+ self.index = VectorStoreIndex.from_documents(
31
+ documents,
32
+ transformations=[SentenceSplitter(chunk_size=512)]
33
+ )
34
+ logger.info(f"Initialized LlamaIndex with {len(documents)} documents.")
35
+ except Exception as e:
36
+ logger.error(f"Failed to initialize LlamaIndex: {e}")
37
+
38
+ def query(self, question: str) -> str:
39
+ """Queries the index."""
40
+ if not self.index:
41
+ return "RAG System not initialized (Missing OpenAI Key or Index Error). Standard 101 says Max Height 20m."
42
+
43
+ try:
44
+ query_engine = self.index.as_query_engine()
45
+ response = query_engine.query(question)
46
+ return str(response)
47
+ except Exception as e:
48
+ logger.error(f"Query failed: {e}")
49
+ return f"Error querying standards: {e}"
50
+
51
+ # Singleton
52
+ # Assumes 'standards' folder is in the project root
53
+ STANDARDS_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "standards")
54
+ rag_engine = StandardsIndex(STANDARDS_DIR)
server/rules_engine.py ADDED
@@ -0,0 +1,68 @@
1
+ import yaml
2
+ import logging
3
+ from typing import Dict, Any, List
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
+ class RuleEngine:
8
+ def __init__(self):
9
+ pass
10
+
11
+ def load_rules(self, rules_yaml: str) -> Dict[str, Any]:
12
+ """Loads rules from a YAML string."""
13
+ try:
14
+ return yaml.safe_load(rules_yaml)
15
+ except yaml.YAMLError as e:
16
+ logger.error(f"Error parsing rules YAML: {e}")
17
+ raise ValueError(f"Invalid rules format: {e}")
18
+
19
+ def evaluate(self, metrics: Dict[str, Any], rules: Dict[str, Any]) -> Dict[str, Any]:
20
+ """
21
+ Evaluates metrics against a set of rules.
22
+
23
+ Args:
24
+ metrics: Dictionary of extracted metrics (e.g., {'building_height': 15.0}).
25
+ rules: Dictionary of rules (e.g., {'max_height': 12.0}).
26
+
27
+ Returns:
28
+ Dictionary with pass/fail results and details.
29
+ """
30
+ results = {
31
+ "passed": True,
32
+ "checks": []
33
+ }
34
+
35
+ # Check Max Height
36
+ if "max_height" in rules:
37
+ limit = rules["max_height"]
38
+ actual = metrics.get("building_height", 0)
39
+ passed = actual <= limit
40
+ results["checks"].append({
41
+ "rule": "Max Height",
42
+ "limit": limit,
43
+ "actual": actual,
44
+ "passed": passed,
45
+ "message": "Height within limits" if passed else f"Height {actual} exceeds limit {limit}"
46
+ })
47
+ if not passed:
48
+ results["passed"] = False
49
+
50
+ # Check Min Area
51
+ if "min_area" in rules:
52
+ limit = rules["min_area"]
53
+ actual = metrics.get("total_floor_area", 0)
54
+ passed = actual >= limit
55
+ results["checks"].append({
56
+ "rule": "Min Floor Area",
57
+ "limit": limit,
58
+ "actual": actual,
59
+ "passed": passed,
60
+ "message": "Area meets minimum" if passed else f"Area {actual} is below minimum {limit}"
61
+ })
62
+ if not passed:
63
+ results["passed"] = False
64
+
65
+ return results
66
+
67
+ # Singleton for simple usage
68
+ engine = RuleEngine()
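A minimal usage sketch of the rules engine above (the metric and rule values are illustrative):

```python
from server.rules_engine import engine

rules = engine.load_rules("max_height: 20.0\nmin_area: 50.0")
result = engine.evaluate({"building_height": 18.5, "total_floor_area": 42.0}, rules)

print(result["passed"])  # False: the floor area is below the 50.0 minimum
for check in result["checks"]:
    print(check["rule"], check["passed"], check["message"])
```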
server/tools.py ADDED
@@ -0,0 +1,268 @@
1
+ """Compliance helpers for CAD/IFC-driven MCP server."""
2
+ from __future__ import annotations
3
+
4
+ from dataclasses import dataclass
5
+ from pathlib import Path
6
+ from typing import Dict, List, Optional
7
+ import json
8
+ import yaml
9
+
10
+
11
+ @dataclass
12
+ class ToolResult:
13
+ title: str
14
+ summary: str
15
+ artifacts: Dict[str, str]
16
+
17
+
18
+ class IfcMetricsTool:
19
+ """Parse simplified IFC-like JSON to extract metrics used by rules."""
20
+
21
+ def extract_metrics(self, path: str) -> ToolResult:
22
+ data = json.loads(Path(path).read_text())
23
+ metrics = {
24
+ "height_m": float(data.get("height_m", 0)),
25
+ "footprint_area_m2": float(data.get("footprint_area_m2", 0)),
26
+ "num_floors": int(data.get("num_floors", 0)),
27
+ }
28
+ summary = f"Height: {metrics['height_m']} m, area: {metrics['footprint_area_m2']} m^2, floors: {metrics['num_floors']}"
29
+ return ToolResult(title="IFC metrics", summary=summary, artifacts={"metrics": json.dumps(metrics, indent=2)})
30
+
31
+
32
+ class RuleEvaluator:
33
+ def evaluate(self, metrics: Dict[str, float], rules_path: str) -> ToolResult:
34
+ rule_pack = yaml.safe_load(Path(rules_path).read_text())
35
+ results: List[Dict[str, object]] = []
36
+ passed = True
37
+ for rule in rule_pack.get("rules", []):
38
+ field = rule["field"]
39
+ op = rule.get("op", "<=")
40
+ threshold = rule.get("value", 0)
41
+ observed = metrics.get(field, 0)
42
+ if op == "<=":
43
+ ok = observed <= threshold
44
+ elif op == ">=":
45
+ ok = observed >= threshold
46
+ else:
47
+ ok = False
48
+ results.append({"rule": rule.get("name", field), "pass": ok, "observed": observed, "threshold": threshold})
49
+ passed = passed and ok
50
+
51
+ summary = "All rules passed" if passed else "Rule violations detected"
52
+ return ToolResult(
53
+ title="Compliance evaluation",
54
+ summary=summary,
55
+ artifacts={"results": json.dumps(results, indent=2), "passed": json.dumps(passed)},
56
+ )
57
+
58
+
59
+ class GeminiDocTool:
60
+ def draft_prompt(self, description: str, missing_items: List[str]) -> ToolResult:
61
+ checklist = "\n".join([f"- {item}" for item in missing_items])
62
+ prompt = (
63
+ "You are a permitting reviewer. Summarize the project description and list missing artifacts.\n"
64
+ f"Description: {description}\nMissing items:\n{checklist}"
65
+ )
66
+ return ToolResult(
67
+ title="Gemini document prompt",
68
+ summary="Prompt prepared for Gemini-powered review.",
69
+ artifacts={"prompt": prompt},
70
+ )
71
+
72
+
73
+ class StandardCatalogTool:
74
+ """Search ISO/IEC/EN standard snippets for quick lookups."""
75
+
76
+ def __init__(self, catalog_path: Optional[str] = None) -> None:
77
+ base = Path(__file__).resolve().parent.parent
78
+ self.catalog_path = Path(catalog_path or base / "standards" / "catalog.yaml")
79
+
80
+ def search(self, keyword: str, jurisdiction: Optional[str] = None, tag: Optional[str] = None) -> ToolResult:
81
+ catalog = yaml.safe_load(self.catalog_path.read_text()) or {}
82
+ entries: List[Dict[str, str]] = catalog.get("standards", [])
83
+ keyword_lower = keyword.lower()
84
+ filtered = []
85
+ for entry in entries:
86
+ title = entry.get("title", "")
87
+ desc = entry.get("summary", "")
88
+ jurisdiction_match = True if not jurisdiction else jurisdiction.lower() in entry.get("jurisdiction", "").lower()
89
+ tag_match = True if not tag else tag.lower() in " ".join(entry.get("tags", [])).lower()
90
+ keyword_match = keyword_lower in title.lower() or keyword_lower in desc.lower() or keyword_lower in entry.get("id", "").lower()
91
+ if jurisdiction_match and tag_match and keyword_match:
92
+ filtered.append(entry)
93
+
94
+ summary = f"Found {len(filtered)} standards matching '{keyword}'"
95
+ return ToolResult(title="Standards lookup", summary=summary, artifacts={"standards": json.dumps(filtered, indent=2)})
96
+
97
+
98
+ class BuildingCodeChecklistTool:
99
+ """Generate building-code and ISO alignment checklist for a project brief."""
100
+
101
+ def generate(self, occupancy: str, construction_type: str, height_m: float) -> ToolResult:
102
+ checklist = [
103
+ {
104
+ "item": "IBC/IRC occupancy confirmation",
105
+ "details": f"Verify occupancy group '{occupancy}' against Chapter 3 use definitions.",
106
+ },
107
+ {
108
+ "item": "Construction type vs height",
109
+ "details": (
110
+ f"Check {construction_type} height {height_m} m against IBC Table 504 allowances; flag podium separations if needed."
111
+ ),
112
+ },
113
+ {
114
+ "item": "Fire resistance & egress",
115
+ "details": "Confirm fire ratings, stair/exit widths, and travel distance per IBC Chapters 7, 10; align with NFPA 101 where applicable.",
116
+ },
117
+ {
118
+ "item": "Accessibility",
119
+ "details": "Coordinate ANSI A117.1 and ISO 21542 clearances for doors, ramps, and toilets; ensure jurisdictional amendments are captured.",
120
+ },
121
+ {
122
+ "item": "Energy + sustainability",
123
+ "details": "Map envelope/MEP targets to IECC/ASHRAE 90.1 plus ISO 52000 energy performance indicators.",
124
+ },
125
+ {
126
+ "item": "Digital delivery",
127
+ "details": "Ensure BIM deliverables follow ISO 19650 naming and exchange information requirements (EIR/BEP).",
128
+ },
129
+ ]
130
+ return ToolResult(
131
+ title="Building code checklist",
132
+ summary="Checklist drafted with IBC/ISO references.",
133
+ artifacts={"checklist": json.dumps(checklist, indent=2)},
134
+ )
135
+
136
+
137
+ class IsoMappingTool:
138
+ """Map project artifacts to relevant ISO/EN references for documentation."""
139
+
140
+ def __init__(self, mapping_path: Optional[str] = None) -> None:
141
+ base = Path(__file__).resolve().parent.parent
142
+ self.mapping_path = Path(mapping_path or base / "standards" / "iso_mapping.yaml")
143
+
144
+ def map_artifacts(self, artifact_types: List[str]) -> ToolResult:
145
+ mapping_doc = yaml.safe_load(self.mapping_path.read_text()) or {}
146
+ mappings = mapping_doc.get("mappings", [])
147
+ matches: List[Dict[str, str]] = []
148
+ lower_artifacts = [a.lower() for a in artifact_types]
149
+ for entry in mappings:
150
+ if any(token in lower_artifacts for token in [entry.get("artifact", "").lower()] + [t.lower() for t in entry.get("aliases", [])]):
151
+ matches.append(entry)
152
+
153
+ summary = f"Matched {len(matches)} artifact types to ISO/EN references"
154
+ return ToolResult(
155
+ title="ISO/EN mappings",
156
+ summary=summary,
157
+ artifacts={"mappings": json.dumps(matches, indent=2)},
158
+ )
159
+
160
+
161
+ class HealthcareSpaceProgramTool:
162
+ """Check healthcare department spaces against minimum areas."""
163
+
164
+ def __init__(self, space_path: Optional[str] = None) -> None:
165
+ base = Path(__file__).resolve().parent.parent
166
+ self.space_path = Path(space_path or base / "standards" / "healthcare_spaces.yaml")
167
+
168
+ def evaluate(self, program: List[Dict[str, object]]) -> ToolResult:
169
+ config = yaml.safe_load(self.space_path.read_text()) or {}
170
+ standards = {entry.get("space", "").lower(): entry for entry in config.get("spaces", [])}
171
+ results: List[Dict[str, object]] = []
172
+ for item in program:
173
+ name = str(item.get("space", "")).lower()
174
+ provided = float(item.get("area_m2", 0))
175
+ std = standards.get(name)
176
+ if std:
177
+ min_area = float(std.get("min_area_m2", 0))
178
+ ok = provided >= min_area
179
+ results.append(
180
+ {
181
+ "space": item.get("space", ""),
182
+ "provided_m2": provided,
183
+ "min_m2": min_area,
184
+ "pass": ok,
185
+ "notes": std.get("notes", ""),
186
+ }
187
+ )
188
+ else:
189
+ results.append(
190
+ {
191
+ "space": item.get("space", ""),
192
+ "provided_m2": provided,
193
+ "min_m2": None,
194
+ "pass": None,
195
+ "notes": "No healthcare baseline found",
196
+ }
197
+ )
198
+
199
+ summary = "Healthcare program validated" if all(r.get("pass", True) is not False for r in results) else "Program needs attention"
200
+ return ToolResult(
201
+ title="Healthcare space check",
202
+ summary=summary,
203
+ artifacts={"results": json.dumps(results, indent=2)},
204
+ )
205
+
206
+
207
+ class HealthcareCodeBundleTool:
208
+ """Return key US healthcare code touchpoints for a facility type."""
209
+
210
+ def summarize(self, facility_type: str, risk_category: Optional[str] = None) -> ToolResult:
211
+ ft_lower = facility_type.lower()
212
+ bundle: List[Dict[str, str]] = [
213
+ {
214
+ "code": "IBC 2021 - Group I-2",
215
+ "focus": "Occupancy definition, smoke compartment sizing, egress width for patient care areas.",
216
+ },
217
+ {
218
+ "code": "NFPA 101 Life Safety Code",
219
+ "focus": "Horizontal/vertical separation, defend-in-place strategies, healthcare corridor requirements.",
220
+ },
221
+ {
222
+ "code": "NFPA 99 Health Care Facilities Code",
223
+ "focus": "Medical gas categories, electrical systems, ITM for critical branches.",
224
+ },
225
+ {
226
+ "code": "FGI 2022 Guidelines",
227
+ "focus": "Space sizes, clearances, nurse station visibility, sterile/soiled flows.",
228
+ },
229
+ ]
230
+
231
+ if "outpatient" in ft_lower:
232
+ bundle.append(
233
+ {
234
+ "code": "CMS ASC (41 CFR 416)",
235
+ "focus": "Ambulatory surgical center infection control, anesthesia gas storage, recovery bays.",
236
+ }
237
+ )
238
+ else:
239
+ bundle.append(
240
+ {
241
+ "code": "CMS Conditions of Participation (42 CFR 482)",
242
+ "focus": "Emergency services, patient rights, physical environment maintenance for hospitals.",
243
+ }
244
+ )
245
+
246
+ if risk_category:
247
+ bundle.append(
248
+ {
249
+ "code": "ASCE 7 Risk Category",
250
+ "focus": f"Confirm essential facility structural/Seismic Design Category for risk category {risk_category}.",
251
+ }
252
+ )
253
+
254
+ summary = f"Healthcare code bundle for {facility_type}"
255
+ return ToolResult(title="Healthcare codes", summary=summary, artifacts={"bundle": json.dumps(bundle, indent=2)})
256
+
257
+
258
+ __all__ = [
259
+ "ToolResult",
260
+ "IfcMetricsTool",
261
+ "RuleEvaluator",
262
+ "GeminiDocTool",
263
+ "StandardCatalogTool",
264
+ "BuildingCodeChecklistTool",
265
+ "IsoMappingTool",
266
+ "HealthcareSpaceProgramTool",
267
+ "HealthcareCodeBundleTool",
268
+ ]
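For reference, a sketch of the rule-pack format that `RuleEvaluator` reads (the file name and thresholds are illustrative):

```python
from pathlib import Path
from server.tools import RuleEvaluator

# Hypothetical rule pack matching the schema evaluate() expects
Path("rules.yaml").write_text(
    "rules:\n"
    "  - {name: max_height, field: height_m, op: '<=', value: 20.0}\n"
    "  - {name: min_area, field: footprint_area_m2, op: '>=', value: 50.0}\n"
)

metrics = {"height_m": 18.0, "footprint_area_m2": 45.0, "num_floors": 5}
result = RuleEvaluator().evaluate(metrics, "rules.yaml")
print(result.summary)  # "Rule violations detected" (footprint area below 50.0)
```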
shared_assets/__init__.py ADDED
@@ -0,0 +1 @@
+ # Shared module
shared_assets/__pycache__/storage.cpython-312.pyc ADDED
Binary file (3.57 kB).
shared_assets/storage.py ADDED
@@ -0,0 +1,60 @@
1
+ import os
2
+ import shutil
3
+ import uuid
4
+ import logging
5
+ from pathlib import Path
6
+ from typing import List, Optional
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+ ASSETS_ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "assets")
11
+
12
+ def _get_project_dir(project_name: str) -> Path:
13
+ path = Path(ASSETS_ROOT) / project_name
14
+ path.mkdir(parents=True, exist_ok=True)
15
+ return path
16
+
17
+ def save_file(project_name: str, file_path: str, original_filename: Optional[str] = None) -> str:
18
+ """
19
+ Saves a file to the project's asset directory.
20
+
21
+ Args:
22
+ project_name: Name of the project (e.g., 'civil', 'geo', 'video').
23
+ file_path: Path to the temporary file (e.g., from Gradio upload).
24
+ original_filename: Original name of the file.
25
+
26
+ Returns:
27
+ Absolute path to the saved file.
28
+ """
29
+ project_dir = _get_project_dir(project_name)
30
+
31
+ if original_filename:
32
+ filename = original_filename
33
+ else:
34
+ filename = f"{uuid.uuid4()}_{os.path.basename(file_path)}"
35
+
36
+ # Sanitize filename (basic)
37
+ filename = "".join(c for c in filename if c.isalnum() or c in "._- ")
38
+
39
+ dest_path = project_dir / filename
40
+
41
+ try:
42
+ shutil.copy2(file_path, dest_path)
43
+ logger.info(f"Saved asset to {dest_path}")
44
+ return str(dest_path)
45
+ except Exception as e:
46
+ logger.error(f"Failed to save asset: {e}")
47
+ raise IOError(f"Failed to save asset: {e}")
48
+
49
+ def list_files(project_name: str) -> List[str]:
50
+ """Lists all files in the project's asset directory."""
51
+ project_dir = _get_project_dir(project_name)
52
+ if not project_dir.exists():
53
+ return []
54
+ return [str(p) for p in project_dir.glob("*") if p.is_file()]
55
+
56
+ def get_file_path(project_name: str, filename: str) -> Optional[str]:
57
+ """Gets the absolute path of a file if it exists."""
58
+ project_dir = _get_project_dir(project_name)
59
+ path = project_dir / filename
60
+ return str(path) if path.exists() else None
shared_mcp/__init__.py ADDED
@@ -0,0 +1 @@
+ # Shared MCP Framework
shared_mcp/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (162 Bytes).
shared_mcp/__pycache__/app.cpython-312.pyc ADDED
Binary file (2.55 kB).
shared_mcp/__pycache__/models.cpython-312.pyc ADDED
Binary file (1.37 kB).
shared_mcp/__pycache__/registry.cpython-312.pyc ADDED
Binary file (2.91 kB).
shared_mcp/app.py ADDED
@@ -0,0 +1,47 @@
1
+ from fastapi import FastAPI, HTTPException
2
+ from fastapi.middleware.cors import CORSMiddleware
3
+ from .models import ToolRequest, ToolResponse, ToolSchema
4
+ from .registry import registry
5
+ from typing import List
6
+
7
+ app = FastAPI(title="Shared MCP Server", version="0.1.0")
8
+
9
+ app.add_middleware(
10
+ CORSMiddleware,
11
+ allow_origins=["*"],
12
+ allow_credentials=True,
13
+ allow_methods=["*"],
14
+ allow_headers=["*"],
15
+ )
16
+
17
+ @app.get("/tools", response_model=List[ToolSchema])
18
+ async def list_tools():
19
+ """Lists all available tools."""
20
+ return registry.list_tools()
21
+
22
+ @app.post("/invoke", response_model=ToolResponse)
23
+ async def invoke_tool(request: ToolRequest):
24
+ """Invokes a specific tool."""
25
+ try:
26
+ result = await registry.invoke(request.tool_name, request.arguments)
27
+
28
+ # Format result as MCP content list
29
+ content = []
30
+ if isinstance(result, str):
31
+ content.append({"type": "text", "text": result})
32
+ elif isinstance(result, dict) or isinstance(result, list):
33
+ import json
34
+ content.append({"type": "text", "text": json.dumps(result)})
35
+ else:
36
+ content.append({"type": "text", "text": str(result)})
37
+
38
+ return ToolResponse(content=content)
39
+
40
+ except ValueError as e:
41
+ raise HTTPException(status_code=404, detail=str(e))
42
+ except Exception as e:
43
+ raise HTTPException(status_code=500, detail=str(e))
44
+
45
+ @app.get("/health")
46
+ async def health_check():
47
+ return {"status": "ok"}
shared_mcp/models.py ADDED
@@ -0,0 +1,15 @@
1
+ from pydantic import BaseModel, Field
2
+ from typing import Dict, Any, Optional, List
3
+
4
+ class ToolSchema(BaseModel):
5
+ name: str = Field(..., description="The name of the tool")
6
+ description: str = Field(..., description="A description of what the tool does")
7
+ input_schema: Dict[str, Any] = Field(..., description="JSON schema for the tool input")
8
+
9
+ class ToolRequest(BaseModel):
10
+ tool_name: str
11
+ arguments: Dict[str, Any]
12
+
13
+ class ToolResponse(BaseModel):
14
+ content: List[Dict[str, Any]]
15
+ is_error: bool = False
shared_mcp/registry.py ADDED
@@ -0,0 +1,45 @@
1
+ from typing import Callable, Dict, List, Any
2
+ from .models import ToolSchema
3
+ import logging
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
+ class ToolRegistry:
8
+ def __init__(self):
9
+ self._tools: Dict[str, ToolSchema] = {}
10
+ self._handlers: Dict[str, Callable] = {}
11
+
12
+ def register(self, schema: ToolSchema, handler: Callable):
13
+ """Registers a tool with its schema and handler function."""
14
+ if schema.name in self._tools:
15
+ logger.warning(f"Overwriting existing tool: {schema.name}")
16
+
17
+ self._tools[schema.name] = schema
18
+ self._handlers[schema.name] = handler
19
+ logger.info(f"Registered tool: {schema.name}")
20
+
21
+ def get_tool(self, name: str) -> ToolSchema:
22
+ return self._tools.get(name)
23
+
24
+ def list_tools(self) -> List[ToolSchema]:
25
+ return list(self._tools.values())
26
+
27
+ async def invoke(self, name: str, arguments: Dict[str, Any]) -> Any:
28
+ handler = self._handlers.get(name)
29
+ if not handler:
30
+ raise ValueError(f"Tool not found: {name}")
31
+
32
+ try:
33
+ # Support both async and sync handlers
34
+ if hasattr(handler, '__call__'):
35
+ import inspect
36
+ if inspect.iscoroutinefunction(handler):
37
+ return await handler(**arguments)
38
+ else:
39
+ return handler(**arguments)
40
+ except Exception as e:
41
+ logger.error(f"Error invoking tool {name}: {e}")
42
+ raise e
43
+
44
+ # Global registry instance
45
+ registry = ToolRegistry()
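A small sketch of registering and invoking a tool through the shared registry (the `echo` tool is hypothetical):

```python
import asyncio

from shared_mcp.models import ToolSchema
from shared_mcp.registry import registry

def echo(text: str) -> str:
    # Plain sync handler; async handlers are awaited by registry.invoke()
    return text.upper()

registry.register(
    ToolSchema(
        name="echo",
        description="Uppercases the given text.",
        input_schema={
            "type": "object",
            "properties": {"text": {"type": "string"}},
            "required": ["text"],
        },
    ),
    echo,
)

print(asyncio.run(registry.invoke("echo", {"text": "hello"})))  # HELLO
```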
shared_report/__init__.py ADDED
@@ -0,0 +1 @@
+ # Shared module
shared_report/report.py ADDED
@@ -0,0 +1,99 @@
1
+ import logging
2
+ from typing import Dict, Any
3
+ import markdown2
4
+ # import weasyprint # Commented out as it might require system deps like pango/cairo
5
+ import os
6
+
7
+ logger = logging.getLogger(__name__)
8
+
9
+ # Placeholder for Gemini client
10
+ # In a real implementation, we'd import the actual Gemini client wrapper
11
+ class MockGeminiClient:
12
+ def generate_content(self, prompt: str) -> str:
13
+ return f"Generated report content based on: {prompt[:50]}..."
14
+
15
+ gemini_client = MockGeminiClient()
16
+
17
+ TEMPLATES = {
18
+ "compliance": """
19
+ # Compliance Report
20
+ **Project:** {project_name}
21
+ **Date:** {date}
22
+
23
+ ## Summary
24
+ {summary}
25
+
26
+ ## Validation Results
27
+ {validation_details}
28
+ """,
29
+ "geo": """
30
+ # Geo Insights Report
31
+ **Region:** {region_name}
32
+ **Analysis Date:** {date}
33
+
34
+ ## Findings
35
+ {findings}
36
+
37
+ ## Recommendations
38
+ {recommendations}
39
+ """,
40
+ "video": """
41
+ # Video Production Summary
42
+ **Project:** {project_name}
43
+
44
+ ## Pipeline Steps
45
+ {pipeline_steps}
46
+
47
+ ## Script
48
+ {script}
49
+ """
50
+ }
51
+
52
+ def generate_report_markdown(template_name: str, data: Dict[str, Any]) -> str:
53
+ """Generates a markdown report using a template and data."""
54
+ template = TEMPLATES.get(template_name)
55
+ if not template:
56
+ raise ValueError(f"Template not found: {template_name}")
57
+
58
+ # Simple string formatting for now
59
+ # In production, use Jinja2
60
+ try:
61
+ return template.format(**data)
62
+ except KeyError as e:
63
+ logger.error(f"Missing data for template {template_name}: {e}")
64
+ return f"Error generating report: Missing data {e}"
65
+
66
+ def generate_pdf(markdown_content: str, output_path: str):
67
+ """Converts markdown to PDF."""
68
+ html_content = markdown2.markdown(markdown_content)
69
+
70
+ # Basic HTML wrapper with styles
71
+ full_html = f"""
72
+ <html>
73
+ <head>
74
+ <style>
75
+ body {{ font-family: sans-serif; padding: 20px; }}
76
+ h1 {{ color: #333; }}
77
+ h2 {{ color: #555; border-bottom: 1px solid #ccc; }}
78
+ </style>
79
+ </head>
80
+ <body>
81
+ {html_content}
82
+ </body>
83
+ </html>
84
+ """
85
+
86
+ # For hackathon without weasyprint, we might just save HTML or Markdown
87
+ # If weasyprint was available:
88
+ # weasyprint.HTML(string=full_html).write_pdf(output_path)
89
+
90
+ # Fallback: Save as HTML
91
+ with open(output_path.replace(".pdf", ".html"), "w") as f:
92
+ f.write(full_html)
93
+ logger.info(f"Report saved to {output_path.replace('.pdf', '.html')}")
94
+
95
+ def create_report(template_name: str, data: Dict[str, Any], output_path: str):
96
+ """High-level function to create a report."""
97
+ md = generate_report_markdown(template_name, data)
98
+ generate_pdf(md, output_path)
99
+ return output_path
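A brief usage sketch of the report helpers above (field values are illustrative; note the current fallback writes HTML rather than PDF):

```python
from shared_report import report

data = {
    "project_name": "Demo Tower",
    "date": "2025-11-20",
    "summary": "Passed",
    "validation_details": "- Max Height: OK\n- Min Floor Area: OK",
}

# Produces compliance_report.html in the working directory
# (PDF output is stubbed out while weasyprint is unavailable).
report.create_report("compliance", data, "compliance_report.pdf")
```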
shared_ui/__init__.py ADDED
@@ -0,0 +1 @@
+ # Shared module
shared_ui/__pycache__/components.cpython-312.pyc ADDED
Binary file (2.37 kB).
shared_ui/components.py ADDED
@@ -0,0 +1,39 @@
1
+ import gradio as gr
2
+ import os
3
+
4
+ def get_shared_css_path():
5
+ """Returns the absolute path to the shared CSS file."""
6
+ current_dir = os.path.dirname(os.path.abspath(__file__))
7
+ return os.path.join(current_dir, "style.css")
8
+
9
+ def create_header(title: str, subtitle: str):
10
+ """Creates a standard header component."""
11
+ with gr.Row():
12
+ with gr.Column():
13
+ gr.Markdown(f"# {title}")
14
+ gr.Markdown(f"### {subtitle}")
15
+
16
+ def create_file_uploader(label="Upload File", file_types=None):
17
+ """Creates a standardized file uploader."""
18
+ return gr.File(
19
+ label=label,
20
+ file_types=file_types or [],
21
+ type="filepath",
22
+ elem_classes=["upload-box"]
23
+ )
24
+
25
+ def create_result_table(headers=None):
26
+ """Creates a standardized result table."""
27
+ return gr.Dataframe(
28
+ headers=headers,
29
+ interactive=False,
30
+ wrap=True
31
+ )
32
+
33
+ def show_success_toast(message: str):
34
+ """(Placeholder) In a real app, this would trigger a client-side toast."""
35
+ return gr.Info(message)
36
+
37
+ def show_error_toast(message: str):
38
+ """(Placeholder) In a real app, this would trigger a client-side toast."""
39
+ return gr.Warning(message)
shared_ui/style.css ADDED
@@ -0,0 +1,79 @@
1
+ /* Shared Premium Dark Theme */
2
+ :root {
3
+ --primary-color: #6366f1;
4
+ --secondary-color: #a855f7;
5
+ --background-dark: #0f172a;
6
+ --surface-dark: #1e293b;
7
+ --text-light: #f8fafc;
8
+ --text-dim: #94a3b8;
9
+ }
10
+
11
+ body {
12
+ background-color: var(--background-dark);
13
+ color: var(--text-light);
14
+ font-family: 'Inter', sans-serif;
15
+ }
16
+
17
+ .gradio-container {
18
+ background-color: var(--background-dark) !important;
19
+ }
20
+
21
+ /* Cards / Blocks */
22
+ .block {
23
+ background-color: var(--surface-dark) !important;
24
+ border: 1px solid #334155 !important;
25
+ border-radius: 0.75rem !important;
26
+ box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
27
+ }
28
+
29
+ /* Headers */
30
+ h1, h2, h3 {
31
+ background: linear-gradient(to right, var(--primary-color), var(--secondary-color));
32
+ -webkit-background-clip: text;
33
+ -webkit-text-fill-color: transparent;
34
+ font-weight: 700;
35
+ }
36
+
37
+ /* Buttons */
38
+ button.primary {
39
+ background: linear-gradient(135deg, var(--primary-color), var(--secondary-color)) !important;
40
+ border: none !important;
41
+ color: white !important;
42
+ font-weight: 600 !important;
43
+ transition: transform 0.1s ease-in-out;
44
+ }
45
+
46
+ button.primary:hover {
47
+ transform: scale(1.02);
48
+ opacity: 0.9;
49
+ }
50
+
51
+ /* Tables */
52
+ table {
53
+ border-collapse: collapse;
54
+ width: 100%;
55
+ }
56
+
57
+ th {
58
+ background-color: #334155;
59
+ color: var(--text-light);
60
+ padding: 0.75rem;
61
+ text-align: left;
62
+ }
63
+
64
+ td {
65
+ border-bottom: 1px solid #334155;
66
+ padding: 0.75rem;
67
+ color: var(--text-dim);
68
+ }
69
+
70
+ /* File Upload */
71
+ .upload-box {
72
+ border: 2px dashed #475569 !important;
73
+ background-color: rgba(30, 41, 59, 0.5) !important;
74
+ transition: border-color 0.2s;
75
+ }
76
+
77
+ .upload-box:hover {
78
+ border-color: var(--primary-color) !important;
79
+ }