""" INVINCIX Chatbot - Gradio Interface with Hugging Face Model Beautiful UI with chat history and settings """ import gradio as gr from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig import torch # ================================================================ # CONFIGURATION # ================================================================ # TODO: Replace with your Hugging Face model path MODEL_NAME = "amanjain96/invincix-chatbot" # Your HF model TITLE = "💬 INVINCIX Chatbot" DESCRIPTION = """ **"Simplicity is our culture and simplification is what we do"** Welcome to INVINCIX! I'm here to help you learn about our services, products, and how we can support your business transformation journey. Ask me about: - 🛠️ Our Services (Software Development, Product Engineering, Mobile Apps) - 📦 Our Products (Xprodedge, DigiStack, Zikshaa, AD4P) - 🌟 Company Culture & Values - 📍 Locations & Contact Information """ COMPANY_INFO = """ ### 🎯 About INVINCIX **Mission**: Digitize 500+ startups **Core Values**: - 🤝 Engage - 💡 Innovate - 🚀 Invent - ⭐ Excel **Services**: - Software Development - Product Engineering - Mobile Applications - Data Analytics - Cloud Solutions - IoT & Drone Solutions **Products**: - **Xprodedge**: Product innovation platform - **DigiStack**: Service automation - **Zikshaa**: Mentorship platform - **AD4P**: Application development **Contact**: - 📧 info@invincix.com - 📱 +91 674 297 2316 - 🌐 [invincix.com](https://invincix.com) - 📍 Bhubaneswar, India + 15 global locations """ # ================================================================ # LOAD MODEL # ================================================================ device = "cuda" if torch.cuda.is_available() else "cpu" print(f"Loading model on {device}...") try: tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) model = AutoModelForCausalLM.from_pretrained( MODEL_NAME, torch_dtype=torch.float16 if device == "cuda" else torch.float32, device_map="auto" if device == "cuda" else None, low_cpu_mem_usage=True ) if device == "cpu": model = model.to(device) print("✅ Model loaded successfully!") MODEL_LOADED = True except Exception as e: print(f"❌ Error loading model: {e}") MODEL_LOADED = False error_message = str(e) # ================================================================ # GENERATE RESPONSE # ================================================================ def generate_response(message, history, temperature=0.7, max_tokens=300): """Generate chatbot response""" if not MODEL_LOADED: return f"❌ Model failed to load. Error: {error_message}\n\nPlease check your model path and try again." # Build prompt with history conversation = "" for user_msg, bot_msg in history: conversation += f"User: {user_msg}\nAssistant: {bot_msg}\n\n" prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request as an INVINCIX chatbot assistant. ### Instruction: You are a helpful and empathetic assistant for INVINCIX, a software engineering and product development company. Respond to the customer's inquiry with warmth, professionalism, and accurate information about INVINCIX's services, products, and values. Keep your response concise and engaging. 
### Previous Conversation:
{conversation}

### Input:
{message}

### Response:
"""

    try:
        # Tokenize; model.device also covers the device_map="auto" case
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

        # Generate
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                temperature=temperature,
                top_p=0.9,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )

        # Decode
        full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Keep only the text after the final "### Response:" marker
        if "### Response:" in full_response:
            answer = full_response.split("### Response:")[-1].strip()
        else:
            answer = full_response[len(prompt):].strip()

        # Drop anything generated past the next "###" section marker
        # (this single split also covers a stray "### Input:")
        answer = answer.split("###")[0].strip()

        return answer

    except Exception as e:
        return f"I apologize, but I encountered an error: {str(e)}. Please try again."

# ================================================================
# GRADIO INTERFACE
# ================================================================

# Custom CSS for INVINCIX branding
custom_css = """
#title {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    padding: 20px;
    border-radius: 10px;
    color: white;
    text-align: center;
    margin-bottom: 20px;
}
#chatbot {
    height: 500px;
}
.gradio-container {
    font-family: 'Arial', sans-serif;
}
#footer {
    text-align: center;
    color: #7f8c8d;
    padding: 20px;
    font-size: 0.9em;
}
"""

# Create interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
    # Header
    gr.Markdown(
        f"""
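# {TITLE}

{DESCRIPTION}
""",
        elem_id="title",
    )

    # ------------------------------------------------------------------
    # NOTE: the original section ends at the gr.Markdown(f""" call above.
    # What follows is a minimal sketch of how the interface could be
    # completed, assuming TITLE/DESCRIPTION fill the header and
    # generate_response drives the chat. The component names, layout,
    # slider ranges, and event wiring below are illustrative assumptions,
    # not the original code.
    # ------------------------------------------------------------------
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(elem_id="chatbot", label="Chat")
            msg = gr.Textbox(
                placeholder="Ask me anything about INVINCIX...",
                label="Your message",
            )
            with gr.Row():
                submit_btn = gr.Button("Send", variant="primary")
                gr.ClearButton([msg, chatbot], value="Clear")
        with gr.Column(scale=1):
            gr.Markdown(COMPANY_INFO)
            temperature = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature")
            max_tokens = gr.Slider(50, 500, value=300, step=50, label="Max tokens")

    def respond(message, history, temperature, max_tokens):
        """Run one chat turn: query the model, append the pair to history."""
        answer = generate_response(message, history, temperature, max_tokens)
        return "", history + [(message, answer)]

    # Both pressing Enter and clicking Send trigger a turn
    msg.submit(respond, [msg, chatbot, temperature, max_tokens], [msg, chatbot])
    submit_btn.click(respond, [msg, chatbot, temperature, max_tokens], [msg, chatbot])

    gr.Markdown("<div id='footer'>Powered by INVINCIX</div>")

if __name__ == "__main__":
    demo.launch()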