| """ | |
| INVINCIX Chatbot - Gradio Interface with Hugging Face Model | |
| Beautiful UI with chat history and settings | |
| """ | |
| import gradio as gr | |
| from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig | |
| import torch | |
| # ================================================================ | |
| # CONFIGURATION | |
| # ================================================================ | |
| # TODO: Replace with your Hugging Face model path | |
| MODEL_NAME = "amanjain96/invincix-chatbot" # Your HF model | |
| TITLE = "π¬ INVINCIX Chatbot" | |
| DESCRIPTION = """ | |
| **"Simplicity is our culture and simplification is what we do"** | |
| Welcome to INVINCIX! I'm here to help you learn about our services, products, and how we can support your business transformation journey. | |
| Ask me about: | |
| - π οΈ Our Services (Software Development, Product Engineering, Mobile Apps) | |
| - π¦ Our Products (Xprodedge, DigiStack, Zikshaa, AD4P) | |
| - π Company Culture & Values | |
| - π Locations & Contact Information | |
| """ | |
| COMPANY_INFO = """ | |
| ### π― About INVINCIX | |
| **Mission**: Digitize 500+ startups | |
| **Core Values**: | |
| - π€ Engage | |
| - π‘ Innovate | |
| - π Invent | |
| - β Excel | |
| **Services**: | |
| - Software Development | |
| - Product Engineering | |
| - Mobile Applications | |
| - Data Analytics | |
| - Cloud Solutions | |
| - IoT & Drone Solutions | |
| **Products**: | |
| - **Xprodedge**: Product innovation platform | |
| - **DigiStack**: Service automation | |
| - **Zikshaa**: Mentorship platform | |
| - **AD4P**: Application development | |
| **Contact**: | |
| - π§ info@invincix.com | |
| - π± +91 674 297 2316 | |
| - π [invincix.com](https://invincix.com) | |
| - π Bhubaneswar, India + 15 global locations | |
| """ | |
| # ================================================================ | |
| # LOAD MODEL | |
| # ================================================================ | |
| device = "cuda" if torch.cuda.is_available() else "cpu" | |
| print(f"Loading model on {device}...") | |
| try: | |
| tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) | |
| model = AutoModelForCausalLM.from_pretrained( | |
| MODEL_NAME, | |
| torch_dtype=torch.float16 if device == "cuda" else torch.float32, | |
| device_map="auto" if device == "cuda" else None, | |
| low_cpu_mem_usage=True | |
| ) | |
| if device == "cpu": | |
| model = model.to(device) | |
| print("β Model loaded successfully!") | |
| MODEL_LOADED = True | |
| except Exception as e: | |
| print(f"β Error loading model: {e}") | |
| MODEL_LOADED = False | |
| error_message = str(e) | |
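
# Optional: on smaller GPUs the model can be loaded in 4-bit instead, using
# the BitsAndBytesConfig imported above. A minimal sketch (assumes the
# `bitsandbytes` package is installed; not enabled by default):
#
#     bnb_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_compute_dtype=torch.float16,
#     )
#     model = AutoModelForCausalLM.from_pretrained(
#         MODEL_NAME,
#         quantization_config=bnb_config,
#         device_map="auto",
#     )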
# ================================================================
# GENERATE RESPONSE
# ================================================================
def generate_response(message, history, temperature=0.7, max_tokens=300):
    """Generate chatbot response"""
    if not MODEL_LOADED:
        return f"❌ Model failed to load. Error: {error_message}\n\nPlease check your model path and try again."

    # Build prompt with history
    conversation = ""
    for user_msg, bot_msg in history:
        conversation += f"User: {user_msg}\nAssistant: {bot_msg}\n\n"

    prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request as an INVINCIX chatbot assistant.

### Instruction:
You are a helpful and empathetic assistant for INVINCIX, a software engineering and product development company. Respond to the customer's inquiry with warmth, professionalism, and accurate information about INVINCIX's services, products, and values. Keep your response concise and engaging.

### Previous Conversation:
{conversation}
### Input:
{message}

### Response:
"""
    try:
        # Tokenize
        inputs = tokenizer(prompt, return_tensors="pt").to(device)

        # Generate
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                temperature=temperature,
                top_p=0.9,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )

        # Decode
        full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Extract the answer after the final "### Response:" marker
        if "### Response:" in full_response:
            answer = full_response.split("### Response:")[-1].strip()
        else:
            answer = full_response[len(prompt):].strip()

        # Clean up: drop anything generated past the next "###" marker
        answer = answer.split("###")[0].strip()

        return answer
    except Exception as e:
        return f"I apologize, but I encountered an error: {str(e)}. Please try again."
# ================================================================
# GRADIO INTERFACE
# ================================================================
# Custom CSS for INVINCIX branding
custom_css = """
#title {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    padding: 20px;
    border-radius: 10px;
    color: white;
    text-align: center;
    margin-bottom: 20px;
}
#chatbot {
    height: 500px;
}
.gradio-container {
    font-family: 'Arial', sans-serif;
}
#footer {
    text-align: center;
    color: #7f8c8d;
    padding: 20px;
    font-size: 0.9em;
}
"""

# Create interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
    # Header
    gr.Markdown(
        f"""
        <div id="title">
            <h1>{TITLE}</h1>
        </div>
        """
    )
    gr.Markdown(DESCRIPTION)

    # Main chat interface
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                elem_id="chatbot",
                label="Chat with INVINCIX",
                avatar_images=(None, "🤖"),
                bubble_full_width=False,
                show_copy_button=True
            )
            with gr.Row():
                msg = gr.Textbox(
                    label="Your Message",
                    placeholder="Ask me anything about INVINCIX...",
                    lines=2,
                    scale=4
                )
                submit = gr.Button("Send 🚀", variant="primary", scale=1)
            with gr.Row():
                clear = gr.Button("🗑️ Clear Chat")
                retry = gr.Button("🔄 Retry")

        # Sidebar with settings and info
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Settings")
            temperature = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                label="Temperature",
                info="Lower = more consistent, Higher = more creative"
            )
            max_tokens = gr.Slider(
                minimum=100,
                maximum=500,
                value=300,
                step=50,
                label="Max Tokens",
                info="Maximum response length"
            )
            gr.Markdown("---")
            gr.Markdown(COMPANY_INFO)

            # System status
            status_emoji = "⚡" if device == "cuda" else "💻"
            model_status = "✅ Loaded" if MODEL_LOADED else "❌ Error"
            gr.Markdown(f"""
            ### 📊 System Status
            - Device: {status_emoji} {device.upper()}
            - Model: {model_status}
            - Ready: {"✅" if MODEL_LOADED else "❌"}
            """)

    # Examples
    gr.Examples(
        examples=[
            "What is INVINCIX?",
            "Tell me about your services",
            "What is Xprodedge?",
            "How can you help startups?",
            "What are your office locations?",
            "Tell me about your company culture",
        ],
        inputs=msg,
        label="💡 Try these questions:"
    )

    # Footer
    gr.Markdown(
        """
        <div id="footer">
            <p><em>"We keep our feet grounded to ensure your head is in the cloud"</em></p>
            <p>Powered by fine-tuned AI | Built for INVINCIX</p>
        </div>
        """
    )

    # Event handlers
    def respond(message, chat_history, temp, max_tok):
        if not message.strip():
            return "", chat_history
        bot_message = generate_response(message, chat_history, temp, max_tok)
        chat_history.append((message, bot_message))
        return "", chat_history

    def retry_last(chat_history, temp, max_tok):
        if not chat_history:
            return chat_history
        last_message = chat_history[-1][0]
        chat_history = chat_history[:-1]
        bot_message = generate_response(last_message, chat_history, temp, max_tok)
        chat_history.append((last_message, bot_message))
        return chat_history

    # Wire up events
    msg.submit(respond, [msg, chatbot, temperature, max_tokens], [msg, chatbot])
    submit.click(respond, [msg, chatbot, temperature, max_tokens], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)
    retry.click(retry_last, [chatbot, temperature, max_tokens], chatbot)
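
# Optional: with several simultaneous users, Gradio's request queue keeps
# generation requests from overlapping on one GPU. A minimal sketch
# (disabled by default here):
#
#     demo.queue(max_size=20)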
# ================================================================
# LAUNCH
# ================================================================
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # Allow external access
        server_port=7860,       # Default Gradio port
        share=False,            # Set to True to create public link
        show_error=True
    )