import gradio as gr
from transformers import pipeline
import pandas as pd
import numpy as np
import os
from datetime import datetime
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# Initialize the zero-shot classification pipeline
sentiment_classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

# Define various classification labels
SENTIMENT_LABELS = ["positive", "negative", "neutral"]
URGENCY_LABELS = ["critical", "high", "medium", "low"]
BRAND_IMPACT_LABELS = ["severe", "moderate", "minimal"]
ISSUE_CATEGORIES = ["product", "service", "security", "fraud", "compliance", "technical", "billing", "general"]

# Keywords for critical issue detection
CRITICAL_KEYWORDS = {
    'security': ['hack', 'breach', 'leaked', 'stolen', 'unauthorized', 'privacy'],
    'fraud': ['scam', 'fraud', 'fake', 'unauthorized charge', 'stolen'],
    'compliance': ['lawsuit', 'legal', 'regulation', 'complaint', 'policy violation'],
    'sensitive': ['racist', 'discrimination', 'harassment', 'abuse', 'offensive']
}

def create_charts(df):
    """
    Create visualization charts using Plotly
    """
    # Create subplot figure
    fig = make_subplots(
        rows=2, cols=2,
        subplot_titles=("Urgency Distribution", "Sentiment Analysis",
                        "Brand Impact Assessment", "Critical Issues Breakdown"),
        specs=[[{"type": "pie"}, {"type": "pie"}],
               [{"type": "pie"}, {"type": "bar"}]]
    )

    # 1. Urgency Distribution Pie Chart
    urgency_counts = df['urgency'].value_counts()
    fig.add_trace(
        go.Pie(labels=urgency_counts.index,
               values=urgency_counts.values,
               marker=dict(colors=['#ff0000', '#ff6666', '#ffcccc', '#ffe6e6'])),
        row=1, col=1
    )

    # 2. Sentiment Analysis Pie Chart
    sentiment_counts = df['sentiment'].value_counts()
    fig.add_trace(
        go.Pie(labels=sentiment_counts.index,
               values=sentiment_counts.values,
               marker=dict(colors=['#00cc00', '#ff0000', '#cccccc'])),
        row=1, col=2
    )

    # 3. Brand Impact Pie Chart
    impact_counts = df['brand_impact'].value_counts()
    fig.add_trace(
        go.Pie(labels=impact_counts.index,
               values=impact_counts.values,
               marker=dict(colors=['#ff0000', '#ff9933', '#ffcc00'])),
        row=2, col=1
    )

    # 4. Critical Issues Bar Chart
    critical_issues = df['critical_issues'].str.split('|', expand=True).stack()
    critical_counts = critical_issues[critical_issues != 'none'].value_counts()
    fig.add_trace(
        go.Bar(x=critical_counts.index,
               y=critical_counts.values,
               marker_color='#ff0000'),
        row=2, col=2
    )

    # Update layout
    fig.update_layout(
        height=800,
        showlegend=True,
        title_text="Social Media Analysis Dashboard",
        title_x=0.5,
        title_font_size=20,
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)'
    )

    # Save the figure
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    chart_file = f"analysis_dashboard_{timestamp}.html"
    fig.write_html(chart_file)
    return chart_file

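# Illustrative standalone use of create_charts (a sketch; the column names below
# mirror the analysis output assembled in process_csv further down):
#
#   demo = pd.DataFrame({
#       'urgency': ['critical', 'low'],
#       'sentiment': ['negative', 'positive'],
#       'brand_impact': ['severe', 'minimal'],
#       'critical_issues': ['security|fraud', 'none'],
#   })
#   html_path = create_charts(demo)  # writes analysis_dashboard_<timestamp>.html
#
# With Plotly's default settings, write_html() embeds plotly.js, so the generated
# dashboard is a self-contained HTML file that opens directly in a browser.
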
def classify_text(text, labels, classifier):
    """
    Perform zero-shot classification on text
    """
    result = classifier(text, labels)
    return result['labels'][0], result['scores'][0]

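# Illustrative shape of the zero-shot pipeline output (the scores below are made up):
#
#   sentiment_classifier("Great support team!", SENTIMENT_LABELS)
#   # -> {'sequence': 'Great support team!',
#   #     'labels': ['positive', 'neutral', 'negative'],  # sorted by score, descending
#   #     'scores': [0.93, 0.05, 0.02]}
#
# classify_text therefore returns the top-ranked label and its confidence score.
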
def detect_critical_issues(text):
    """
    Detect critical issues based on keywords
    """
    text_lower = text.lower()
    issues = []
    for category, keywords in CRITICAL_KEYWORDS.items():
        if any(keyword in text_lower for keyword in keywords):
            issues.append(category)
    return issues

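# Examples of the keyword matcher (derived from CRITICAL_KEYWORDS above):
#
#   detect_critical_issues("My account was hacked and my data leaked")  # -> ['security']
#   detect_critical_issues("Great product, love it!")                   # -> []
#
# An empty list is recorded as 'none' downstream in process_csv.
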
def determine_urgency(text, sentiment, critical_issues):
    """
    Determine urgency level based on content, sentiment, and critical issues
    """
    if critical_issues:
        return "critical"
    elif "!" in text or "?" in text or any(word in text.lower() for word in ['urgent', 'asap', 'immediately']):
        return "high"
    elif sentiment == "negative":
        return "medium"
    else:
        return "low"

def analyze_brand_impact(text, sentiment, critical_issues):
    """
    Analyze potential brand impact
    """
    # Severe if any critical issue was flagged, or if a negative post hints at spread/virality
    if critical_issues or (sentiment == "negative" and ("share" in text.lower() or "viral" in text.lower())):
        return "severe"
    elif sentiment == "negative":
        return "moderate"
    else:
        return "minimal"

def generate_recommendations(row):
    """
    Generate actionable recommendations based on analysis
    """
    recommendations = []
    if row['urgency'] == 'critical':
        recommendations.append("🚨 IMMEDIATE ESCALATION REQUIRED - Route to crisis management team")
    if 'security' in row['critical_issues']:
        recommendations.append("🔒 Engage security team for immediate investigation")
    elif 'fraud' in row['critical_issues']:
        recommendations.append("⚠️ Route to fraud prevention team for investigation")
    elif 'compliance' in row['critical_issues']:
        recommendations.append("📋 Escalate to legal/compliance team for review")
    if row['brand_impact'] == 'severe':
        recommendations.append("📢 Engage PR team for reputation management strategy")
    if row['sentiment'] == 'negative':
        recommendations.append("👥 Priority customer outreach needed for resolution")
    return ' | '.join(recommendations) if recommendations else "✅ Standard response protocol"

def process_csv(file):
    """
    Process posts from CSV file with enhanced analysis
    """
    try:
        # Read the input CSV (Gradio may pass a tempfile-like object or a plain path string)
        file_path = file.name if hasattr(file, "name") else file
        df = pd.read_csv(file_path)

        # Verify required columns
        if 'post_id' not in df.columns or 'text' not in df.columns:
            return None, None, "Error: CSV must contain 'post_id' and 'text' columns"

        # Perform comprehensive analysis
        analysis_results = []
        for _, row in df.iterrows():
            text = row['text']

            # Basic sentiment analysis
            sentiment, sentiment_score = classify_text(text, SENTIMENT_LABELS, sentiment_classifier)

            # Detect critical issues
            critical_issues = detect_critical_issues(text)

            # Determine urgency and brand impact
            urgency = determine_urgency(text, sentiment, critical_issues)
            brand_impact = analyze_brand_impact(text, sentiment, critical_issues)

            # Store results
            analysis_results.append({
                'post_id': row['post_id'],
                'text': text,
                'sentiment': sentiment,
                'sentiment_confidence': round(sentiment_score, 3),
                'urgency': urgency,
                'brand_impact': brand_impact,
                'critical_issues': '|'.join(critical_issues) if critical_issues else 'none',
            })

        # Create results DataFrame and generate recommendations
        results_df = pd.DataFrame(analysis_results)
        results_df['recommendations'] = results_df.apply(generate_recommendations, axis=1)

        # Create visualization dashboard
        dashboard_file = create_charts(results_df)

        # Save results with an analysis timestamp
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_file = f"social_media_analysis_{timestamp}.csv"
        results_df.to_csv(output_file, index=False)

        # Generate summary statistics
        total_posts = len(results_df)
        critical_posts = len(results_df[results_df['urgency'] == 'critical'])
        negative_sentiment = len(results_df[results_df['sentiment'] == 'negative'])
        severe_impact = len(results_df[results_df['brand_impact'] == 'severe'])

        summary = f"""
🎯 Real-Time Social Media Intelligence Report
=============================================

📊 Key Metrics:
---------------
Total Posts Analyzed: {total_posts}
Critical Issues Requiring Immediate Attention: {critical_posts}
Negative Sentiment Posts: {negative_sentiment}
Severe Brand Impact Posts: {severe_impact}

⚡ Quick Actions Required:
--------------------------
- {critical_posts} posts need immediate escalation
- {severe_impact} posts require PR team intervention
- {negative_sentiment} posts need customer satisfaction follow-up

💡 AI-Powered Analysis Complete:
--------------------------------
Detailed analysis saved to: {output_file}
Interactive dashboard saved to: {dashboard_file}
"""
        return output_file, dashboard_file, summary

    except Exception as e:
        return None, None, f"Error processing CSV: {str(e)}"

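# Columns in the CSV produced by process_csv (as assembled above):
#   post_id, text, sentiment, sentiment_confidence, urgency, brand_impact,
#   critical_issues (pipe-separated, or 'none'), recommendations
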
# Create example CSV file with more diverse cases
def create_example_file():
    """
    Create an example CSV file for demonstration
    """
    example_data = {
        'post_id': range(1, 11),
        'text': [
            "Just experienced a major security breach! My account was hacked and sensitive data leaked. This is unacceptable! #cybersecurity #breach",
            "Thank you for the amazing customer service! The team went above and beyond to help me. Truly impressed!",
            "Your latest app update is constantly crashing. Can't access my account for 3 days now. Fix this ASAP!",
            "Noticed some suspicious charges on my account. Possible fraud? Need immediate assistance! 🚨",
            "Love the new features you've added! Makes my work so much easier. Keep innovating!",
            "Planning to file a legal complaint due to repeated policy violations. This needs attention.",
            "System down again? This is the third time this week. Considering switching to your competitor.",
            "Your product has completely transformed our business operations. Best investment ever!",
            "Experiencing discrimination from your staff. This is unacceptable and I'm reporting it.",
            "Warning to others: Potential scam detected in recent transactions. Be careful!"
        ]
    }
    df = pd.DataFrame(example_data)
    example_file = "example_input.csv"
    df.to_csv(example_file, index=False)
    return example_file

# Create the example file
example_file = create_example_file()

# Create Gradio interface with custom theme
theme = gr.themes.Base(
    primary_hue="red",
    secondary_hue="red",
)

css = """
.gradio-container {
    background: linear-gradient(to bottom right, #ffffff, #ffecec);
}
"""

# Create Gradio interface
iface = gr.Interface(
    fn=process_csv,
    inputs=[
        gr.File(
            label="Upload CSV File 📁",
            file_types=[".csv"]
        )
    ],
    outputs=[
        gr.File(label="Download Detailed Analysis Report 📊"),
        gr.File(label="Download Interactive Dashboard 📈"),
        gr.Textbox(label="Real-Time Analysis Summary 📱", max_lines=15)
    ],
    title="🔍 NoCode Ninjas: AI-Powered Social Media Intelligence Platform",
    description="""
    ### Enterprise-Grade Social Media Analytics with Advanced AI
    Transform your social media monitoring with our cutting-edge AI analysis platform:

    🎯 **Real-Time Sentiment Analysis**
    🔍 **Urgent Issue Detection**
    ⚡ **Instant Crisis Alerts**
    📊 **Brand Impact Assessment**
    🤖 **AI-Driven Recommendations**

    *Trusted by leading brands for proactive social media management and crisis prevention.*
    """,
    examples=[
        [example_file]
    ],
    theme=theme,
    css=css
)

if __name__ == "__main__":
    try:
        iface.launch()
    finally:
        # Remove the generated example file when the app shuts down
        if os.path.exists(example_file):
            os.remove(example_file)
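
# To try the app locally (a sketch; it assumes this file is saved as app.py and the
# standard PyPI packages below are installed -- versions are not pinned here):
#
#   pip install gradio transformers torch pandas plotly
#   python app.py
#
# launch() prints a local URL; passing share=True to launch() also creates a
# temporary public link.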