### Google Docs API (Fetching Text)
import os
import json
from typing import List
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from langchain.schema import Document
SCOPES = [
    "https://www.googleapis.com/auth/drive.readonly",
    "https://www.googleapis.com/auth/documents.readonly",
]
### Google Docs File IDs:
INITIAL_FILEID = "1xWRgZ4c6BhBV97WniRY5vIWTyGlQSMljjXggKt3jfIY"
TECHNICAL_SEO_FILEID = "1HGt1K9AbFz1GwY6jzQiVGmqZwgP6zHPwDotGD1d8bHU"
CONTENT_WRITING_FILEID = "1IdSXZwKeMo4su80s3sn4iSEQ18pvXDBsFW_uobj4zBQ"
CONTENT_MARKETING_FILEID = "11QfPGe2XY57RL764FoeN68lI1LvjOdwOM5pDaxyC3O8"
LOCAL_SEO_FILEID = "1uc4qH5roh6_xzv5x4osZG7nrPHpPUqRz9qPn31Y1azY"
creds = None
### Google Drive Authentication with a Service Account (Google Cloud)
def auth_google_docs():
    service_account_info = json.loads(os.getenv("GOOGLE_SERVICE_ACCOUNT_JSON"))
    creds = service_account.Credentials.from_service_account_info(
        service_account_info,
        scopes=SCOPES
    )
    return creds
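### Sanity check (assumes GOOGLE_SERVICE_ACCOUNT_JSON is set and the service
### account has been granted read access to the docs below):
# creds = auth_google_docs()
# drive = build("drive", "v3", credentials=creds)
# print(drive.about().get(fields="user").execute())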
### Previous Logic (Local Development)
# def auth_google_docs():
#     global creds
#     if DOCS_TOKEN:
#         try:
#             token_json = json.loads(DOCS_TOKEN)
#             creds = Credentials.from_authorized_user_info(token_json, SCOPES)
#         except Exception:
#             creds = None
#     if not creds or not creds.valid:
#         if creds and creds.expired and creds.refresh_token:
#             creds.refresh(Request())
#         else:
#             if DOCS_CREDENTIALS:
#                 client_config_json = json.loads(DOCS_CREDENTIALS)
#                 flow = InstalledAppFlow.from_client_config(
#                     client_config_json, SCOPES
#                 )
#                 creds = flow.run_local_server(port=0)
#     return creds
def get_docs_text() -> List[Document]:
    creds = auth_google_docs()
    ### Build the Google Docs service
    service = build("docs", "v1", credentials=creds)
    doc_ids = [
        INITIAL_FILEID, TECHNICAL_SEO_FILEID, CONTENT_WRITING_FILEID,
        CONTENT_MARKETING_FILEID, LOCAL_SEO_FILEID
    ]
    docs = []
    for doc_id in doc_ids:
        try:
            doc = service.documents().get(documentId=doc_id).execute()
            title = doc["title"]
            elements = doc["body"]["content"]
            text = ""
            for elem in elements:
                if "paragraph" in elem:
                    for run in elem["paragraph"]["elements"]:
                        if "textRun" in run:
                            text += run["textRun"]["content"]
            ### Wrap each doc in a (Pydantic-based) LangChain Document
            docs.append(
                Document(
                    page_content=text,
                    metadata={"title": title, "id": doc_id}
                )
            )
        except Exception as e:
            print(f"Failed to fetch {doc_id}: {e}")
    return docs
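### Sanity check (makes live Docs API calls): list fetched titles and lengths
# for d in get_docs_text():
#     print(d.metadata["title"], len(d.page_content))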
### LangChain LCEL (RAG) Implementation
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_chroma import Chroma
from langchain.prompts import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.output_parsers import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from dotenv import load_dotenv
OPENAI_MODEL_ID = "gpt-5-mini"
db_path = "vector_db"
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
def load_docs() -> List[Document]:
    docs: List[Document] = get_docs_text()
    return docs
# print("Number of Docs: ", len(load_docs()))
def split_text():
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=800, chunk_overlap=100,
        separators=["\n\n", "\n", ". ", " ", ""]
    )
    return text_splitter.split_documents(load_docs())
### Sanity check:
# chunks = split_text()
# print(f"Total Chunks: {len(chunks)}")
# for c in chunks[:10]:
#     print(f"Meta: {c.metadata} | LENGTH: {len(c.page_content)}")
def vectorize_text(batch_size: int = 100):
    ### Embed chunks in batches and persist them to a local Chroma store
    embeddings = OpenAIEmbeddings(api_key=OPENAI_API_KEY)
    all_docs: List[Document] = split_text()
    vector_store = None
    for i in range(0, len(all_docs), batch_size):
        batch = all_docs[i:i + batch_size]
        if vector_store is None:
            vector_store = Chroma.from_documents(
                documents=batch,
                embedding=embeddings,
                persist_directory=db_path
            )
        else:
            vector_store.add_documents(batch)
    return vector_store
# print("Embedded Text: ", vectorize_text())
def get_prompt() -> ChatPromptTemplate:
    system_template = """You are a top-tier SEO strategist helping with small business SEO.
Prefer the provided context (retrieved documents) for your responses,
drawing on outside resources only when they make the advice more comprehensive or insightful.
Be concise and professional.
Format headings and subheadings in bold, with an appropriate heading element for each in your markdown responses:
example: headings and subheadings in **bold**, at an appropriate H2/H3/H4 level.
Below is the context:
<context>
{context}
</context>
"""
    prompt = ChatPromptTemplate.from_messages([
        ("system", system_template),
        ("human", "{question}")
    ])
    return prompt
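### Sanity check: render the prompt with dummy placeholder inputs
# print(get_prompt().format(context="<retrieved chunks>", question="What is SEO?"))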
def chain_rag_elements(vector_store, prompt):
    llm = ChatOpenAI(model=OPENAI_MODEL_ID, streaming=True)
    retriever = vector_store.as_retriever(search_kwargs={"k": 3})
    parser = StrOutputParser()
    rag_chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | llm
        | parser
    )
    return rag_chain
# query = "What is SEO?"
# chain = chain_rag_elements(vectorize_text(), get_prompt())
# for chunk in chain.stream(query):
#     print(chunk, end="", flush=True)
### Controller
def build_rag_workflow(embeddings=None):
    ### Load the persisted vector store if one exists on disk; otherwise
    ### vectorize the text fetched from Google Docs (the external resource
    ### backing the RAG vector store)
    if embeddings is None:
        embeddings = OpenAIEmbeddings(api_key=OPENAI_API_KEY)
    if os.path.exists(db_path) and os.listdir(path=db_path):
        print("Loading existing vector store...")
        vector_db = Chroma(
            persist_directory=db_path,
            embedding_function=embeddings
        )
    else:
        vector_db = vectorize_text()
    # print(vector_db._collection.count())
    ### Chain all the necessary elements into the RAG chain
    template = get_prompt()
    rag_chain = chain_rag_elements(vector_db, template)
    return rag_chain
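### Sanity check: one-off, non-streaming smoke test of the full workflow;
### the question is an arbitrary example
# rag = build_rag_workflow()
# print(rag.invoke("What are quick local SEO wins for a small business?"))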
### UI (Gradio)
import gradio as gr
chain = build_rag_workflow()
def chat(query, history):
    response = ""
    for chunk in chain.stream(query):
        response += chunk
        yield response
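### Sanity check: drive the chat generator outside Gradio (prints the final
### accumulated response only)
# for partial in chat("What is technical SEO?", history=[]):
#     pass
# print(partial)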
demo = gr.ChatInterface(
    fn=chat, type="messages", title="SEO Expert Bot",
    description="An SEO specialist chatbot built on a RAG system, using a roughly 1,000-page volume of SEO hack documents as its external data resource (vector DB)."
)
if __name__ == "__main__":
    port = int(os.getenv("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)