File size: 1,945 Bytes
d131e81
5b21d14
9397cee
5b21d14
 
9397cee
 
 
 
5b21d14
9397cee
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d131e81
5b21d14
 
 
 
 
 
 
 
 
 
 
9397cee
5b21d14
 
 
 
 
 
 
 
 
 
 
 
 
d131e81
 
5b21d14
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
import os

import gradio as gr
from huggingface_hub import hf_hub_download, snapshot_download
from llama_cpp import Llama

# Download the GGUF straight from the Hub at runtime (skips the 1GB upload limit)
MODEL_REPO = "mradermacher/DeepHat-V1-7B-GGUF"
MODEL_FILE = "DeepHat-V1-7B.Q4_K_M.gguf"  # ~4.8GB, downloaded once then cached
LOCAL_PATH = "./models/"  # local folder inside the Space

# Model loader (runs once at init)
def load_model():
    """Download the GGUF model file from the Hub (cached after the first run)
    and load it with llama.cpp.

    Returns:
        Llama: the loaded llama-cpp-python model handle.
    """
    os.makedirs(LOCAL_PATH, exist_ok=True)
    # BUG FIX: snapshot_download() has no `filename` parameter — the original
    # call raised TypeError at startup. hf_hub_download() is the API for
    # fetching a single file, and it returns the full local path directly,
    # so the os.path.join step is no longer needed. The deprecated no-op
    # `local_dir_use_symlinks` argument is dropped.
    full_path = hf_hub_download(
        repo_id=MODEL_REPO,
        filename=MODEL_FILE,
        local_dir=LOCAL_PATH,
    )

    llm = Llama(
        model_path=full_path,
        n_ctx=2048,    # context window (tokens)
        n_threads=4,   # CPU threads for inference
        verbose=False,
    )
    return llm

# Load at init (takes ~5-10 min on the first build, then served from cache)
print("Baixando DeepHat... (pode demorar na CPU)")
llm = load_model()

def generate_response(prompt, max_tokens=500):
    """Run the model on a ChatML-wrapped prompt and return the trimmed reply.

    Args:
        prompt: raw user message.
        max_tokens: generation budget passed through to llama.cpp.

    Returns:
        str: the assistant's text with surrounding whitespace stripped.
    """
    # Wrap the user message in the ChatML role markers the model was tuned on.
    wrapped = f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
    completion = llm(
        wrapped,
        max_tokens=max_tokens,
        temperature=0.7,
        top_p=0.9,
        stop=["<|im_end|>", "</s>"],
    )
    reply = completion['choices'][0]['text']
    return reply.strip()

# Gradio UI: a minimal chat front-end over generate_response.
with gr.Blocks(title="DeepHat Uncensored Chat") as demo:
    gr.Markdown("# DeepHat - IA Uncensored pra Cibersegurança & Hacking Ético")
    chat_window = gr.Chatbot()
    user_box = gr.Textbox(placeholder="Pergunte sobre hacking WiFi, pentest ou censura...")
    clear_btn = gr.Button("Clear")

    def respond(message, chat_history):
        """Generate a reply and append the (user, bot) pair to the history."""
        reply = generate_response(message)
        chat_history.append((message, reply))
        # Empty string clears the textbox; updated history refreshes the chat.
        return "", chat_history

    # Submit on Enter; wire (textbox, history) in and out of the handler.
    user_box.submit(respond, [user_box, chat_window], [user_box, chat_window])
    # Resetting the chatbot component to None empties the conversation.
    clear_btn.click(lambda: None, None, chat_window, queue=False)

if __name__ == "__main__":
    demo.launch()