huihui-ai commited on
Commit
7ea6d8a
·
verified ·
1 Parent(s): 5d5dbaa

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +144 -3
README.md CHANGED
@@ -1,3 +1,144 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model:
3
+ - deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
4
+ tags:
5
+ - text-generation-inference
6
+ - transformers
7
+ - unsloth
8
+ - abliterated
9
+ - uncensored
10
+ library_name: transformers
11
+ ---
12
+
13
+ # huihui-ai/DeepSeek-R1-Distill-Qwen-1.5B-abliterated
14
+
15
+ This is an uncensored version of the [deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B) reasoning model, post-trained by huihui-ai.
16
+
17
+ Please refer to [SFT with Unsloth](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_(7B)-Alpaca.ipynb#scrollTo=2ejIt2xSNKKp) for the training method.
18
+
19
+ This is a test of using fine-tuning for ablation to produce an uncensored model, and the test results met the expected outcomes.
20
+
21
+ ## Use with ollama
22
+
23
+ You can use [huihui_ai/deepseek-r1-abliterated](https://ollama.com/huihui_ai/deepseek-r1-abliterated) directly
24
+ ```
25
+ ollama run huihui_ai/deepseek-r1-abliterated:1.5b
26
+ ```
27
+
28
+ ## Use with transformers
29
+
30
+ ```
31
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextStreamer
32
+ import torch
33
+ import os
34
+ import signal
35
+
36
+ cpu_count = os.cpu_count()
37
+ print(f"Number of CPU cores in the system: {cpu_count}")
38
+ half_cpu_count = cpu_count // 2
39
+ os.environ["MKL_NUM_THREADS"] = str(half_cpu_count)
40
+ os.environ["OMP_NUM_THREADS"] = str(half_cpu_count)
41
+ torch.set_num_threads(half_cpu_count)
42
+
43
+ print(f"PyTorch threads: {torch.get_num_threads()}")
44
+ print(f"MKL threads: {os.getenv('MKL_NUM_THREADS')}")
45
+ print(f"OMP threads: {os.getenv('OMP_NUM_THREADS')}")
46
+
47
+ # Load the model and tokenizer
48
+ NEW_MODEL_ID = "huihui-ai/DeepSeek-R1-Distill-Qwen-1.5B-abliterated"
49
+ print(f"Load Model {NEW_MODEL_ID} ... ")
50
+ quant_config_4 = BitsAndBytesConfig(
51
+ load_in_4bit=True,
52
+ bnb_4bit_compute_dtype=torch.bfloat16,
53
+ bnb_4bit_use_double_quant=True,
54
+ llm_int8_enable_fp32_cpu_offload=True,
55
+ )
56
+
57
+ model = AutoModelForCausalLM.from_pretrained(
58
+ NEW_MODEL_ID,
59
+ device_map="auto",
60
+ trust_remote_code=True,
61
+ #quantization_config=quant_config_4,
62
+ torch_dtype=torch.bfloat16
63
+ )
64
+ tokenizer = AutoTokenizer.from_pretrained(NEW_MODEL_ID, trust_remote_code=True)
65
+ if tokenizer.pad_token is None:
66
+ tokenizer.pad_token = tokenizer.eos_token
67
+ tokenizer.pad_token_id = tokenizer.eos_token_id
68
+
69
+ initial_messages = [{"role": "system", "content": "You are a helpful assistant."}]
70
+ messages = initial_messages.copy()
71
+
72
+ class CustomTextStreamer(TextStreamer):
73
+ def __init__(self, tokenizer, skip_prompt=True, skip_special_tokens=True):
74
+ super().__init__(tokenizer, skip_prompt=skip_prompt, skip_special_tokens=skip_special_tokens)
75
+ self.generated_text = ""
76
+ self.stop_flag = False
77
+
78
+ def on_finalized_text(self, text: str, stream_end: bool = False):
79
+ self.generated_text += text
80
+ print(text, end="", flush=True)
81
+ if self.stop_flag:
82
+ raise StopIteration
83
+
84
+ def stop_generation(self):
85
+ self.stop_flag = True
86
+
87
+ def generate_stream(model, tokenizer, messages, max_new_tokens):
88
+ input_ids = tokenizer.apply_chat_template(
89
+ messages,
90
+ tokenize=True,
91
+ add_generation_prompt=True,
92
+ return_tensors="pt"
93
+ )
94
+ attention_mask = torch.ones_like(input_ids, dtype=torch.long)
95
+ tokens = input_ids.to(model.device)
96
+ attention_mask = attention_mask.to(model.device)
97
+
98
+ streamer = CustomTextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
99
+
100
+ def signal_handler(sig, frame):
101
+ streamer.stop_generation()
102
+ print("\n[Generation stopped by user with Ctrl+C]")
103
+
104
+ signal.signal(signal.SIGINT, signal_handler)
105
+
106
+ print("Response: ", end="", flush=True)
107
+ try:
108
+ generated_ids = model.generate(
109
+ tokens,
110
+ attention_mask=attention_mask,
111
+ use_cache=False,
112
+ max_new_tokens=max_new_tokens,
113
+ do_sample=True,
114
+ pad_token_id=tokenizer.pad_token_id,
115
+ streamer=streamer
116
+ )
117
+ del generated_ids
118
+ except StopIteration:
119
+ print("\n[Stopped by user]")
120
+
121
+ del input_ids, attention_mask
122
+ torch.cuda.empty_cache()
123
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
124
+
125
+ return streamer.generated_text, streamer.stop_flag
126
+
127
+ while True:
128
+ user_input = input("\nUser: ").strip()
129
+ if user_input.lower() == "/exit":
130
+ print("Exiting chat.")
131
+ break
132
+ if user_input.lower() == "/clear":
133
+ messages = initial_messages.copy()
134
+ print("Chat history cleared. Starting a new conversation.")
135
+ continue
136
+ if not user_input:
137
+ print("Input cannot be empty. Please enter something.")
138
+ continue
139
+ messages.append({"role": "user", "content": user_input})
140
+ response, stop_flag = generate_stream(model, tokenizer, messages, 8192)
141
+ if stop_flag:
142
+ continue
143
+ messages.append({"role": "assistant", "content": response})
144
+ ```