Phil2Sat committed on
Commit
afe35e6
·
verified ·
1 Parent(s): 6b7ce93

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,19 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Qwen2.5-VL-7B-Instruct-abliterated/Qwen2.5-VL-7B-Instruct-abliterated.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
37
+ Qwen2.5-VL-7B-Instruct-abliterated/Qwen2.5-VL-7B-Instruct-abliterated.mmproj-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
38
+ v52/qwen-rapid-nsfw-v5.2-F16.gguf filter=lfs diff=lfs merge=lfs -text
39
+ v52/qwen-rapid-nsfw-v5.2-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
40
+ v52/qwen-rapid-nsfw-v5.2-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
41
+ v52/qwen-rapid-nsfw-v5.2-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
42
+ v52/qwen-rapid-nsfw-v5.2-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
43
+ v52/qwen-rapid-nsfw-v5.2-Q4_1.gguf filter=lfs diff=lfs merge=lfs -text
44
+ v52/qwen-rapid-nsfw-v5.2-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ v52/qwen-rapid-nsfw-v5.2-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ v52/qwen-rapid-nsfw-v5.2-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
47
+ v52/qwen-rapid-nsfw-v5.2-Q5_1.gguf filter=lfs diff=lfs merge=lfs -text
48
+ v52/qwen-rapid-nsfw-v5.2-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
49
+ v52/qwen-rapid-nsfw-v5.2-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
50
+ v52/qwen-rapid-nsfw-v5.2-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
51
+ v52/qwen-rapid-nsfw-v5.2-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Qwen2.5-VL-7B-Instruct-abliterated/Qwen2.5-VL-7B-Instruct-abliterated.Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:669a5604c47c90c20110c6db5fe10ad7e8ec99b553a785d7d20492f7d5b3e7d0
3
+ size 8098525600
Qwen2.5-VL-7B-Instruct-abliterated/Qwen2.5-VL-7B-Instruct-abliterated.mmproj-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e2ab4e60dd3e174f3d2d6d0c0979c058827699d0085cdcbadada0a5c609ec43f
3
+ size 853119744
caption/caption.py CHANGED
@@ -1,44 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from pathlib import Path
2
  from llama_cpp import Llama
3
  from llama_cpp.llama_chat_format import Qwen25VLChatHandler
4
  import gradio as gr
5
  import base64
 
 
 
6
 
7
- # Globale Variable für das Modell
8
  model = None
 
9
 
10
- def initialize_model():
11
- """Modell initialisieren"""
12
- global model
13
- if model is None:
14
- llm_model_path = Path("/daten/models/text_encoders/Qwen2.5-VL-7B-Instruct-abliterated.Q8_0.gguf")
15
- mmproj_model_path = Path("/daten/models/text_encoders/Qwen2.5-VL-7B-Instruct-abliterated.mmproj-Q8_0.gguf")
16
-
17
- if llm_model_path.exists() and mmproj_model_path.exists():
18
- print("Loading Qwen-VL model...")
19
- chat_handler = Qwen25VLChatHandler(clip_model_path=str(mmproj_model_path)) # Hier war der Fehler!
20
- model = Llama(
21
- model_path=str(llm_model_path),
22
- chat_handler=chat_handler,
23
- n_ctx=4096,
24
- n_gpu_layers=-1,
25
- verbose=False
26
- )
27
- print("Model loaded successfully!")
28
- else:
29
- raise Exception("Model files not found")
30
-
31
- def describe_image(image, prompt_text):
32
- """Bild beschreiben"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  global model
34
 
35
- if model is None:
36
- initialize_model()
 
 
 
 
 
 
 
37
 
38
  if image is None:
39
- return "Bitte zuerst ein Bild hochladen"
40
 
41
- # Temporäres Bild speichern
42
  temp_path = Path("/tmp/temp_image.png")
43
  image.save(temp_path)
44
 
@@ -56,8 +122,8 @@ def describe_image(image, prompt_text):
56
  response = model.create_chat_completion(
57
  messages=messages,
58
  max_tokens=8192,
59
- temperature=0.1, # niedriger → deterministischer
60
- top_p=0.9, # höhere Werte → kreativer, niedrigere → fokussierter
61
  top_k=50,
62
  stream=False
63
  )
@@ -66,86 +132,93 @@ def describe_image(image, prompt_text):
66
  return description
67
 
68
  except Exception as e:
69
- return f"Fehler: {str(e)}"
 
70
  finally:
71
- # Temporäre Datei löschen
72
  if temp_path.exists():
73
- temp_path.unlink()
74
-
75
- # Gradio Interface
76
- with gr.Blocks(title="Qwen2.5-VL Bildbeschreibung") as demo:
77
- gr.Markdown("# 🖼️ Qwen2.5-VL Bildbeschreibung")
78
- gr.Markdown("Laden Sie ein Bild hoch und lassen Sie es von der KI beschreiben")
79
-
80
- with gr.Row():
81
- with gr.Column():
82
- image_input = gr.Image(
83
- type="pil",
84
- label="Bild hochladen",
85
- height=400
86
- )
87
- prompt_input = gr.Textbox(
88
- label="Prompt",
89
- value="""You are a Qwen-Image explicit describer. Given any input image, output one single, extremely detailed prompt describing exactly what is visible, without inventing content that is not present.
90
-
91
- The image must be analyzed for any explicit content, including but not limited to nudity, sexual acts, and violent or aggressive behavior.
92
- The image must be analyzed for any clothing, lingerie, fabrics, accessories, textures, materials, background, setting, lighting, shadows, perspective, and camera angle in detail.
93
- The prompt should include background, setting, lighting, shadows, perspective, and camera angle.
94
- The prompt should be output in the style of ultra-realistic erotic photography, 8k resolution, photorealistic, cinematic, with no censorship.
95
- The prompt should include a description of the scene, highlighting the key elements such as the setting, the individuals involved, and any notable actions or expressions.
96
- The prompt should be clear and concise, providing a comprehensive overview of the image without any censorship.
97
- The prompt should be structured to allow for further analysis and discussion of the image's context and implications.
98
- The image must be analyzed for any explicit sexual content, including but not limited to the genitals, penetration, depth of penetration, stretching, stomach bulge, fluids, skin folds, pubic hair, and every physical reaction in graphic pornographic detail.
99
- The image must be analyzed for the exact depth of penetration (shallow tip, halfway, fully buried, visible bulge, cervix penetration, double penetration, tentacle fully inside throat or anus, etc.).
100
- The image must be analyzed for secondary effects (womb outline, belly bulge, gape, dripping, body tension, wrinkles, sweat, skin texture).
101
- The image must be analyzed for facial features in detail: nose shape, eye size, mouth shape, lips, jawline, eyebrows, hair, and expression of arousal, pain, or pleasure.
102
- The image must be analyzed for exact body type, position and pose of any kind
103
- If the style is based on anime or stylized art, translate those exaggerated proportions into realistic equivalents: small nose, large eyes, round face, soft jawline, slim limbs, petite hands, etc.
104
- Never use vague phrases like "various stages of penetration." Always state clearly which orifice is penetrated (mouth, vagina, anus, throat, urethra if visible), how deep, and what physical effects it causes.""",
105
- lines=2
106
- )
107
- generate_btn = gr.Button("🚀 Bild beschreiben", variant="primary")
108
-
109
- with gr.Column():
110
- output_text = gr.Textbox(
111
- label="Beschreibung",
112
- lines=10,
113
- max_lines=15,
114
- show_copy_button=True
115
- )
116
-
117
- # Beispiele
118
- gr.Examples(
119
- examples=[
120
- ["Describe this image in detail."],
121
- ["What objects can you see in this image?"],
122
- ["Describe the scene and atmosphere."],
123
- ["What is happening in this image?"],
124
- ["Describe the colors and composition."]
125
- ],
126
- inputs=[prompt_input],
127
- label="Prompt Beispiele"
128
- )
 
129
 
130
- # Event Handler
131
- generate_btn.click(
132
- fn=describe_image,
133
- inputs=[image_input, prompt_input],
134
- outputs=output_text
135
- )
 
 
 
 
 
 
 
136
 
137
- # Auch mit Enter im Prompt-Feld generieren
138
- prompt_input.submit(
139
- fn=describe_image,
140
- inputs=[image_input, prompt_input],
141
- outputs=output_text
142
- )
143
 
144
  if __name__ == "__main__":
145
- # Modell vorladen (optional)
146
- print("Initialisiere Gradio Interface...")
 
 
 
 
 
147
  demo.launch(
148
- server_name="0.0.0.0", # Für externen Zugriff
149
  server_port=7860,
150
- share=False # Auf True setzen für öffentliche URL
151
  )
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Comprehensive Gradio app for image description using Qwen2.5-VL (llama-cpp-python).
4
+ This file keeps the exact Gradio layout/positions/formatting from your original script,
5
+ but moves default prompts and model paths into external JSON files:
6
+ - model_config.json -> contains an array of model entries (for dropdown selection)
7
+ - prompts.json -> contains default_prompt, example prompts, and any other prompt defaults
8
+
9
+ Important:
10
+ - Do NOT change the UI layout or element order (keeps column/row structure identical).
11
+ - The app language (UI labels & markdown) is fully in English, per request.
12
+ - All major blocks are documented extensively below to help anyone understand and maintain.
13
+ """
14
+
15
  from pathlib import Path
16
  from llama_cpp import Llama
17
  from llama_cpp.llama_chat_format import Qwen25VLChatHandler
18
  import gradio as gr
19
  import base64
20
+ import json
21
+ import os
22
+ from typing import Dict, Any
23
 
24
+ # --- Global model handle (singleton) ---
25
  model = None
26
+ _current_model_key = None
27
 
28
+ # --- Paths to external config files ---
29
+ MODEL_CONFIG_PATH = Path("model_config.json")
30
+ PROMPTS_PATH = Path("prompts.json")
31
+
32
+
33
def load_json_file(path: Path) -> Dict[str, Any]:
    """Parse *path* as UTF-8 JSON and return the decoded object.

    Raises:
        FileNotFoundError: when the file does not exist, with a message
            naming the missing path (clearer than a bare open() error).
    """
    if not path.exists():
        raise FileNotFoundError(f"Required config file not found: {path}")
    raw = path.read_text(encoding="utf-8")
    return json.loads(raw)
38
+
39
+
40
def get_model_list() -> Dict[str, Dict[str, str]]:
    """Return a {display name -> model entry} mapping from model_config.json.

    The display name falls back to the entry's ``model_key`` when no
    ``display_name`` is present; entries with duplicate display names
    overwrite earlier ones (last wins), matching dict semantics.
    """
    entries = load_json_file(MODEL_CONFIG_PATH).get("models", [])
    return {
        (entry.get("display_name") or entry.get("model_key")): entry
        for entry in entries
    }
48
+
49
+
50
def get_prompts() -> Dict[str, Any]:
    """Return the prompt configuration parsed from prompts.json."""
    return load_json_file(PROMPTS_PATH)
52
+
53
+
54
def initialize_model_from_config(selected_display_name: str):
    """Load the model chosen in the dropdown into the global singleton.

    Looks *selected_display_name* up in model_config.json and creates the
    global ``Llama`` instance only when the requested model differs from
    the one already resident (keyed by the entry's ``model_key``).

    Raises:
        ValueError: the display name is not present in model_config.json.
        FileNotFoundError: either referenced .gguf file is missing on disk.
    """
    global model, _current_model_key

    entry = get_model_list().get(selected_display_name)
    if entry is None:
        raise ValueError(f"Selected model '{selected_display_name}' not found in model_config.json")

    model_key = entry.get("model_key", selected_display_name)

    # Fast path: the requested model is already loaded -- nothing to do.
    if model is not None and _current_model_key == model_key:
        return

    llm_model_path = Path(entry["llm_model_path"])
    mmproj_model_path = Path(entry["mmproj_model_path"])

    # Guard clause: refuse to start loading unless both files are present.
    if not (llm_model_path.exists() and mmproj_model_path.exists()):
        raise FileNotFoundError(
            f"Model files not found for '{selected_display_name}'.\n"
            f"llm exists: {llm_model_path.exists()}, mmproj exists: {mmproj_model_path.exists()}"
        )

    print(f"Loading model '{selected_display_name}' from paths:")
    print(f" llm: {llm_model_path}")
    print(f" mmproj:{mmproj_model_path}")

    # The mmproj (vision projector) file is wired in through the chat handler.
    vision_handler = Qwen25VLChatHandler(clip_model_path=str(mmproj_model_path))
    model = Llama(
        model_path=str(llm_model_path),
        chat_handler=vision_handler,
        n_ctx=4096,
        n_gpu_layers=-1,   # offload every layer to the GPU
        verbose=False
    )
    _current_model_key = model_key
    print("Model loaded successfully!")
90
+
91
+
92
+ def describe_image(image, prompt_text, selected_model_display):
93
  global model
94
 
95
+ if model is None or _current_model_key is None:
96
+ initialize_model_from_config(selected_model_display)
97
+ else:
98
+ if selected_model_display not in get_model_list():
99
+ raise ValueError("Selected model not found in config")
100
+ selected_entry = get_model_list()[selected_model_display]
101
+ if selected_entry.get("model_key") != _current_model_key:
102
+ model = None
103
+ initialize_model_from_config(selected_model_display)
104
 
105
  if image is None:
106
+ return "Please upload an image first"
107
 
 
108
  temp_path = Path("/tmp/temp_image.png")
109
  image.save(temp_path)
110
 
 
122
  response = model.create_chat_completion(
123
  messages=messages,
124
  max_tokens=8192,
125
+ temperature=0.1,
126
+ top_p=0.9,
127
  top_k=50,
128
  stream=False
129
  )
 
132
  return description
133
 
134
  except Exception as e:
135
+ return f"Error: {str(e)}"
136
+
137
  finally:
 
138
  if temp_path.exists():
139
+ try:
140
+ temp_path.unlink()
141
+ except Exception:
142
+ pass
143
+
144
+
145
def build_ui():
    """Construct and return the Gradio Blocks interface.

    Defaults come from the external JSON files: the model dropdown is
    populated from model_config.json and the prompt text/examples from
    prompts.json. The layout (two columns: inputs left, output right)
    is kept identical to the original design.
    """
    prompts = get_prompts()
    models = get_model_list()
    default_prompt_text = prompts.get("default_prompt", "")
    # BUG FIX: prompts.json ships the examples under the key "examples",
    # but this code previously only looked for "prompt_examples", so the
    # configured examples were silently ignored. Accept both keys before
    # falling back to the built-in defaults.
    prompt_examples = prompts.get(
        "prompt_examples",
        prompts.get(
            "examples",
            [
                "Describe this image in detail.",
                "What objects can you see in this image?",
                "Describe the scene and atmosphere.",
                "What is happening in this image?",
                "Describe the colors and composition."
            ],
        ),
    )

    with gr.Blocks(title="Qwen2.5-VL Image Description") as demo:
        gr.Markdown("# 🖼️ Qwen2.5-VL Image Description")
        gr.Markdown("Upload an image and let the model describe it in detail")

        with gr.Row():
            with gr.Column():
                model_dropdown = gr.Dropdown(
                    label="Model selection",
                    choices=list(models.keys()),
                    # first configured model, or None when the config is empty
                    value=next(iter(models), None)
                )

                image_input = gr.Image(
                    type="pil",
                    label="Upload image",
                    height=400
                )

                prompt_input = gr.Textbox(
                    label="Prompt",
                    value=default_prompt_text,
                    lines=2
                )

                generate_btn = gr.Button("🚀 Describe image", variant="primary")

            with gr.Column():
                output_text = gr.Textbox(
                    label="Description",
                    lines=20,
                    max_lines=30,
                    show_copy_button=True
                )

        gr.Examples(
            examples=[[e] for e in prompt_examples],
            inputs=[prompt_input],
            label="Prompt Examples"
        )

        # Both the button click and pressing Enter in the prompt box run
        # the same description pipeline with the selected model.
        generate_btn.click(
            fn=describe_image,
            inputs=[image_input, prompt_input, model_dropdown],
            outputs=output_text
        )

        prompt_input.submit(
            fn=describe_image,
            inputs=[image_input, prompt_input, model_dropdown],
            outputs=output_text
        )

    return demo
210
 
 
 
 
 
 
 
211
 
212
if __name__ == "__main__":
    # Fail fast with an actionable message when either external config
    # file is absent -- the UI cannot be built without them.
    for cfg_path in (MODEL_CONFIG_PATH, PROMPTS_PATH):
        if not cfg_path.exists():
            raise FileNotFoundError(f"Missing {cfg_path}. Please create it (see {cfg_path.name} example).")

    print("Initializing Gradio Interface...")
    app = build_ui()
    app.launch(
        server_name="0.0.0.0",  # listen on all interfaces
        server_port=7860,
        share=False             # set True to get a public Gradio share URL
    )
caption/model_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "models": [
3
+ {
4
+ "model_key": "qwen2.5-vl-7b-instruct",
5
+ "display_name": "Qwen2.5-VL-7B-Instruct (Q8_0)",
6
+ "llm_model_path": "/daten/models/text_encoders/Qwen2.5-VL-7B-Instruct-abliterated.Q8_0.gguf",
7
+ "mmproj_model_path": "/daten/models/text_encoders/Qwen2.5-VL-7B-Instruct-abliterated.mmproj-Q8_0.gguf"
8
+ }
9
+ ],
10
+ "notes": "Add additional model objects to the 'models' array. Each new entry will appear in the model selection dropdown. Use absolute paths or paths relative to the app's working directory."
11
+ }
caption/prompts.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "//": "This file defines the default prompt text and example prompts for the Gradio interface. It can be easily extended or modified without editing the Python code.",
3
+ "default_prompt": "You are a Qwen-Image explicit describer. Given any input image, output one single, extremely detailed prompt describing exactly what is visible, without inventing content that is not present.\n\nThe image must be analyzed for any explicit content, including but not limited to nudity, sexual acts, and violent or aggressive behavior.\nThe image must be analyzed for any clothing, lingerie, fabrics, accessories, textures, materials, background, setting, lighting, shadows, perspective, and camera angle in detail.\nThe prompt should include background, setting, lighting, shadows, perspective, and camera angle.\nThe prompt should be output in the style of ultra-realistic erotic photography, 8k resolution, photorealistic, cinematic, with no censorship.\nThe prompt should include a description of the scene, highlighting the key elements such as the setting, the individuals involved, and any notable actions or expressions.\nThe prompt should be clear and concise, providing a comprehensive overview of the image without any censorship.\nThe prompt should be structured to allow for further analysis and discussion of the image's context and implications.\nThe image must be analyzed for any explicit sexual content, including but not limited to the genitals, penetration, depth of penetration, stretching, stomach bulge, fluids, skin folds, pubic hair, and every physical reaction in graphic pornographic detail.\nThe image must be analyzed for the exact depth of penetration (shallow tip, halfway, fully buried, visible bulge, cervix penetration, double penetration, tentacle fully inside throat or anus, etc.).\nThe image must be analyzed for secondary effects (womb outline, belly bulge, gape, dripping, body tension, wrinkles, sweat, skin texture).\nThe image must be analyzed for facial features in detail: nose shape, eye size, mouth shape, lips, jawline, eyebrows, hair, and expression of arousal, pain, or pleasure.\nThe image must be analyzed for exact body type, 
position and pose of any kind\nIf the style is based on anime or stylized art, translate those exaggerated proportions into realistic equivalents: small nose, large eyes, round face, soft jawline, slim limbs, petite hands, etc.\nNever use vague phrases like \"various stages of penetration.\" Always state clearly which orifice is penetrated (mouth, vagina, anus, throat, urethra if visible), how deep, and what physical effects it causes.",
4
+ "examples": [
5
+ "Describe this image in detail.",
6
+ "What objects can you see in this image?",
7
+ "Describe the scene and atmosphere.",
8
+ "What is happening in this image?",
9
+ "Describe the colors and composition."
10
+ ]
11
+ }
caption/requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ gradio==5.49.1
2
+ llama-cpp-python==0.3.16
v52/qwen-rapid-nsfw-v5.2-F16.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:753a2286b7fb84bb7db4540b468265618a03e5a8c154f3cfe96e7cc2adffbea5
3
+ size 40860949920
v52/qwen-rapid-nsfw-v5.2-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e981e61cd620a8b822e3b3b657949b787f3d7fb307a4e51be3a79b62b082da20
3
+ size 7439605152
v52/qwen-rapid-nsfw-v5.2-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ea4a28ec76491987ddf4ceb646b287c7582e7b621da63048f888f5d85ca78f9
3
+ size 10049944992
v52/qwen-rapid-nsfw-v5.2-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eebc131306d75b488486496e28591a11e88e861a271ce58159379cca4f71eba3
3
+ size 9322986912
v52/qwen-rapid-nsfw-v5.2-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:960f271a1d188b4cf68208b19c90e44dbbd9c33aa52565d7806426b58a4bddb0
3
+ size 12204940704
v52/qwen-rapid-nsfw-v5.2-Q4_1.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:275bdf381074d29fd5bc6189ee40a836c877790fc77c6da5ae06a859730654a3
3
+ size 13158686112
v52/qwen-rapid-nsfw-v5.2-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d56365daf9e108a44e4d19c060228c40310162ca86abd4f9609987e637438826
3
+ size 13342416288
v52/qwen-rapid-nsfw-v5.2-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4ce63bfb839cd9de8498674bdfdf355633a7326a3b653bf7afb872ec08558b6
3
+ size 12480978336
v52/qwen-rapid-nsfw-v5.2-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fc74f056d0a03f315eba6496d992dd1feb607dafc0ea08ddcefc6eb868c6116
3
+ size 14659788192
v52/qwen-rapid-nsfw-v5.2-Q5_1.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88a7124e1e59d7ab891bff3992c5dad3aefcb9c7b4442810c198fdc2fa81d6aa
3
+ size 15613533600
v52/qwen-rapid-nsfw-v5.2-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efa00fa4324591314735c35f1f571f028fefe2380496e110fb01a17f23f9a027
3
+ size 15113067936
v52/qwen-rapid-nsfw-v5.2-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdad3ca3cc8fd66579617c0468fd42e8dd46a4c92b467a1fdbfcc43d7208769f
3
+ size 14386109856
v52/qwen-rapid-nsfw-v5.2-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e92b45e1c54708602a1428c20599320a2e90d8e0038ccb786487d92eab94b2d
3
+ size 16994385312
v52/qwen-rapid-nsfw-v5.2-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a90313621e0fad4c4244201afb32075eb7afd84f5382fbdbe18e3baabf0f0d9c
3
+ size 21750652320