vito95311 committed on
Commit
aada813
·
2 Parent(s): d4ef36e 77766b8

Merge with remote repository and update GGUF documentation

Browse files

- Integrate comprehensive .gitattributes from remote
- Add GGUF file support for quantized models
- Update README.md with latest GGUF features
- Maintain compatibility with HuggingFace Hub

Files changed (2) hide show
  1. .gitattributes +34 -1
  2. README.md +8 -1
.gitattributes CHANGED
@@ -1,3 +1,36 @@
1
- *.gguf filter=lfs diff=lfs merge=lfs -text
 
2
  *.bin filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  *.safetensors filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
  *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.gguf filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -28,7 +28,7 @@ model-index:
28
  - type: tokens_per_second
29
  value: 25.3
30
  library_name: llama.cpp
31
- base_model: Qwen/Qwen3-Omni
32
  ---
33
 
34
  # 🔥 Qwen3-Omni **GGUF量化版本** - Ollama & llama.cpp 專用
@@ -76,6 +76,11 @@ ollama create qwen3-omni-quantized -f Qwen3OmniQuantized.modelfile
76
  ollama run qwen3-omni-quantized
77
  ```
78
 
 
 
 
 
 
79
  ### 🖥️ 方法2: llama.cpp 直接運行
80
 
81
  ```bash
@@ -326,3 +331,5 @@ qwen3-omni-gguf/
326
  ---
327
 
328
  *專為GGUF生態打造,讓大模型觸手可及* 🌍
 
 
 
28
  - type: tokens_per_second
29
  value: 25.3
30
  library_name: llama.cpp
31
+ base_model: Qwen/Qwen3-Omni-30B-A3B-Thinking
32
  ---
33
 
34
  # 🔥 Qwen3-Omni **GGUF量化版本** - Ollama & llama.cpp 專用
 
76
  ollama run qwen3-omni-quantized
77
  ```
78
 
79
+ ```bash
80
+ # 或直接使用ollama pull指令下載並創建
81
+ ollama pull hf.co/vito95311/Qwen3-Omni-30B-A3B-Thinking-GGUF-INT8FP16
82
+ ```
83
+
84
  ### 🖥️ 方法2: llama.cpp 直接運行
85
 
86
  ```bash
 
331
  ---
332
 
333
  *專為GGUF生態打造,讓大模型觸手可及* 🌍
334
+
335
+