Fixed bugs with latest transformers.
#27 opened by RezaAMehr

README.md CHANGED
```diff
@@ -68,10 +68,10 @@ for message in conversation:
                     sr=processor.feature_extractor.sampling_rate)[0]
     )
 
-inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
-inputs.input_ids = inputs.input_ids.to("cuda")
+inputs = processor(text=text, audio=audios, return_tensors="pt", padding=True)
+inputs = inputs.to("cuda")
 
-generate_ids = model.generate(**inputs, max_length=256)
+generate_ids = model.generate(**inputs, max_new_tokens=256)
 generate_ids = generate_ids[:, inputs.input_ids.size(1):]
 
 response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
```
```diff
@@ -116,10 +116,10 @@ for message in conversation:
                     sr=processor.feature_extractor.sampling_rate)[0]
     )
 
-inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
-inputs.input_ids = inputs.input_ids.to("cuda")
+inputs = processor(text=text, audio=audios, return_tensors="pt", padding=True)
+inputs = inputs.to("cuda")
 
-generate_ids = model.generate(**inputs, max_length=256)
+generate_ids = model.generate(**inputs, max_new_tokens=256)
 generate_ids = generate_ids[:, inputs.input_ids.size(1):]
 
 response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
```
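The two hunks above make the same fix in both of the README's single-sample examples: newer `transformers` releases expect the processor's `audio=` keyword rather than the old `audios=`, moving the whole `BatchFeature` with `.to("cuda")` replaces moving `input_ids` alone, and `max_new_tokens` bounds only the generated continuation, where `max_length` also counted the prompt tokens. For context, a minimal end-to-end sketch of the fixed flow, assuming the Qwen2-Audio-style API this README documents; the model id, question, and audio URL are illustrative placeholders:

```python
from io import BytesIO
from urllib.request import urlopen

import librosa
from transformers import AutoProcessor, Qwen2AudioForConditionalGeneration

# Assumption: a checkpoint with the Qwen2-Audio instruct layout.
model_id = "Qwen/Qwen2-Audio-7B-Instruct"
processor = AutoProcessor.from_pretrained(model_id)
model = Qwen2AudioForConditionalGeneration.from_pretrained(model_id).to("cuda")

conversation = [
    {"role": "user", "content": [
        {"type": "audio", "audio_url": "https://example.com/sample.wav"},  # placeholder URL
        {"type": "text", "text": "What can you hear in this clip?"},
    ]},
]
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)

# Collect every audio segment the conversation references, resampled to the
# rate the feature extractor expects.
audios = []
for message in conversation:
    if isinstance(message["content"], list):
        for ele in message["content"]:
            if ele["type"] == "audio":
                audios.append(librosa.load(
                    BytesIO(urlopen(ele["audio_url"]).read()),
                    sr=processor.feature_extractor.sampling_rate)[0])

# The fixed calls: `audio=` keyword, whole-batch device move, `max_new_tokens`.
inputs = processor(text=text, audio=audios, return_tensors="pt", padding=True)
inputs = inputs.to("cuda")

generate_ids = model.generate(**inputs, max_new_tokens=256)
generate_ids = generate_ids[:, inputs.input_ids.size(1):]  # strip the prompt tokens

response = processor.batch_decode(
    generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
print(response)
```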
```diff
@@ -171,11 +171,11 @@ for conversation in conversations:
                     sr=processor.feature_extractor.sampling_rate)[0]
     )
 
-inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
+inputs = processor(text=text, audio=audios, return_tensors="pt", padding=True)
 inputs['input_ids'] = inputs['input_ids'].to("cuda")
-inputs.input_ids = inputs.input_ids.to("cuda")
+inputs = inputs.to("cuda")
 
-generate_ids = model.generate(**inputs, max_length=256)
+generate_ids = model.generate(**inputs, max_new_tokens=256)
 generate_ids = generate_ids[:, inputs.input_ids.size(1):]
 
 response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
```
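The third hunk applies the same fix to the README's batched example. A hedged sketch of that path, reusing the model, processor, and imports from the sketch above; `conversations` is a placeholder list of chat histories shaped like `conversation`, and the explicit left padding is an assumption so the prompt-stripping slice lines up across the batch:

```python
# Assumption: left-pad so every row's prompt ends at the same index and one
# slice strips all prompts at once.
processor.tokenizer.padding_side = "left"

# One chat-templated prompt per conversation.
text = [processor.apply_chat_template(c, add_generation_prompt=True, tokenize=False)
        for c in conversations]

# Flatten every referenced clip across the batch, in prompt order.
audios = []
for conversation in conversations:
    for message in conversation:
        if isinstance(message["content"], list):
            for ele in message["content"]:
                if ele["type"] == "audio":
                    audios.append(librosa.load(
                        BytesIO(urlopen(ele["audio_url"]).read()),
                        sr=processor.feature_extractor.sampling_rate)[0])

inputs = processor(text=text, audio=audios, return_tensors="pt", padding=True)
inputs = inputs.to("cuda")  # moves every tensor, subsuming the per-key .to("cuda") the README keeps

generate_ids = model.generate(**inputs, max_new_tokens=256)
generate_ids = generate_ids[:, inputs.input_ids.size(1):]

# One decoded string per conversation; note there is no trailing [0] here.
responses = processor.batch_decode(
    generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
```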