
Commit beb99a2

feat: add Qwen Image support (#851)
* add qwen tokenizer
* add qwen2.5 vl support
* mv qwen.hpp -> qwenvl.hpp
* add qwen image model
* add qwen image t2i pipeline
* fix qwen image flash attn
* add qwen image i2i pipeline
* change encoding of vocab_qwen.hpp to utf8
* fix get_first_stage_encoding
* apply jeffbolz f32 patch #851 (comment)
* fix the issue that occurs when using CUDA with k-quants weights
* optimize the handling of the FeedForward precision fix
* to_add_out precision fix
* update docs
1 parent aa68b87 commit beb99a2

21 files changed: +142,286 / -246 lines

README.md

Lines changed: 3 additions & 0 deletions
@@ -21,6 +21,7 @@ API and command-line option may change frequently.***
 - [SD3/SD3.5](./docs/sd3.md)
 - [Flux-dev/Flux-schnell](./docs/flux.md)
 - [Chroma](./docs/chroma.md)
+- [Qwen Image](./docs/qwen_image.md)
 - Image Edit Models
     - [FLUX.1-Kontext-dev](./docs/kontext.md)
 - Video Models
@@ -296,6 +297,7 @@ arguments:
 --clip_g path to the clip-g text encoder
 --clip_vision path to the clip-vision encoder
 --t5xxl path to the t5xxl text encoder
+--qwen2vl path to the qwen2vl text encoder
 --vae [VAE] path to vae
 --taesd [TAESD_PATH] path to taesd. Using Tiny AutoEncoder for fast decoding (low quality)
 --control-net [CONTROL_PATH] path to control net model
@@ -464,6 +466,7 @@ Thank you to all the people who have already contributed to stable-diffusion.cpp
 ## References
 
 - [ggml](https://github.com/ggerganov/ggml)
+- [diffusers](https://github.com/huggingface/diffusers)
 - [stable-diffusion](https://github.com/CompVis/stable-diffusion)
 - [sd3-ref](https://github.com/Stability-AI/sd3-ref)
 - [stable-diffusion-stability-ai](https://github.com/Stability-AI/stablediffusion)

assets/qwen/example.png

1.35 MB

clip.hpp

Lines changed: 2 additions & 2 deletions
@@ -6,7 +6,7 @@
 
 /*================================================== CLIPTokenizer ===================================================*/
 
-std::pair<std::unordered_map<std::string, float>, std::string> extract_and_remove_lora(std::string text) {
+__STATIC_INLINE__ std::pair<std::unordered_map<std::string, float>, std::string> extract_and_remove_lora(std::string text) {
     std::regex re("<lora:([^:]+):([^>]+)>");
     std::smatch matches;
     std::unordered_map<std::string, float> filename2multiplier;
@@ -31,7 +31,7 @@ std::pair<std::unordered_map<std::string, float>, std::string> extract_and_remov
     return std::make_pair(filename2multiplier, text);
 }
 
-std::vector<std::pair<int, std::u32string>> bytes_to_unicode() {
+__STATIC_INLINE__ std::vector<std::pair<int, std::u32string>> bytes_to_unicode() {
     std::vector<std::pair<int, std::u32string>> byte_unicode_pairs;
     std::set<int> byte_set;
     for (int b = static_cast<int>('!'); b <= static_cast<int>('~'); ++b) {
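A note on the change above: marking these header-defined helpers with `__STATIC_INLINE__` gives them internal/inline linkage, which avoids "multiple definition" link errors when clip.hpp ends up included from more than one translation unit (presumably the case now that the new Qwen conditioner path also pulls it in). A minimal standalone sketch of the idea, assuming the macro expands to `static inline` as is conventional; the function name below is hypothetical:

// Hypothetical illustration; not the project's actual macro definition site.
// Assumption: __STATIC_INLINE__ expands to "static inline".
#ifndef __STATIC_INLINE__
#define __STATIC_INLINE__ static inline
#endif

#include <cstdio>

// Without static/inline, a function *defined* in a header that two .cpp files
// include would be emitted in both object files and fail at link time with
// "multiple definition of double_it(int)".
__STATIC_INLINE__ int double_it(int x) {
    return x * 2;  // each translation unit keeps its own internal copy
}

int main() {
    printf("%d\n", double_it(21));  // prints 42
    return 0;
}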

common.hpp

Lines changed: 39 additions & 5 deletions
@@ -177,7 +177,7 @@ class ResBlock : public GGMLBlock {
     }
 };
 
-class GEGLU : public GGMLBlock {
+class GEGLU : public UnaryBlock {
 protected:
     int64_t dim_in;
     int64_t dim_out;
@@ -216,23 +216,57 @@ class GEGLU : public GGMLBlock {
     }
 };
 
+class GELU : public UnaryBlock {
+public:
+    GELU(int64_t dim_in, int64_t dim_out, bool bias = true) {
+        blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out, bias));
+    }
+
+    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
+        // x: [ne3, ne2, ne1, dim_in]
+        // return: [ne3, ne2, ne1, dim_out]
+        auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
+
+        x = proj->forward(ctx, x);
+        x = ggml_gelu_inplace(ctx, x);
+        return x;
+    }
+};
+
 class FeedForward : public GGMLBlock {
 public:
+    enum class Activation {
+        GEGLU,
+        GELU
+    };
     FeedForward(int64_t dim,
                 int64_t dim_out,
-                int64_t mult = 4) {
+                int64_t mult = 4,
+                Activation activation = Activation::GEGLU,
+                bool precision_fix = false) {
         int64_t inner_dim = dim * mult;
+        if (activation == Activation::GELU) {
+            blocks["net.0"] = std::shared_ptr<GGMLBlock>(new GELU(dim, inner_dim));
+        } else {
+            blocks["net.0"] = std::shared_ptr<GGMLBlock>(new GEGLU(dim, inner_dim));
+        }
 
-        blocks["net.0"] = std::shared_ptr<GGMLBlock>(new GEGLU(dim, inner_dim));
         // net_1 is nn.Dropout(), skip for inference
-        blocks["net.2"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim_out));
+        float scale = 1.f;
+        if (precision_fix) {
+            scale = 1.f / 128.f;
+        }
+        // The purpose of the scale here is to prevent NaN issues in certain situations.
+        // For example, when using Vulkan without enabling force_prec_f32,
+        // or when using CUDA but the weights are k-quants.
+        blocks["net.2"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim_out, true, false, false, scale));
     }
 
     struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
        // x: [ne3, ne2, ne1, dim]
        // return: [ne3, ne2, ne1, dim_out]
 
-        auto net_0 = std::dynamic_pointer_cast<GEGLU>(blocks["net.0"]);
+        auto net_0 = std::dynamic_pointer_cast<UnaryBlock>(blocks["net.0"]);
         auto net_2 = std::dynamic_pointer_cast<Linear>(blocks["net.2"]);
 
         x = net_0->forward(ctx, x);  // [ne3, ne2, ne1, inner_dim]
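The scale passed to net.2 above is easier to read as a numerical trick: scaling a linear layer's weights by s and undoing the scale on its output is an identity in exact arithmetic, while with s = 1/128 the intermediate products and accumulations stay far from the ~65504 overflow point of f16, which is where the NaNs mentioned in the comment come from. A minimal standalone sketch of that identity in plain C++ (not ggml; the assumption here is that Linear's trailing scale argument folds the factor into the projection and the result is compensated elsewhere):

#include <cstdio>
#include <vector>

// Dot product of x with a weight row, optionally pre-scaling the weights and
// compensating on the output: (x . (w * s)) / s == x . w in exact arithmetic.
float scaled_dot(const std::vector<float>& x, const std::vector<float>& w, float s) {
    float acc = 0.0f;
    for (size_t i = 0; i < x.size(); i++) {
        acc += x[i] * (w[i] * s);  // intermediate magnitudes shrink by s
    }
    return acc / s;  // undo the scale on the result
}

int main() {
    std::vector<float> x = {100.0f, 200.0f, 300.0f};
    std::vector<float> w = {0.5f, 1.5f, 2.5f};
    // With s = 1/128 the partial sums stay small enough for f16 accumulation;
    // the final result matches the unscaled dot product.
    printf("unscaled: %f\n", scaled_dot(x, w, 1.0f));
    printf("scaled:   %f\n", scaled_dot(x, w, 1.0f / 128.0f));
    return 0;
}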

conditioner.hpp

Lines changed: 138 additions & 54 deletions
@@ -2,6 +2,7 @@
 #define __CONDITIONER_HPP__
 
 #include "clip.hpp"
+#include "qwenvl.hpp"
 #include "t5.hpp"
 
 struct SDCondition {
@@ -22,11 +23,11 @@ struct Conditioner {
                                               int width,
                                               int height,
                                               int adm_in_channels = -1,
-                                              bool zero_out_masked = false)                                      = 0;
-    virtual void alloc_params_buffer()                                                                           = 0;
-    virtual void free_params_buffer()                                                                            = 0;
-    virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors)                          = 0;
-    virtual size_t get_params_buffer_size()                                                                      = 0;
+                                              bool zero_out_masked = false)                                       = 0;
+    virtual void alloc_params_buffer()                                                                            = 0;
+    virtual void free_params_buffer()                                                                             = 0;
+    virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors)                           = 0;
+    virtual size_t get_params_buffer_size()                                                                       = 0;
     virtual std::tuple<SDCondition, std::vector<bool>> get_learned_condition_with_trigger(ggml_context* work_ctx,
                                                                                           int n_threads,
                                                                                           const std::string& text,
@@ -35,9 +36,13 @@ struct Conditioner {
                                                                                           int height,
                                                                                           int num_input_imgs,
                                                                                           int adm_in_channels = -1,
-                                                                                          bool zero_out_masked = false) = 0;
+                                                                                          bool zero_out_masked = false) {
+        GGML_ABORT("Not implemented yet!");
+    }
     virtual std::string remove_trigger_from_prompt(ggml_context* work_ctx,
-                                                   const std::string& prompt) = 0;
+                                                   const std::string& prompt) {
+        GGML_ABORT("Not implemented yet!");
+    }
 };
 
 // ldm.modules.encoders.modules.FrozenCLIPEmbedder
@@ -978,23 +983,6 @@ struct SD3CLIPEmbedder : public Conditioner {
         auto tokens_and_weights = tokenize(text, 77, true);
         return get_learned_condition_common(work_ctx, n_threads, tokens_and_weights, clip_skip, zero_out_masked);
     }
-
-    std::tuple<SDCondition, std::vector<bool>> get_learned_condition_with_trigger(ggml_context* work_ctx,
-                                                                                  int n_threads,
-                                                                                  const std::string& text,
-                                                                                  int clip_skip,
-                                                                                  int width,
-                                                                                  int height,
-                                                                                  int num_input_imgs,
-                                                                                  int adm_in_channels = -1,
-                                                                                  bool zero_out_masked = false) {
-        GGML_ASSERT(0 && "Not implemented yet!");
-    }
-
-    std::string remove_trigger_from_prompt(ggml_context* work_ctx,
-                                           const std::string& prompt) {
-        GGML_ASSERT(0 && "Not implemented yet!");
-    }
 };
 
 struct FluxCLIPEmbedder : public Conditioner {
@@ -1195,23 +1183,6 @@ struct FluxCLIPEmbedder : public Conditioner {
         auto tokens_and_weights = tokenize(text, chunk_len, true);
         return get_learned_condition_common(work_ctx, n_threads, tokens_and_weights, clip_skip, zero_out_masked);
     }
-
-    std::tuple<SDCondition, std::vector<bool>> get_learned_condition_with_trigger(ggml_context* work_ctx,
-                                                                                  int n_threads,
-                                                                                  const std::string& text,
-                                                                                  int clip_skip,
-                                                                                  int width,
-                                                                                  int height,
-                                                                                  int num_input_imgs,
-                                                                                  int adm_in_channels = -1,
-                                                                                  bool zero_out_masked = false) {
-        GGML_ASSERT(0 && "Not implemented yet!");
-    }
-
-    std::string remove_trigger_from_prompt(ggml_context* work_ctx,
-                                           const std::string& prompt) {
-        GGML_ASSERT(0 && "Not implemented yet!");
-    }
 };
 
 struct T5CLIPEmbedder : public Conditioner {
@@ -1398,22 +1369,135 @@ struct T5CLIPEmbedder : public Conditioner {
         auto tokens_and_weights = tokenize(text, chunk_len, true);
         return get_learned_condition_common(work_ctx, n_threads, tokens_and_weights, clip_skip, zero_out_masked);
     }
+};
 
-    std::tuple<SDCondition, std::vector<bool>> get_learned_condition_with_trigger(ggml_context* work_ctx,
-                                                                                  int n_threads,
-                                                                                  const std::string& text,
-                                                                                  int clip_skip,
-                                                                                  int width,
-                                                                                  int height,
-                                                                                  int num_input_imgs,
-                                                                                  int adm_in_channels = -1,
-                                                                                  bool zero_out_masked = false) {
-        GGML_ASSERT(0 && "Not implemented yet!");
-    }
-
-    std::string remove_trigger_from_prompt(ggml_context* work_ctx,
-                                           const std::string& prompt) {
-        GGML_ASSERT(0 && "Not implemented yet!");
+struct Qwen2_5_VLCLIPEmbedder : public Conditioner {
+    Qwen::Qwen2Tokenizer tokenizer;
+    std::shared_ptr<Qwen::Qwen2_5_VLRunner> qwenvl;
+    int prompt_template_encode_start_idx = 34;
+
+    Qwen2_5_VLCLIPEmbedder(ggml_backend_t backend,
+                           bool offload_params_to_cpu,
+                           const String2GGMLType& tensor_types = {},
+                           const std::string prefix            = "") {
+        qwenvl = std::make_shared<Qwen::Qwen2_5_VLRunner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.qwen2vl");
+    }
+
+    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
+        qwenvl->get_param_tensors(tensors, "text_encoders.qwen2vl");
+    }
+
+    void alloc_params_buffer() {
+        qwenvl->alloc_params_buffer();
+    }
+
+    void free_params_buffer() {
+        qwenvl->free_params_buffer();
+    }
+
+    size_t get_params_buffer_size() {
+        size_t buffer_size = 0;
+        buffer_size += qwenvl->get_params_buffer_size();
+        return buffer_size;
+    }
+
+    std::tuple<std::vector<int>, std::vector<float>> tokenize(std::string text,
+                                                              size_t max_length = 0,
+                                                              bool padding      = false) {
+        auto parsed_attention = parse_prompt_attention(text);
+
+        {
+            std::stringstream ss;
+            ss << "[";
+            for (const auto& item : parsed_attention) {
+                ss << "['" << item.first << "', " << item.second << "], ";
+            }
+            ss << "]";
+            LOG_DEBUG("parse '%s' to %s", text.c_str(), ss.str().c_str());
+        }
+
+        std::vector<int> tokens;
+        std::vector<float> weights;
+        for (const auto& item : parsed_attention) {
+            const std::string& curr_text = item.first;
+            float curr_weight            = item.second;
+            std::vector<int> curr_tokens = tokenizer.tokenize(curr_text, nullptr);
+            tokens.insert(tokens.end(), curr_tokens.begin(), curr_tokens.end());
+            weights.insert(weights.end(), curr_tokens.size(), curr_weight);
+        }
+
+        tokenizer.pad_tokens(tokens, weights, max_length, padding);
+
+        // for (int i = 0; i < tokens.size(); i++) {
+        //     std::cout << tokens[i] << ":" << weights[i] << ", ";
+        // }
+        // std::cout << std::endl;
+
+        return {tokens, weights};
+    }
+
+    SDCondition get_learned_condition_common(ggml_context* work_ctx,
+                                             int n_threads,
+                                             std::tuple<std::vector<int>, std::vector<float>> token_and_weights,
+                                             int clip_skip,
+                                             bool zero_out_masked = false) {
+        auto& tokens  = std::get<0>(token_and_weights);
+        auto& weights = std::get<1>(token_and_weights);
+
+        int64_t t0                        = ggml_time_ms();
+        struct ggml_tensor* hidden_states = NULL;  // [N, n_token, 3584]
+
+        auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
+
+        qwenvl->compute(n_threads,
+                        input_ids,
+                        &hidden_states,
+                        work_ctx);
+        {
+            auto tensor         = hidden_states;
+            float original_mean = ggml_tensor_mean(tensor);
+            for (int i2 = 0; i2 < tensor->ne[2]; i2++) {
+                for (int i1 = 0; i1 < tensor->ne[1]; i1++) {
+                    for (int i0 = 0; i0 < tensor->ne[0]; i0++) {
+                        float value = ggml_tensor_get_f32(tensor, i0, i1, i2);
+                        value *= weights[i1];
+                        ggml_tensor_set_f32(tensor, value, i0, i1, i2);
+                    }
+                }
+            }
+            float new_mean = ggml_tensor_mean(tensor);
+            ggml_tensor_scale(tensor, (original_mean / new_mean));
+        }
+
+        GGML_ASSERT(hidden_states->ne[1] > prompt_template_encode_start_idx);
+
+        ggml_tensor* new_hidden_states = ggml_new_tensor_3d(work_ctx,
+                                                            GGML_TYPE_F32,
+                                                            hidden_states->ne[0],
+                                                            hidden_states->ne[1] - prompt_template_encode_start_idx,
+                                                            hidden_states->ne[2]);
+
+        ggml_tensor_iter(new_hidden_states, [&](ggml_tensor* new_hidden_states, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
+            float value = ggml_tensor_get_f32(hidden_states, i0, i1 + prompt_template_encode_start_idx, i2, i3);
+            ggml_tensor_set_f32(new_hidden_states, value, i0, i1, i2, i3);
+        });
+
+        int64_t t1 = ggml_time_ms();
+        LOG_DEBUG("computing condition graph completed, taking %" PRId64 " ms", t1 - t0);
+        return SDCondition(new_hidden_states, nullptr, nullptr);
+    }
+
+    SDCondition get_learned_condition(ggml_context* work_ctx,
+                                      int n_threads,
+                                      const std::string& text,
+                                      int clip_skip,
+                                      int width,
+                                      int height,
+                                      int adm_in_channels  = -1,
+                                      bool zero_out_masked = false) {
+        std::string prompt      = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n" + text + "<|im_end|>\n<|im_start|>assistant\n";
+        auto tokens_and_weights = tokenize(prompt, 0, false);
+        return get_learned_condition_common(work_ctx, n_threads, tokens_and_weights, clip_skip, zero_out_masked);
     }
 };
 
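For orientation, the weighting block inside get_learned_condition_common above follows the same prompt-attention scheme as the other embedders: each token's hidden state is multiplied by the weight that parse_prompt_attention assigned to its text span, and the whole tensor is then rescaled by original_mean / new_mean so its overall mean is unchanged; finally the first prompt_template_encode_start_idx (34) positions, which cover the fixed chat-template prefix, are sliced off before the embedding is handed to the diffusion model. A minimal plain-C++ sketch of the weight-then-renormalize step (hypothetical helper on std::vector, not the ggml code):

#include <cstdio>
#include <vector>

// Apply a per-token weight to each token's embedding, then rescale everything
// so the overall mean matches the unweighted mean (the role played by
// ggml_tensor_mean / ggml_tensor_scale in the embedder).
void apply_prompt_weights(std::vector<std::vector<float>>& emb, const std::vector<float>& weights) {
    double original_sum = 0.0, count = 0.0;
    for (const auto& tok : emb)
        for (float v : tok) { original_sum += v; count += 1.0; }
    float original_mean = static_cast<float>(original_sum / count);

    for (size_t t = 0; t < emb.size(); t++)
        for (float& v : emb[t]) v *= weights[t];

    double new_sum = 0.0;
    for (const auto& tok : emb)
        for (float v : tok) new_sum += v;
    float new_mean = static_cast<float>(new_sum / count);

    float rescale = original_mean / new_mean;
    for (auto& tok : emb)
        for (float& v : tok) v *= rescale;
}

int main() {
    // Two tokens, embedding dim 3; the second token is emphasized (weight 1.2).
    std::vector<std::vector<float>> emb = {{0.1f, 0.2f, 0.3f}, {0.4f, 0.5f, 0.6f}};
    apply_prompt_weights(emb, {1.0f, 1.2f});
    printf("emb[1][0] = %f\n", emb[1][0]);
    return 0;
}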
