feat: enable xpu support for meta-reference stack
Requires: meta-llama/llama-models#233
Signed-off-by: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
dvrogozh committed Jan 30, 2025
1 parent 7fe2592 commit 4956907
Showing 1 changed file with 33 additions and 14 deletions.
llama_stack/providers/inline/inference/meta_reference/generation.py
@@ -98,15 +98,27 @@ def build(
         """
         llama_model_id = llama_model.core_model_id.value
         if not torch.distributed.is_initialized():
-            torch.distributed.init_process_group("nccl")
+            if torch.cuda.is_available():
+                torch.distributed.init_process_group("nccl")
+            else:
+                torch.distributed.init_process_group("gloo")
 
         model_parallel_size = llama_model.pth_file_count
 
         if not model_parallel_is_initialized():
             initialize_model_parallel(model_parallel_size)
 
         local_rank = int(os.environ.get("LOCAL_RANK", 0))
-        torch.cuda.set_device(local_rank)
+        if torch.cuda.is_available():
+            device = "cuda"
+            torch.cuda.set_device(local_rank)
+        elif torch.xpu.is_available():
+            device = "xpu"
+            torch.xpu.set_device(local_rank)
+        else:
+            raise NotImplementedError(
+                "Devices other than CUDA and XPU are not supported yet"
+            )
 
         # seed must be the same in all processes
         if config.torch_seed is not None:
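For context, a minimal standalone sketch of the detection pattern this hunk introduces. The helper names resolve_device and init_distributed are illustrative, not part of the commit; the hasattr guard is an extra assumption for PyTorch builds that ship without an xpu module, and the process-group call assumes torchrun-style env:// rendezvous variables are set:

import torch
import torch.distributed


def resolve_device() -> str:
    # Mirror the hunk above: prefer CUDA, then Intel XPU, else fail loudly.
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return "xpu"
    raise NotImplementedError("Devices other than CUDA and XPU are not supported yet")


def init_distributed(device: str) -> None:
    # NCCL only targets NVIDIA GPUs, so non-CUDA devices fall back to the
    # CPU-side gloo backend for process-group bootstrap, as the hunk does.
    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group("nccl" if device == "cuda" else "gloo")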
@@ -189,17 +201,26 @@ def build(
                 "Currently int4 and fp8 are the only supported quantization methods."
             )
         else:
-            if torch.cuda.is_bf16_supported():
-                torch.set_default_tensor_type(torch.cuda.BFloat16Tensor)
-            else:
-                torch.set_default_tensor_type(torch.cuda.HalfTensor)
+            if device == "cuda":
+                if torch.cuda.is_bf16_supported():
+                    torch.set_default_tensor_type(torch.cuda.BFloat16Tensor)
+                else:
+                    torch.set_default_tensor_type(torch.cuda.HalfTensor)
+            elif device == "xpu":
+                torch.set_default_device(device)
+                if torch.xpu.is_bf16_supported():
+                    torch.set_default_dtype(torch.bfloat16)
+                else:
+                    torch.set_default_dtype(torch.half)
             if model_args.vision_chunk_size > 0:
                 model = CrossAttentionTransformer(model_args)
                 model.setup_cache(model_args.max_batch_size, torch.bfloat16)
             else:
                 model = Transformer(model_args)
             model.load_state_dict(state_dict, strict=False)
 
+        model.to(device)
+
         log.info(f"Loaded in {time.time() - start_time:.2f} seconds")
         return Llama(model, tokenizer, model_args, llama_model_id)

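A hedged illustration of the two default-setting mechanisms this hunk mixes: torch.set_default_tensor_type with the torch.cuda tensor types is the older CUDA-specific API, while torch.set_default_device plus torch.set_default_dtype are the portable equivalents used here for the XPU path. The final model.to(device) then moves the loaded weights onto the chosen accelerator. A sketch only, assuming a PyTorch build where torch.xpu exists (falling back to CPU otherwise):

import torch

device = "xpu" if hasattr(torch, "xpu") and torch.xpu.is_available() else "cpu"
torch.set_default_device(device)         # factory functions now allocate here
torch.set_default_dtype(torch.bfloat16)  # and default to bf16

x = torch.zeros(2, 2)
print(x.device, x.dtype)  # e.g. xpu:0 torch.bfloat16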
@@ -266,14 +287,14 @@ def generate(
         )
 
         pad_id = self.tokenizer.pad_id
-        tokens = torch.full((bsz, total_len), pad_id, dtype=torch.long, device="cuda")
+        tokens = torch.full((bsz, total_len), pad_id, dtype=torch.long)
         for k, t in enumerate(prompt_tokens):
-            tokens[k, : len(t)] = torch.tensor(t, dtype=torch.long, device="cuda")
+            tokens[k, : len(t)] = torch.tensor(t, dtype=torch.long)
         if logprobs:
-            token_logprobs = torch.zeros_like(tokens, dtype=torch.float)
+            token_logprobs = torch.zeros_like(tokens)
 
         prev_pos = 0
-        eos_reached = torch.tensor([False] * bsz, device="cuda")
+        eos_reached = torch.tensor([False] * bsz)
         input_text_mask = tokens != pad_id
         if min_prompt_len == total_len:
             # TODO(ashwin): unify this branch with the one below and figure out multimodal crap
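Why the explicit device="cuda" arguments can be dropped: after build() runs, a default placement is already in effect (set_default_device on the XPU path, the CUDA default tensor types on the CUDA path), so factory calls like torch.full and torch.tensor allocate there automatically, and torch.zeros_like follows its input's device. A small sketch under that assumption:

import torch

torch.set_default_device("cuda" if torch.cuda.is_available() else "cpu")

tokens = torch.full((2, 8), 0, dtype=torch.long)  # lands on the default device
token_logprobs = torch.zeros_like(tokens)         # inherits tokens' device
print(tokens.device, token_logprobs.device)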
@@ -285,12 +306,10 @@
                 ignore_index=pad_id,
             )
 
-        stop_tokens = torch.tensor(self.tokenizer.stop_tokens, device="cuda")
+        stop_tokens = torch.tensor(self.tokenizer.stop_tokens)
         for cur_pos in range(min_prompt_len, total_len):
             if is_vision:
-                position_ids = torch.arange(
-                    prev_pos, cur_pos, dtype=torch.long, device="cuda"
-                )
+                position_ids = torch.arange(prev_pos, cur_pos, dtype=torch.long)
                 logits = self.model.forward(
                     position_ids,
                     tokens,

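The same default-device reasoning covers the rest of the decoding loop: stop_tokens and the tokens it is compared against now land on the same device without explicit placement. An illustrative stop-token check under that assumption (the token ids are made up, and torch.isin stands in for the comparison; this is not the exact upstream loop):

import torch

torch.set_default_device("cuda" if torch.cuda.is_available() else "cpu")

stop_tokens = torch.tensor([128001, 128009])       # hypothetical stop ids
next_token = torch.tensor([128009, 42])
eos_reached = torch.isin(next_token, stop_tokens)  # same device on both sides
print(eos_reached)  # tensor([ True, False])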