30 | 30 |     "stage_ids_this_rank", |
31 | 31 |     "generate_module_names_per_stage", |
32 | 32 |     "pipeline_module_split", |
| 33 | +    "module_split", |
33 | 34 | ] |
34 | 35 | |
35 | 36 | |
@@ -333,3 +334,95 @@ def _build_stage_from_modules( |
333 | 334 | models.append(model_chunk) |
334 | 335 | |
335 | 336 | return stages, models |
| 337 | + |
| 338 | +def module_split( |
| 339 | +    model: nn.Module, |
| 340 | +    module_names_per_stage: list[list[str]], |
| 341 | +) -> list[nn.Module]: |
| 342 | +    """ |
| 343 | +    This API creates pipeline stages based on specified module names for each stage. |
| 344 | +    Each stage is a new nn.Module that reuses (does not copy) submodules of model. |
| 345 | + |
| 346 | +    Args: |
| 347 | +        model: The complete model to be split |
| 348 | +        module_names_per_stage: List of lists, where each inner list contains the module names |
| 349 | +            that should be included in that stage. Module names should be |
| 350 | +            dot-separated paths. Examples: |
| 351 | +            - "tok_embeddings" for token embeddings |
| 352 | +            - "layers.0", "layers.1" for specific transformer layers |
| 353 | +            - "norm" for the final normalization layer |
| 354 | +            - "output" for the output projection layer |
| 355 | + |
| 356 | +    Returns: |
| 357 | +        List of model chunks, one per stage |
| 358 | + |
| 359 | +    Example usage: |
| 360 | +        module_names_per_stage = [ |
| 361 | +            ["tok_embeddings", "layers.0"],  # Stage 0: embeddings + first layer |
| 362 | +            ["layers.1", "layers.2"],        # Stage 1: middle layers |
| 363 | +            ["norm", "output"],              # Stage 2: final norm + output |
| 364 | +        ] |
| 365 | +    """ |
| 366 | +    def _build_stage_from_modules( |
| 367 | +        stage_idx: int, module_names: list[str] |
| 368 | +    ) -> nn.Module: |
| 369 | +        stage_model = nn.Module() |
| 370 | +        # Create a set of modules to keep for faster lookup |
| 371 | +        modules_to_keep = set(module_names) |
| 372 | +        logger.debug(f"Stage {stage_idx}: modules to keep: {modules_to_keep}") |
| 373 | +        for module_name, module_value in model.named_children(): |
| 374 | +            # Handle layer-like structures (e.g., "layers.0", "layers.1") |
| 375 | +            if isinstance(module_value, (nn.ModuleDict, nn.ModuleList)): |
| 376 | +                layers_to_keep = { |
| 377 | +                    name.split(".", 1)[1] |
| 378 | +                    for name in modules_to_keep |
| 379 | +                    if name.startswith(f"{module_name}.") |
| 380 | +                } |
| 381 | + |
| 382 | +                if not layers_to_keep: |
| 383 | +                    continue |
| 384 | + |
| 385 | +                # Keep only the specified layers |
| 386 | +                if isinstance(module_value, nn.ModuleDict): |
| 387 | +                    new_layers = nn.ModuleDict( |
| 388 | +                        {k: v for k, v in module_value.items() if k in layers_to_keep} |
| 389 | +                    ) |
| 390 | +                else: |
| 391 | +                    indices_to_keep = { |
| 392 | +                        int(idx) for idx in layers_to_keep if idx.isdigit() |
| 393 | +                    } |
| 394 | +                    new_layers = nn.ModuleList( |
| 395 | +                        [ |
| 396 | +                            layer |
| 397 | +                            for i, layer in enumerate(module_value) |
| 398 | +                            if i in indices_to_keep |
| 399 | +                        ] |
| 400 | +                    ) |
| 401 | +                setattr(stage_model, module_name, new_layers) |
| 402 | + |
| 403 | +                continue |
| 404 | + |
| 405 | +            # Handle simple module attributes (e.g., "linear", "norm") |
| 406 | +            if module_name not in modules_to_keep: |
| 407 | +                continue |
| 408 | + |
| 409 | +            setattr(stage_model, module_name, module_value) |
| 410 | + |
| 411 | +        return stage_model |
| 412 | + |
| 413 | +    num_stages = len(module_names_per_stage) |
| 414 | +    models = [] |
| 415 | + |
| 416 | +    for stage_idx in range(num_stages): |
| 417 | +        module_names = module_names_per_stage[stage_idx] |
| 418 | +        model_chunk = _build_stage_from_modules( |
| 419 | +            stage_idx, |
| 420 | +            module_names, |
| 421 | +        ) |
| 422 | +        logger.info( |
| 423 | +            f"building stage_idx {stage_idx} " |
| 424 | +            f"with modules {module_names}" |
| 425 | +        ) |
| 426 | +        models.append(model_chunk) |
| 427 | + |
| 428 | +    return models |
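For reference, a minimal usage sketch of the new `module_split` API follows. The `ToyTransformer` below is an invented stand-in whose attribute names simply mirror the docstring example; it is not part of this repository, and the printed output is what the code above would produce for it.

```python
import torch.nn as nn

# Hypothetical toy model for illustration only; attribute names follow the docstring example.
class ToyTransformer(nn.Module):
    def __init__(self, vocab_size=128, dim=16, n_layers=3):
        super().__init__()
        self.tok_embeddings = nn.Embedding(vocab_size, dim)
        self.layers = nn.ModuleList(nn.Linear(dim, dim) for _ in range(n_layers))
        self.norm = nn.LayerNorm(dim)
        self.output = nn.Linear(dim, vocab_size)

model = ToyTransformer()
module_names_per_stage = [
    ["tok_embeddings", "layers.0"],  # Stage 0: embeddings + first layer
    ["layers.1", "layers.2"],        # Stage 1: middle layers
    ["norm", "output"],              # Stage 2: final norm + output
]
chunks = module_split(model, module_names_per_stage)

# Each chunk exposes only its assigned submodules; parameters are shared with `model`, not copied.
for idx, chunk in enumerate(chunks):
    print(idx, [name for name, _ in chunk.named_children()])
# 0 ['tok_embeddings', 'layers']
# 1 ['layers']
# 2 ['norm', 'output']
```

Note that the `nn.ModuleList` branch compacts the kept layers, so `layers.1` of the full model becomes `layers.0` inside stage 1's chunk; any code that maps checkpoint FQNs across stages has to account for that re-indexing. The returned chunks are plain `nn.Module` containers; wiring them into runnable pipeline stages is handled by the surrounding pipeline machinery.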