diff --git a/.github/scripts/validate_binaries.sh b/.github/scripts/validate_binaries.sh
index ddf28f105..ffefda485 100755
--- a/.github/scripts/validate_binaries.sh
+++ b/.github/scripts/validate_binaries.sh
@@ -30,7 +30,7 @@ conda run -n "${CONDA_ENV}" python --version
 
 # Install pytorch, torchrec and fbgemm as per
 # installation instructions on following page
-# https://github.com/pytorch/torchrec#installations
+# https://github.com/meta-pytorch/torchrec#installations
 
 
 # figure out CUDA VERSION
diff --git a/.github/workflows/build-wheels-linux.yml b/.github/workflows/build-wheels-linux.yml
index a258658b3..dd60894b9 100644
--- a/.github/workflows/build-wheels-linux.yml
+++ b/.github/workflows/build-wheels-linux.yml
@@ -35,7 +35,7 @@ jobs:
       - name: Checkout torchrec repository
         uses: actions/checkout@v4
         with:
-          repository: pytorch/torchrec
+          repository: meta-pytorch/torchrec
       - name: Filter Generated Built Matrix
         id: filter
         env:
@@ -49,10 +49,10 @@ jobs:
           echo "matrix=${MATRIX_BLOB}" >> "${GITHUB_OUTPUT}"
   build:
     needs: filter-matrix
-    name: pytorch/torchrec
+    name: meta-pytorch/torchrec
     uses: pytorch/test-infra/.github/workflows/build_wheels_linux.yml@main
     with:
-      repository: pytorch/torchrec
+      repository: meta-pytorch/torchrec
       ref: ""
       test-infra-repository: pytorch/test-infra
       test-infra-ref: main
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 547fd0d77..3f4289782 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -121,4 +121,4 @@ jobs:
           s3-bucket: doc-previews
           if-no-files-found: error
           path: docs
-          s3-prefix: pytorch/torchrec/${{ github.event.pull_request.number }}
+          s3-prefix: meta-pytorch/torchrec/${{ github.event.pull_request.number }}
diff --git a/.github/workflows/validate-binaries.yml b/.github/workflows/validate-binaries.yml
index 075aeabdf..431e41033 100644
--- a/.github/workflows/validate-binaries.yml
+++ b/.github/workflows/validate-binaries.yml
@@ -36,7 +36,7 @@ jobs:
       package_type: "wheel"
       os: "linux"
       channel: ${{ inputs.channel }}
-      repository: "pytorch/torchrec"
+      repository: "meta-pytorch/torchrec"
      smoke_test: "source ./.github/scripts/validate_binaries.sh"
      with_cuda: enable
      with_rocm: false
diff --git a/README.MD b/README.MD
index 8c7e5d19c..0c212410c 100644
--- a/README.MD
+++ b/README.MD
@@ -59,7 +59,7 @@ Check out the [Getting Started](https://pytorch.org/torchrec/setup-torchrec.html
 
 2. Clone TorchRec.
    ```
-   git clone --recursive https://github.com/pytorch/torchrec
+   git clone --recursive https://github.com/meta-pytorch/torchrec
    cd torchrec
    ```
 
@@ -108,7 +108,7 @@ Check out the [Getting Started](https://pytorch.org/torchrec/setup-torchrec.html
 
 ## Contributing
 
-See [CONTRIBUTING.md](https://github.com/pytorch/torchrec/blob/main/CONTRIBUTING.md) for details about contributing to TorchRec!
+See [CONTRIBUTING.md](https://github.com/meta-pytorch/torchrec/blob/main/CONTRIBUTING.md) for details about contributing to TorchRec!
 
 ## Citation
 
diff --git a/benchmarks/README.md b/benchmarks/README.md
index 1cabc1534..d978e3da0 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -4,7 +4,7 @@ We evaluate the performance of two EmbeddingBagCollection modules:
 
 1. `EmbeddingBagCollection` (EBC) ([code](https://pytorch.org/torchrec/torchrec.modules.html#torchrec.modules.embedding_modules.EmbeddingBagCollection)): a simple module backed by [torch.nn.EmbeddingBag](https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html).
-2. `FusedEmbeddingBagCollection` (Fused EBC) ([code](https://github.com/pytorch/torchrec/blob/main/torchrec/modules/fused_embedding_bag_collection.py#L299)): a module backed by [FBGEMM](https://github.com/pytorch/FBGEMM) kernels which enables more efficient, high-performance operations on embedding tables. It is equipped with a fused optimizer, and UVM caching/management that makes much larger memory available for GPUs.
+2. `FusedEmbeddingBagCollection` (Fused EBC) ([code](https://github.com/meta-pytorch/torchrec/blob/main/torchrec/modules/fused_embedding_bag_collection.py#L299)): a module backed by [FBGEMM](https://github.com/pytorch/FBGEMM) kernels which enables more efficient, high-performance operations on embedding tables. It is equipped with a fused optimizer, and UVM caching/management that makes much larger memory available for GPUs.
 
 ## Module architecture and running setup
 
 
@@ -24,7 +24,7 @@ Other setup includes:
 
 ## How to run
 
-After the installation of Torchrec (see "Binary" in the "Installation" section, [link](https://github.com/pytorch/torchrec)), run the following command under the benchmark directory (/torchrec/torchrec/benchmarks):
+After the installation of Torchrec (see "Binary" in the "Installation" section, [link](https://github.com/meta-pytorch/torchrec)), run the following command under the benchmark directory (/torchrec/torchrec/benchmarks):
 
 ```
 python ebc_benchmarks.py [--mode MODE] [--cpu_only]
diff --git a/setup.py b/setup.py
index bc46fef15..b0fb2f9d3 100644
--- a/setup.py
+++ b/setup.py
@@ -92,7 +92,7 @@ def main(argv: List[str]) -> None:
         description="TorchRec: Pytorch library for recommendation systems",
         long_description=readme,
         long_description_content_type="text/markdown",
-        url="https://github.com/pytorch/torchrec",
+        url="https://github.com/meta-pytorch/torchrec",
         license="BSD-3",
         keywords=[
             "pytorch",
diff --git a/torchrec/distributed/train_pipeline/pipeline_stage.py b/torchrec/distributed/train_pipeline/pipeline_stage.py
index 475a3b090..334e26aad 100644
--- a/torchrec/distributed/train_pipeline/pipeline_stage.py
+++ b/torchrec/distributed/train_pipeline/pipeline_stage.py
@@ -449,7 +449,7 @@ def forward_hook(
         ) -> None:
             # Note: tricky part - a bit delicate choreography between
             # StagedPipeline and this class
-            # (see https://github.com/pytorch/torchrec/pull/2239 for details)
+            # (see https://github.com/meta-pytorch/torchrec/pull/2239 for details)
             # wait_dist need to be called as post_forward hook
             # at the end of the batch N, so that the data is awaited
             # before start of the next batch.
diff --git a/torchrec/distributed/train_pipeline/runtime_forwards.py b/torchrec/distributed/train_pipeline/runtime_forwards.py
index a47ee9ebd..07eb955e7 100644
--- a/torchrec/distributed/train_pipeline/runtime_forwards.py
+++ b/torchrec/distributed/train_pipeline/runtime_forwards.py
@@ -76,7 +76,7 @@ def __call__(self, *input, **kwargs) -> Awaitable:
             self._name in self._context.input_dist_tensors_requests
         ), f"Invalid PipelinedForward usage, input_dist of {self._name} is not available, probably consumed by others"
         # we made a basic assumption that an embedding module (EBC, EC, etc.) should only be evoked only
-        # once in the model's forward pass. For more details: https://github.com/pytorch/torchrec/pull/3294
+        # once in the model's forward pass. For more details: https://github.com/meta-pytorch/torchrec/pull/3294
         request = self._context.input_dist_tensors_requests.pop(self._name)
         assert isinstance(request, Awaitable)
         with record_function("## wait_sparse_data_dist ##"):
@@ -125,7 +125,7 @@ def __call__(
             self._name in self._context.embedding_a2a_requests
         ), f"Invalid PipelinedForward usage, input_dist of {self._name} is not available, probably consumed by others"
         # we made a basic assumption that an embedding module (EBC, EC, etc.) should only be evoked only
-        # once in the model's forward pass. For more details: https://github.com/pytorch/torchrec/pull/3294
+        # once in the model's forward pass. For more details: https://github.com/meta-pytorch/torchrec/pull/3294
         ctx = self._context.module_contexts.pop(self._name)
 
         cur_stream = torch.get_device_module(self._device).current_stream()
diff --git a/torchrec/distributed/train_pipeline/train_pipelines.py b/torchrec/distributed/train_pipeline/train_pipelines.py
index aad1c3ea9..354077c79 100644
--- a/torchrec/distributed/train_pipeline/train_pipelines.py
+++ b/torchrec/distributed/train_pipeline/train_pipelines.py
@@ -521,7 +521,7 @@ def detach(self) -> torch.nn.Module:
         Detaches the model from sparse data dist (SDD) pipeline. A user might want to get
         the original model back after training. The original model.forward was previously
         modified by the train pipeline. for more please see:
-        https://github.com/pytorch/torchrec/pull/2076
+        https://github.com/meta-pytorch/torchrec/pull/2076
 
         To use the pipeline after detaching the model, pipeline.attach(model)
         needs to be called.
@@ -547,7 +547,7 @@ def attach(
         """
         should be used with detach function. these functions should only be used from user code,
         when user want to switch the train pipeline. for more please see:
-        https://github.com/pytorch/torchrec/pull/2076
+        https://github.com/meta-pytorch/torchrec/pull/2076
         """
         if model:
             self._model = model
diff --git a/torchrec/inference/README.md b/torchrec/inference/README.md
index 0a8e1cc17..c9f0ac355 100644
--- a/torchrec/inference/README.md
+++ b/torchrec/inference/README.md
@@ -40,7 +40,7 @@ export FBGEMM_LIB=""
 Here, we generate the DLRM model in Torchscript and save it for model loading later on.
 
 ```
-git clone https://github.com/pytorch/torchrec.git
+git clone https://github.com/meta-pytorch/torchrec.git
 cd ~/torchrec/torchrec/inference/
 
 python3 dlrm_packager.py --output_path /tmp/model.pt