From 86435c8084df9923ce461bea6e32c9e637686b0b Mon Sep 17 00:00:00 2001
From: zshyang <57812785+zshyang@users.noreply.github.com>
Date: Thu, 30 Nov 2023 21:46:39 -0800
Subject: [PATCH] Update index.html

---
 index.html | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/index.html b/index.html
index 2e35c5c..d1a3991 100644
--- a/index.html
+++ b/index.html
@@ -625,7 +625,7 @@

Framework Overview

 In (b), human motion is fed into the human motion encoder to produce a semantic-aware, subject-invariant latent code Z. The CLIP feature of the subject-translated sentence and Z are concatenated together and passed into the animal decoders. During inference in (c), we generate animal motions based on human motion sequences sampled from generative models.
-Details on the architecture, loss functions, and inference process are elaborated in paper.
+Details on the architecture, loss functions, and inference process are elaborated in the paper.

@@ -636,7 +636,8 @@

Bibtex

 @misc{yang2023omnimotiongpt,
       title={OmniMotionGPT: Animal Motion Generation with Limited Data},
-      author={Zhangsihao Yang and Mingyuan Zhou and Mengyi Shan and Bingbing Wen and Ziwei Xuan and Mitch Hill and Junjie Bai and Guo-Jun Qi and Yalin Wang},
+      author={Zhangsihao Yang and Mingyuan Zhou and Mengyi Shan and Bingbing Wen and Ziwei Xuan and Mitch Hill
+      and Junjie Bai and Guo-Jun Qi and Yalin Wang},
       year={2023},
       eprint={2311.18303},
       archivePrefix={arXiv},
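
Note for reviewers: the Framework Overview paragraph touched in the first hunk describes the page's pipeline (a human motion encoder producing a latent code Z, which is concatenated with the CLIP feature of the subject-translated sentence and passed to animal decoders). As a reading aid, here is a minimal PyTorch sketch of that data flow. It is an illustrative stand-in, not the repository's implementation: the module names, dimensions, and the GRU/MLP layer choices are all assumptions.

```python
# Minimal sketch (not the authors' code) of the pipeline described in the
# Framework Overview: encode human motion to a latent code Z, concatenate
# it with a CLIP sentence feature, and decode animal motion.
# All names, dimensions, and layer choices below are hypothetical.
import torch
import torch.nn as nn

class HumanMotionEncoder(nn.Module):
    """Maps a human motion sequence to a subject-invariant latent code Z."""
    def __init__(self, pose_dim=263, latent_dim=256):
        super().__init__()
        self.gru = nn.GRU(pose_dim, latent_dim, batch_first=True)

    def forward(self, motion):           # motion: (batch, frames, pose_dim)
        _, h = self.gru(motion)          # final hidden state summarizes the sequence
        return h.squeeze(0)              # Z: (batch, latent_dim)

class AnimalMotionDecoder(nn.Module):
    """Decodes the concatenated [CLIP text feature ; Z] into animal motion."""
    def __init__(self, clip_dim=512, latent_dim=256, pose_dim=135, frames=60):
        super().__init__()
        self.frames, self.pose_dim = frames, pose_dim
        self.mlp = nn.Sequential(
            nn.Linear(clip_dim + latent_dim, 1024),
            nn.ReLU(),
            nn.Linear(1024, frames * pose_dim),
        )

    def forward(self, clip_feat, z):
        x = torch.cat([clip_feat, z], dim=-1)   # the concatenation step from (b)
        return self.mlp(x).view(-1, self.frames, self.pose_dim)

# Inference as in (c): a human motion sampled from a generative model
# (random noise here as a placeholder) drives animal motion generation.
encoder, decoder = HumanMotionEncoder(), AnimalMotionDecoder()
human_motion = torch.randn(1, 60, 263)   # stand-in for a sampled human motion
clip_feat = torch.randn(1, 512)          # stand-in for a CLIP sentence feature
animal_motion = decoder(clip_feat, encoder(human_motion))
print(animal_motion.shape)               # torch.Size([1, 60, 135])
```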