diff --git a/README.md b/README.md
index db81a92..975a583 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,5 @@
+Linux | [Windows](https://github.com/zumrudu-anka/SiamMask/blob/master/READMEForWindows.md)
+
 # SiamMask
 
 **NEW:** now including code for both training and inference!
@@ -29,12 +31,11 @@ If you find this code useful, please consider citing:
 }
 ```
 
-
 ## Contents
 1. [Environment Setup](#environment-setup)
 2. [Demo](#demo)
-3. [Testing Models](#testing-models)
-4. [Training Models](#training-models)
+3. [Testing](#testing)
+4. [Training](#training)
 
 ## Environment setup
 This code has been tested on Ubuntu 16.04, Python 3.6, Pytorch 0.4.1, CUDA 9.2, RTX 2080 GPUs
diff --git a/READMEForWindows.md b/READMEForWindows.md
new file mode 100644
index 0000000..d736498
--- /dev/null
+++ b/READMEForWindows.md
@@ -0,0 +1,139 @@
+[Linux](https://github.com/zumrudu-anka/SiamMask/blob/master/README.md) | Windows
+
+## Contents
+1. [Environment Setup](#environment-setup)
+2. [Demo](#demo)
+3. [Testing](#testing)
+4. [TODO LIST](#todo-list)
+5. [References](#references)
+
+## Environment Setup
+- Clone the [Repository](https://github.com/foolwood/SiamMask)
+```
+git clone https://github.com/foolwood/SiamMask.git
+cd SiamMask
+set SiamMask=%cd%
+```
+- Set up the Python environment
+```
+python -m venv venv
+venv\Scripts\activate
+pip install -r requirements.txt
+python make.py
+```
+> If you see any errors while installing the packages, check them and install up-to-date versions of the affected packages
+- Add the project to your PYTHONPATH
+```
+set PYTHONPATH=%PYTHONPATH%;%SiamMask%
+```
+## Demo
+- [Setup](#environment-setup) your environment
+- Download the pretrained SiamMask models
+```
+cd %SiamMask%\experiments\siammask_sharp
+python downloadSiamMaskModel.py
+```
+- Change line 26 of `tools\demo.py` from `from custom import Custom` to `from experiments.siammask_sharp.custom import Custom`
+- Change line 9 of `experiments\siammask_sharp\custom.py` from `from resnet import resnet50` to `from .resnet import resnet50`
+- Run `demo.py`
+```
+cd %SiamMask%\experiments\siammask_sharp
+set PYTHONPATH=%PYTHONPATH%;%SiamMask%
+python ../../tools/demo.py --resume SiamMask_DAVIS.pth --config config_davis.json
+```
+## Testing
+- [Setup](#environment-setup) your environment
+- Download the test data
+```
+cd %SiamMask%\data
+python get_test_data.py
+```
+- Download the pretrained models and run the tracker on a test sequence
+```
+cd %SiamMask%\experiments\siammask_sharp
+python downloadSiamMaskModel.py
+python ../../tools/demo.py --resume SiamMask_DAVIS.pth --config config_davis.json --base_path ../../data/VOT2019/drone_across
+```
+
+## TODO LIST
+
+- Evaluate performance on [VOT](http://www.votchallenge.net/)
+```
+....
+```
+
+- Evaluate performance on [DAVIS](https://davischallenge.org/) (less than 50s)
+```
+....
+```
+- Evaluate performance on [Youtube-VOS](https://youtube-vos.org/) (requires downloading data from the [website](https://youtube-vos.org/dataset/download))
+```
+....
+```
+
+- ## Training
+
+- ### Training Data
+  - Download the [Youtube-VOS](https://youtube-vos.org/dataset/download/),
+[COCO](http://cocodataset.org/#download),
+[ImageNet-DET](http://image-net.org/challenges/LSVRC/2015/),
+and [ImageNet-VID](http://image-net.org/challenges/LSVRC/2015/).
+  - Preprocess each dataset according to the [readme](data/coco/readme.md) files.
+
+- ### Download the pre-trained model (174 MB)
+  (This model was trained on the ImageNet-1k Dataset)
+  ```
+  cd $SiamMask/experiments
+  wget http://www.robots.ox.ac.uk/~qwang/resnet.model
+  ls | grep siam | xargs -I {} cp resnet.model {}
+  ```
+
+- ### Training SiamMask base model
+  - [Setup](#environment-setup) your environment
+  - From the experiment directory, run
+  ```
+  cd $SiamMask/experiments/siammask_base/
+  bash run.sh
+  ```
+  - Training takes about 10 hours on four Tesla V100 GPUs.
+  - If you experience out-of-memory errors, you can reduce the batch size in `run.sh`.
+  - You can view progress on TensorBoard (logs are in the experiment's logs/ directory)
+  - After training, you can test the checkpoints on the VOT dataset.
+  ```shell
+  bash test_all.sh -s 1 -e 20 -d VOT2018 -g 4  # test all snapshots with 4 GPUs
+  ```
+  - Select the best model for hyperparameter search.
+  ```shell
+  #bash test_all.sh -m [best_test_model] -d VOT2018 -n [thread_num] -g [gpu_num] # 8 threads with 4 GPUS
+  bash test_all.sh -m snapshot/checkpoint_e12.pth -d VOT2018 -n 8 -g 4 # 8 threads with 4 GPUS
+  ```
+
+- ### Training SiamMask model with the Refine module
+  - [Setup](#environment-setup) your environment
+  - In the experiment directory, train with the best SiamMask base model
+  ```
+  cd $SiamMask/experiments/siammask_sharp
+  bash run.sh <best_base_model>
+  bash run.sh checkpoint_e12.pth
+  ```
+  - You can view progress on TensorBoard (logs are in the experiment's logs/ directory)
+  - After training, you can test the checkpoints on the VOT dataset
+  ```shell
+  bash test_all.sh -s 1 -e 20 -d VOT2018 -g 4
+  ```
+
+- ### Training SiamRPN++ model (*unofficial*)
+  - [Setup](#environment-setup) your environment
+  - From the experiment directory, run
+  ```
+  cd $SiamMask/experiments/siamrpn_resnet
+  bash run.sh
+  ```
+  - You can view progress on TensorBoard (logs are in the experiment's logs/ directory)
+  - After training, you can test the checkpoints on the VOT dataset
+  ```shell
+  bash test_all.sh -h
+  bash test_all.sh -s 1 -e 20 -d VOT2018 -g 4
+  ```
+
+### References
+[SiamMask](https://github.com/foolwood/SiamMask)
diff --git a/data/filesForMoveToTrackdat/download_vot.py b/data/filesForMoveToTrackdat/download_vot.py
new file mode 100644
index 0000000..e93419e
--- /dev/null
+++ b/data/filesForMoveToTrackdat/download_vot.py
@@ -0,0 +1,75 @@
+import sys
+import os
+import requests
+import json
+import urllib.request, shutil
+
+VOT_YEAR = sys.argv[1]
+VOT_CHALLENGE = "main"
+name = "vot" + VOT_YEAR + "_" + VOT_CHALLENGE
+
+dl = f"../dl/{name}"
+
+try:
+    os.makedirs(dl)
+except FileExistsError:
+    print(f"{dl} already exists")
+
+abspath = os.path.abspath(__file__)
+projectDir = os.path.dirname(abspath)
+
+os.chdir(dl)
+
+base_url = f"https://data.votchallenge.net/vot{VOT_YEAR}/{VOT_CHALLENGE}"
+
+if not os.path.exists("description.json"):
+    response = requests.get(base_url + "/description.json", allow_redirects = True)
+    with open("description.json", "wb") as descriptionFile:
+        descriptionFile.write(response.content)
+
+with open("annotations.txt", "w") as annotationsFileOutput, open("color.txt", "w") as colorFileOutput, open("list.txt", "w") as listFileOutput:
+    with open("description.json", "r") as description:
+        data = json.load(description)
+        for sequence in data["sequences"]:
+            annotationsFileOutput.write(sequence["annotations"]["url"] + "\n")
+            colorFileOutput.write(sequence["channels"]["color"]["url"] + "\n")
+            listFileOutput.write(sequence["name"] + "\n")
+
+try:
+    os.makedirs("annotations")
+except FileExistsError:
+    print("annotations/ already exists")
+
+try:
+    os.makedirs("color")
+except FileExistsError:
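+    # the directory is left over from an earlier run; its contents will be reused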
+    print("color/ already exists")
+
+os.chdir("./annotations")
+with open("../annotations.txt", "r") as annotationsTxtFile:
+    lines = annotationsTxtFile.readlines()
+    for line in lines:
+        url = f"{base_url}/{line.strip()}"
+        print(f"Downloading {line.strip()} ...")
+        with urllib.request.urlopen(url) as response, open(line.strip(), "wb") as downloadedAnnotation:
+            shutil.copyfileobj(response, downloadedAnnotation)
+
+os.chdir("../color")
+with open("../color.txt", "r") as colorTxtFile:
+    lines = colorTxtFile.readlines()
+    for line in lines:
+        url = f"{base_url}/{line.strip()}"
+        print(f"Downloading {line.strip()} ...")
+        with urllib.request.urlopen(url) as response, open(line[16:].strip(), "wb") as downloadedColor:
+            shutil.copyfileobj(response, downloadedColor)
+
+        # response = requests.get(url, allow_redirects = True)
+        # with open(line[16:].strip(), "wb") as downloadedColor:
+        #     downloadedColor.write(response.content)
+        #     print("Test datas are downloading...")
\ No newline at end of file
diff --git a/data/filesForMoveToTrackdat/unpack_vot.py b/data/filesForMoveToTrackdat/unpack_vot.py
new file mode 100644
index 0000000..df7082e
--- /dev/null
+++ b/data/filesForMoveToTrackdat/unpack_vot.py
@@ -0,0 +1,22 @@
+import sys
+import os
+from shutil import copyfile
+VOT_YEAR = sys.argv[1]
+VOT_CHALLENGE = "main"
+
+defaultName = f"vot{VOT_YEAR}_{VOT_CHALLENGE}"
+
+dl = "../" + sys.argv[2]
+data = "../../" + sys.argv[3]
+# scripts="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+try:
+    os.makedirs(data)
+except FileExistsError:
+    print(f"{data} already exists")
+
+descriptionFileSrc = f"../dl/{defaultName}/description.json"
+descriptionFileDestination = f"{data}/description.json"
+copyfile(descriptionFileSrc, descriptionFileDestination)
+
+os.system(f"python unzip_vot.py {dl} {data}")
\ No newline at end of file
diff --git a/data/get_test_data.py b/data/get_test_data.py
new file mode 100644
index 0000000..5fb3e8e
--- /dev/null
+++ b/data/get_test_data.py
@@ -0,0 +1,65 @@
+import os
+from shutil import copyfile
+import requests
+
+abspath = os.path.abspath(__file__)
+projectDir = os.path.dirname(abspath)
+trackDat = projectDir + "/trackdat"
+
+if not os.path.exists(trackDat):
+    os.system("git clone https://github.com/jvlmdr/trackdat.git")
+else:
+    print("trackdat is already cloned")
+
+scripts = trackDat + "/scripts"
+
+copyfile(projectDir + "/filesForMoveToTrackdat/download_vot.py", scripts + "/download_vot.py")
+copyfile(projectDir + "/filesForMoveToTrackdat/unpack_vot.py", scripts + "/unpack_vot.py")
+
+os.chdir(scripts)
+os.system("python download_vot.py 2016 dl/vot2016")
+os.system("python download_vot.py 2018 dl/vot2018")
+os.system("python download_vot.py 2019 dl/vot2019")
+os.system("python unpack_vot.py 2016 dl/vot2016_main VOT2016")
+os.system("python unpack_vot.py 2018 dl/vot2018_main VOT2018")
+os.system("python unpack_vot.py 2019 dl/vot2019_main VOT2019")
+
+Vot2016ListTxtPath = trackDat + "/dl/vot2016_main/list.txt"
+Vot2018ListTxtPath = trackDat + "/dl/vot2018_main/list.txt"
+Vot2019ListTxtPath = trackDat + "/dl/vot2019_main/list.txt"
+
+copyfile(Vot2016ListTxtPath, projectDir + "/VOT2016/list.txt")
+copyfile(Vot2018ListTxtPath, projectDir + "/VOT2018/list.txt")
+copyfile(Vot2019ListTxtPath, projectDir + "/VOT2019/list.txt")
+
+os.chdir(projectDir)
+
+base_url = "http://www.robots.ox.ac.uk/~qwang/"
+urlArray = [
+    "VOT2016.json",
+    "VOT2018.json",
+]
+
+for url in urlArray:
+    response = requests.get(base_url + url, allow_redirects = True)
+    with open(url, "wb") as jsonFile:
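+        # VOT2016.json / VOT2018.json are metadata files used by the evaluation toolkit (see the TODO notes below)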
+        jsonFile.write(response.content)
+
+
+
+# ! TODO
+
+# # json file for eval toolkit
+# wget http://www.robots.ox.ac.uk/~qwang/VOT2016.json
+# wget http://www.robots.ox.ac.uk/~qwang/VOT2018.json
+# python create_json.py VOT2019
+
+# # DAVIS
+# wget https://data.vision.ee.ethz.ch/csergi/share/davis/DAVIS-2017-trainval-480p.zip
+# unzip DAVIS-2017-trainval-480p.zip
+# ln -s ./DAVIS ./DAVIS2016
+# ln -s ./DAVIS ./DAVIS2017
+
+
+# # Youtube-VOS
diff --git a/experiments/siammask_sharp/downloadSiamMaskModel.py b/experiments/siammask_sharp/downloadSiamMaskModel.py
new file mode 100644
index 0000000..98534d7
--- /dev/null
+++ b/experiments/siammask_sharp/downloadSiamMaskModel.py
@@ -0,0 +1,28 @@
+import requests
+import os
+
+#? Basic Example
+
+# url = 'https://www.facebook.com/favicon.ico'
+# r = requests.get(url, allow_redirects=True)
+
+# open('facebook.ico', 'wb').write(r.content)
+
+#? Basic Example
+
+#* Running Part
+
+base_url = 'http://www.robots.ox.ac.uk/~qwang/'
+
+urlArray = [
+    "SiamMask_VOT.pth",
+    "SiamMask_VOT_LD.pth",
+    "SiamMask_DAVIS.pth"
+]
+
+for url in urlArray:
+    if not os.path.exists(url):
+        print(f"Downloading {url} ...")
+        r = requests.get(base_url + url, allow_redirects = True)
+        with open(url, 'wb') as modelFile:
+            modelFile.write(r.content)
+
+print("Download complete...")
\ No newline at end of file
diff --git a/make.py b/make.py
new file mode 100644
index 0000000..12ff7e1
--- /dev/null
+++ b/make.py
@@ -0,0 +1,15 @@
+import os
+
+abspath = os.path.abspath(__file__)
+projectDir = os.path.dirname(abspath)
+
+pyVotkitDir = "/utils/pyvotkit"
+pySotDir = "/utils/pysot/utils"
+
+# build the pyvotkit and pysot extension modules in place
+os.chdir(projectDir + pyVotkitDir)
+os.system("python setup.py build_ext --inplace")
+
+os.chdir(projectDir + pySotDir)
+os.system("python setup.py build_ext --inplace")
+
+os.chdir(projectDir)
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 7ed46dd..1dc360a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,6 @@ colorama==0.3.9
 numpy==1.15.4
 requests==2.21.0
 fire==0.1.3
-torch==0.4.1
 matplotlib==2.2.3
 numba==0.39.0
 scipy==1.1.0
@@ -12,5 +11,5 @@ pandas==0.23.4
 tqdm==4.29.1
 tensorboardX==1.6
 opencv_python==3.4.3.18
-torch==0.4.1
-torchvision==0.2.1
+
+# pip install torch===1.4.0 torchvision===0.5.0 -f https://download.pytorch.org/whl/torch_stable.html