Skip to content
This repository has been archived by the owner on Nov 3, 2023. It is now read-only.

More tests #4999

Merged
merged 47 commits into from
Apr 5, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
47 commits
Select commit Hold shift + click to select a range
451c25a
_initialization_data_dicts type: ignore
mojtaba-komeili Mar 31, 2023
bb41a5d
lint
mojtaba-komeili Mar 31, 2023
50a793e
declared type
mojtaba-komeili Mar 31, 2023
298e509
lint
mojtaba-komeili Mar 31, 2023
2c53c4e
merged
mojtaba-komeili Mar 31, 2023
31a1624
self removed
mojtaba-komeili Mar 31, 2023
9c8e339
more relaxed typing
mojtaba-komeili Apr 3, 2023
e27d253
# type: ignore
mojtaba-komeili Apr 3, 2023
9c4c9cc
added init to multi model chat
mojtaba-komeili Apr 3, 2023
a514a8a
CI image upgrade
mojtaba-komeili Apr 4, 2023
0c9049e
removed the extra naming from CI image
mojtaba-komeili Apr 4, 2023
d8e31c7
CI install
mojtaba-komeili Apr 4, 2023
170c3e0
fooling around
mojtaba-komeili Apr 4, 2023
aebf6b4
another checkpoint
mojtaba-komeili Apr 4, 2023
4f174bb
pip upgrade
mojtaba-komeili Apr 4, 2023
5810c66
cache update
mojtaba-komeili Apr 4, 2023
859fa81
removed cu113
mojtaba-komeili Apr 4, 2023
a748e9a
torch version
mojtaba-komeili Apr 4, 2023
2d240e5
torchtext bump up
mojtaba-komeili Apr 4, 2023
5a23aea
torch 1.13
mojtaba-komeili Apr 4, 2023
9b030c6
torch text upper
mojtaba-komeili Apr 4, 2023
5389714
torch vision version
mojtaba-komeili Apr 4, 2023
3801012
relaxed torch
mojtaba-komeili Apr 4, 2023
8a19231
just do it
mojtaba-komeili Apr 4, 2023
e2ba3fa
clean install
mojtaba-komeili Apr 4, 2023
8c1d605
torch vision ~
mojtaba-komeili Apr 4, 2023
5c3c6b5
setup matching the requirements
mojtaba-komeili Apr 4, 2023
c7b8759
setuptools relaxed
mojtaba-komeili Apr 4, 2023
aa30d3f
setup debug
mojtaba-komeili Apr 4, 2023
24abe1b
sdf
mojtaba-komeili Apr 4, 2023
6531f03
tv <=
mojtaba-komeili Apr 4, 2023
04c1555
removed torch vision
mojtaba-komeili Apr 4, 2023
38b6d30
more
mojtaba-komeili Apr 4, 2023
9f16d15
switched to conda image
mojtaba-komeili Apr 5, 2023
67d79b4
actually
mojtaba-komeili Apr 5, 2023
bfa6722
conda setup
mojtaba-komeili Apr 5, 2023
16948bc
conda init
mojtaba-komeili Apr 5, 2023
af63064
deactivate
mojtaba-komeili Apr 5, 2023
ae7c74f
more init
mojtaba-komeili Apr 5, 2023
f9ec112
added bash
mojtaba-komeili Apr 5, 2023
a124db8
conda install
mojtaba-komeili Apr 5, 2023
d50315a
more echos
mojtaba-komeili Apr 5, 2023
9bf569e
more echo
mojtaba-komeili Apr 5, 2023
0981f3a
sourcing
mojtaba-komeili Apr 5, 2023
29192a0
conda activate to bash env
mojtaba-komeili Apr 5, 2023
0d1ef49
fixing torch on some working version
mojtaba-komeili Apr 5, 2023
58c4cbc
venv
mojtaba-komeili Apr 5, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 44 additions & 14 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,21 @@ version: 2.1
executors:
standard_cpu38:
docker:
- image: circleci/python:3.8.0-buster-node
- image: cimg/python:3.8.0
environment:
PYTHONUNBUFFERED: 1
resource_class: xlarge

small_cpu38:
docker:
- image: circleci/python:3.8.0-buster-node
- image: cimg/python:3.8.0
environment:
PYTHONUNBUFFERED: 1
resource_class: medium

small_cpu38_conda:
docker:
- image: continuumio/miniconda3
environment:
PYTHONUNBUFFERED: 1
resource_class: medium
Expand Down Expand Up @@ -75,6 +82,27 @@ commands:
. ~/venv/bin/activate
python --version

setup_conda:
description: Sets up the virtual environment
steps:
- run:
name: Sets up the virtual environment
command: |
echo "Started creating the ParlAI conda environment."
conda create --quiet --yes --name conda_parlai python=3.8
echo "Created the ParlAI conda environment."
conda init bash
source ~/.bashrc
conda deactivate
echo "Activating the ParlAI conda environment."
conda activate conda_parlai
for package in pytorch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0 pytorch-cuda=11.6
do
echo "Conda installing ${package}."
conda install --quiet --yes "${package}" -c pytorch -c nvidia
done
echo "conda activate conda_parlai" >> $BASH_ENV

codecov:
description: Coverage report
steps:
Expand All @@ -91,7 +119,8 @@ commands:
- run:
name: Installs basic dependencies
command: |
for i in $(seq 1 3); do python -m pip install --progress-bar off pip setuptools==62.3.4 && s=0 && break || s=$? && sleep 10; done; (exit $s)
pip install --upgrade pip
for i in $(seq 1 3); do python -m pip install --progress-bar off pip setuptools && s=0 && break || s=$? && sleep 10; done; (exit $s)
for i in $(seq 1 3); do python -m pip install --progress-bar off coverage && s=0 && break || s=$? && sleep 10; done; (exit $s)
for i in $(seq 1 3); do python -m pip install --progress-bar off codecov && s=0 && break || s=$? && sleep 10; done; (exit $s)
mkdir -p ~/ParlAI/test-results
Expand All @@ -109,7 +138,7 @@ commands:
- run:
name: Install torch GPU and dependencies
command: |
python -m pip install --progress-bar off torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
python -m pip install --progress-bar off torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1
python -m pip install --progress-bar off 'fairscale~=0.4.0'
python -m pip install --progress-bar off pytorch-pretrained-bert
python -m pip install --progress-bar off 'transformers==4.20'
Expand All @@ -128,7 +157,7 @@ commands:
name: Install torch CPU and dependencies
command: |
python -m pip install --progress-bar off 'transformers==4.20'
python -m pip install --progress-bar off 'torch==1.11.0'
python -m pip install --progress-bar off 'torch==1.13.1'
python -c 'import torch; print("Torch version:", torch.__version__)'
python -m torch.utils.collect_env

Expand All @@ -138,7 +167,7 @@ commands:
- run:
name: Install torch CPU and dependencies
command: |
python -m pip install --progress-bar off 'torch==1.11.0+cpu' 'torchvision==0.12.0+cpu' 'torchaudio==0.11.0+cpu' -f https://download.pytorch.org/whl/torch_stable.html
python -m pip install --progress-bar off 'torch==1.13.1' 'torchvision==0.14.1' 'torchaudio==0.13.1'
python -m pip install --progress-bar off 'transformers==4.20'
python -c 'import torch; print("Torch version:", torch.__version__)'
python -m torch.utils.collect_env
Expand Down Expand Up @@ -219,26 +248,26 @@ commands:
- setupcuda
- fixgit
- restore_cache:
key: deps-20230209-<< parameters.cachename >>-{{ checksum "requirements.txt" }}
key: deps-20230404-<< parameters.cachename >>-{{ checksum "requirements.txt" }}
- setup
- installdeps
- << parameters.more_installs >>
- save_cache:
key: deps-20230209-<< parameters.cachename >>-{{ checksum "requirements.txt" }}
key: deps-20230404-<< parameters.cachename >>-{{ checksum "requirements.txt" }}
paths:
- "~/venv/bin"
- "~/venv/lib"
- findtests:
marker: << parameters.marker >>
- restore_cache:
key: data-20230209-<< parameters.cachename >>-{{ checksum "teststorun.txt" }}
key: data-20230404-<< parameters.cachename >>-{{ checksum "teststorun.txt" }}
- run:
name: Run tests
no_output_timeout: 60m
command: |
coverage run -m pytest -m << parameters.marker >> << parameters.pytest_flags >> --junitxml=test-results/junit.xml
- save_cache:
key: data-20230209-<< parameters.cachename >>-{{ checksum "teststorun.txt" }}
key: data-20230404-<< parameters.cachename >>-{{ checksum "teststorun.txt" }}
paths:
- "~/ParlAI/data"
- codecov
Expand All @@ -255,12 +284,12 @@ commands:
- checkout
- fixgit
- restore_cache:
key: deps-20230209-bw-{{ checksum "requirements.txt" }}
key: deps-20230404-bw-{{ checksum "requirements.txt" }}
- setup
- installdeps
- installtorchgpu
- save_cache:
key: deps-20230209-bw-{{ checksum "requirements.txt" }}
key: deps-20230404-bw-{{ checksum "requirements.txt" }}
paths:
- "~/venv/bin"
- "~/venv/lib"
Expand Down Expand Up @@ -301,7 +330,7 @@ commands:
# -------------------------------------------------------------------------------------
jobs:
cleaninstall_38:
executor: standard_cpu38
executor: small_cpu38
working_directory: ~/ParlAI
parallelism: 1
steps:
Expand All @@ -312,7 +341,8 @@ jobs:
name: Test installation instructions
no_output_timeout: 60m
command: |
python -m pip install --progress-bar off pip setuptools==62.3.4
python -m pip install --progress-bar off --upgrade pip
python -m pip install --progress-bar off pip setuptools
python setup.py develop
parlai display_data -t integration_tests

Expand Down
8 changes: 5 additions & 3 deletions parlai/crowdsourcing/tasks/dialcrowd/dialcrowd_blueprint.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
import logging
import os
from dataclasses import dataclass, field
from typing import Any, Dict, Iterable, TYPE_CHECKING
from typing import Any, Dict, List, Union, TYPE_CHECKING

from mephisto.operations.registry import register_mephisto_abstraction
from mephisto.abstractions.blueprint import SharedTaskState
Expand Down Expand Up @@ -53,6 +53,8 @@ class DialCrowdStaticBlueprint(StaticReactBlueprint):
definitions.
"""

_initialization_data_dicts: Union[List[List[Dict[str, Any]]], List[Dict[str, Any]]]

ArgsClass = DialCrowdStaticBlueprintArgs
BLUEPRINT_TYPE = STATIC_BLUEPRINT_TYPE

Expand All @@ -67,7 +69,7 @@ def __init__(
f'subtasks_per_unit must be greater than zero but was {self.subtasks_per_unit}'
)

self.raw_data: Iterable[Dict[str, Any]] = self._initialization_data_dicts
self.raw_data = self._initialization_data_dicts

# Now chunk the data into groups of <num_subtasks>
grouped_data = []
Expand All @@ -77,7 +79,7 @@ def __init__(
for i in range(0, len(self._initialization_data_dicts), self.subtasks_per_unit):
chunk = self._initialization_data_dicts[i : i + self.subtasks_per_unit]
grouped_data.append(chunk)
self._initialization_data_dicts = grouped_data
self._initialization_data_dicts = grouped_data # type: ignore
# Last group may have less unless an exact multiple
logging.info(
f'Grouped data into {len(self._initialization_data_dicts)} tasks with {self.subtasks_per_unit} subtasks each.'
Expand Down
5 changes: 5 additions & 0 deletions parlai/crowdsourcing/tasks/multi_model_chat/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
import os
import random
from dataclasses import dataclass, field
from typing import Any, Dict, Iterable, List, Optional, TYPE_CHECKING
from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING

import numpy as np
from mephisto.operations.registry import register_mephisto_abstraction
Expand Down Expand Up @@ -115,6 +115,8 @@ class TurnAnnotationsStaticBlueprint(StaticReactBlueprint):
definitions.
"""

_initialization_data_dicts: Union[List[List[Dict[str, Any]]], List[Dict[str, Any]]]

ArgsClass = TurnAnnotationsStaticBlueprintArgs
BLUEPRINT_TYPE = STATIC_BLUEPRINT_TYPE

Expand All @@ -133,7 +135,7 @@ def __init__(
f'subtasks_per_unit must be greater than zero but was {self.subtasks_per_unit}'
)

self.raw_data: Iterable[Dict[str, Any]] = self._initialization_data_dicts
self.raw_data = self._initialization_data_dicts

# Load from file if needed specifying which utterances within each
# conversation to annotate
Expand Down Expand Up @@ -180,7 +182,7 @@ def __init__(
for i in range(0, len(self._initialization_data_dicts), self.subtasks_per_unit):
chunk = self._initialization_data_dicts[i : i + self.subtasks_per_unit]
grouped_data.append(chunk)
self._initialization_data_dicts = grouped_data
self._initialization_data_dicts = grouped_data # type: ignore
# Last group may have less unless an exact multiple
logging.info(
f'Grouped data into {len(self._initialization_data_dicts)} tasks with {self.subtasks_per_unit} subtasks each.'
Expand Down Expand Up @@ -397,7 +399,7 @@ def __init__(
# (quality control will always be last subtask)
# TODO: I don't think we need to re-chunk this actually; just iterate
# over the data and add the quality control task
all_data = []
all_data: List[Any] = []
for grp in self._initialization_data_dicts:
all_data.extend(grp)

Expand Down
8 changes: 4 additions & 4 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@ gitdb2==2.0.5
GitPython==3.0.3
hydra-core>=1.1.0
ipython==7.31.1
torch<1.13.0,>=1.4.0
torchvision<0.14.0,>=0.5.0
torch==1.13.1
torchvision==0.14.1
joblib==1.2.0
nltk==3.6.6
omegaconf>=2.1.1
Expand Down Expand Up @@ -44,7 +44,7 @@ subword-nmt==0.3.7
tensorboardX<=2.5.0
tokenizers>=0.8.0
tomli>=2.0.0
torchtext>=0.5.0,<0.14.0
torchtext==0.14.1
tornado==6.0.4
tqdm~=4.62.1
typing-extensions==3.7.4.3
Expand All @@ -59,4 +59,4 @@ ninja~=1.10.2.3
protobuf<=3.20.3, >=3.8.0
contractions~=0.1.72
fsspec~=2022.2.0
google-api-core<=2.11.0 # Latest 2.10.2 requires latest protobuf
google-api-core<=2.11.0
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
long_description=readme,
long_description_content_type='text/markdown',
url='http://parl.ai/',
python_requires='>=3.7',
python_requires='>=3.8',
packages=find_packages(exclude=('data', 'docs', 'tests', 'parlai_internal*')),
install_requires=reqs,
include_package_data=True,
Expand Down