From 828d05f77e3e0655c059cba07e576c67a9378c77 Mon Sep 17 00:00:00 2001 From: Jimmy Lin Date: Mon, 18 Oct 2021 18:30:13 -0400 Subject: [PATCH] Fix score overflow issue in ScoreTiesAdjusterReranker for SPLADEv2 (#1658) The method of rounding used in ScoreTiesAdjusterReranker causes overflow issues because SPLADEv2 scores can get very big. Doesn't affect the actual ordering of docs, hence everything is fine with MS MARCO output format, but does cause issues with TREC format and trec_eval. With this bug fixed, added SPLADEv2 regressions. --- README.md | 1 + docs/experiments-msmarco-passage-splade-v2.md | 6 +- ...ions-msmarco-passage-distill-splade-max.md | 91 +++++++++++++++++++ ...msmarco-passage-unicoil-tilde-expansion.md | 14 +-- docs/regressions-msmarco-passage-unicoil.md | 10 +- .../rerank/lib/ScoreTiesAdjusterReranker.java | 3 +- ...smarco-passage-distill-splade-max.template | 71 +++++++++++++++ .../msmarco-passage-unicoil.template | 4 +- .../msmarco-passage-distill-splade-max.yaml | 71 +++++++++++++++ ...marco-passage-unicoil-tilde-expansion.yaml | 8 +- .../regression/msmarco-passage-unicoil.yaml | 2 +- 11 files changed, 258 insertions(+), 23 deletions(-) create mode 100644 docs/regressions-msmarco-passage-distill-splade-max.md create mode 100644 src/main/resources/docgen/templates/msmarco-passage-distill-splade-max.template create mode 100644 src/main/resources/regression/msmarco-passage-distill-splade-max.yaml diff --git a/README.md b/README.md index 47a3156a75..74ee92251c 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,7 @@ For the most part, these runs are based on [_default_ parameter settings](https: + Regressions for MS MARCO Passage Ranking: [baselines](docs/regressions-msmarco-passage.md), [doc2query](docs/regressions-msmarco-passage-doc2query.md), [doc2query-T5](docs/regressions-msmarco-passage-docTTTTTquery.md) + Regressions for MS MARCO Passage Ranking: [DeepImpact](docs/regressions-msmarco-passage-deepimpact.md) + Regressions for MS MARCO Passage Ranking: [uniCOIL with doc2query-T5](docs/regressions-msmarco-passage-unicoil.md), [uniCOIL with TILDE](docs/regressions-msmarco-passage-unicoil-tilde-expansion.md) ++ Regressions for MS MARCO Passage Ranking: [SPLADEv2](docs/regressions-msmarco-passage-distill-splade-max.md) + Regressions for MS MARCO Document Ranking, Per Doc: [baselines](docs/regressions-msmarco-doc.md), [doc2query-T5](docs/regressions-msmarco-doc-docTTTTTquery-per-doc.md) + Regressions for MS MARCO Document Ranking, Per Passage: [baselines](docs/regressions-msmarco-doc-per-passage.md), [doc2query-T5](docs/regressions-msmarco-doc-docTTTTTquery-per-passage.md) + Regressions for TREC 2019 Deep Learning (Passage): [baselines](docs/regressions-dl19-passage.md), [doc2query-T5](docs/regressions-dl19-passage-docTTTTTquery.md) diff --git a/docs/experiments-msmarco-passage-splade-v2.md b/docs/experiments-msmarco-passage-splade-v2.md index b9640f2f48..df5ef3c5d1 100644 --- a/docs/experiments-msmarco-passage-splade-v2.md +++ b/docs/experiments-msmarco-passage-splade-v2.md @@ -1,11 +1,11 @@ # Anserini: SPLADEv2 for MS MARCO V1 Passage Ranking -This page describes how to reproduce with Pyserini the DistilSPLADE-max experiments in the following paper: +This page describes how to reproduce the SPLADEv2 results with the DistilSPLADE-max model from the following paper: > Thibault Formal, Carlos Lassance, Benjamin Piwowarski, Stéphane Clinchant. [SPLADE v2: Sparse Lexical and Expansion Model for Information Retrieval.](https://arxiv.org/abs/2109.10086) _arXiv:2109.10086_. 
-Here, we start with a version of the MS MARCO passage corpus that has already been processed with SPLADE, i.e., gone through document expansion and term reweighting.
-Thus, no neural inference is involved. As SPLADE weights are given in fp16, they have been converted to integer by taking the round of weight*100.
+Here, we start with a version of the MS MARCO passage corpus that has already been processed with the model, i.e., gone through document expansion and term reweighting.
+Thus, no neural inference is involved. As the model weights are provided in fp16, they have been converted to integers by rounding weight*100.
 
 Note that Pyserini provides [a comparable reproduction guide](https://github.com/castorini/pyserini/blob/master/docs/experiments-spladev2.md), so if you don't like Java, you can get _exactly_ the same results from Python.
diff --git a/docs/regressions-msmarco-passage-distill-splade-max.md b/docs/regressions-msmarco-passage-distill-splade-max.md
new file mode 100644
index 0000000000..ba302b8fe7
--- /dev/null
+++ b/docs/regressions-msmarco-passage-distill-splade-max.md
@@ -0,0 +1,91 @@
+# Anserini: Regressions for SPLADEv2 on [MS MARCO Passage](https://github.com/microsoft/MSMARCO-Passage-Ranking)
+
+This page documents regression experiments for the DistilSPLADE-max model from SPLADEv2 on the MS MARCO Passage Ranking Task, which is integrated into Anserini's regression testing framework.
+The model is described in the following paper:
+
+> Thibault Formal, Carlos Lassance, Benjamin Piwowarski, Stéphane Clinchant. [SPLADE v2: Sparse Lexical and Expansion Model for Information Retrieval.](https://arxiv.org/abs/2109.10086) _arXiv:2109.10086_.
+
+For more complete instructions on how to run end-to-end experiments, refer to [this page](experiments-msmarco-passage-splade-v2.md).
+
+The exact configurations for these regressions are stored in [this YAML file](../src/main/resources/regression/msmarco-passage-distill-splade-max.yaml).
+Note that this page is automatically generated from [this template](../src/main/resources/docgen/templates/msmarco-passage-distill-splade-max.template) as part of Anserini's regression pipeline, so do not modify this page directly; modify the template instead.
+
+## Indexing
+
+Typical indexing command:
+
+```
+nohup sh target/appassembler/bin/IndexCollection -collection JsonVectorCollection \
+ -input /path/to/msmarco-passage-distill-splade-max \
+ -index indexes/lucene-index.msmarco-passage-distill-splade-max \
+ -generator DefaultLuceneDocumentGenerator \
+ -threads 16 -impact -pretokenized \
+ >& logs/log.msmarco-passage-distill-splade-max &
+```
+
+The directory `/path/to/msmarco-passage-distill-splade-max/` should be a directory containing the compressed `jsonl` files that comprise the corpus.
+See [this page](experiments-msmarco-passage-splade-v2.md) for additional details.
+
+For additional details, see explanation of [common indexing options](common-indexing-options.md).
+
+## Retrieval
+
+Topics and qrels are stored in [`src/main/resources/topics-and-qrels/`](../src/main/resources/topics-and-qrels/).
+The regression experiments here evaluate on the 6980 dev set questions; see [this page](experiments-msmarco-passage.md) for more details.
+ +After indexing has completed, you should be able to perform retrieval as follows: + +``` +nohup target/appassembler/bin/SearchCollection -index indexes/lucene-index.msmarco-passage-distill-splade-max \ + -topicreader TsvInt -topics src/main/resources/topics-and-qrels/topics.msmarco-passage.dev-subset.distill-splade-max.tsv.gz \ + -output runs/run.msmarco-passage-distill-splade-max.distill-splade-max.topics.msmarco-passage.dev-subset.distill-splade-max.tsv.gz \ + -impact -pretokenized & +``` + +Evaluation can be performed using `trec_eval`: + +``` +tools/eval/trec_eval.9.0.4/trec_eval -m map -c -m recip_rank -c -m recall.1000 -c src/main/resources/topics-and-qrels/qrels.msmarco-passage.dev-subset.txt runs/run.msmarco-passage-distill-splade-max.distill-splade-max.topics.msmarco-passage.dev-subset.distill-splade-max.tsv.gz +``` + +## Effectiveness + +With the above commands, you should be able to reproduce the following results: + +MAP | DistilSPLADE-max| +:---------------------------------------|-----------| +[MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.3746 | + + +MRR | DistilSPLADE-max| +:---------------------------------------|-----------| +[MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.3798 | + + +R@1000 | DistilSPLADE-max| +:---------------------------------------|-----------| +[MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.9787 | + +The above runs are in TREC output format and evaluated with `trec_eval`. +In order to reproduce results reported in the paper, we need to convert to MS MARCO output format and then evaluate: + +```bash +python tools/scripts/msmarco/convert_trec_to_msmarco_run.py \ + --input runs/run.msmarco-passage-distill-splade-max.distill-splade-max.topics.msmarco-passage.dev-subset.distill-splade-max.tsv.gz \ + --output runs/run.msmarco-passage-distill-splade-max.distill-splade-max.topics.msmarco-passage.dev-subset.distill-splade-max.tsv.gz.msmarco --quiet + +python tools/scripts/msmarco/msmarco_passage_eval.py \ + tools/topics-and-qrels/qrels.msmarco-passage.dev-subset.txt \ + runs/run.msmarco-passage-distill-splade-max.distill-splade-max.topics.msmarco-passage.dev-subset.distill-splade-max.tsv.gz.msmarco +``` + +The results should be as follows: + +``` +##################### +MRR @10: 0.36852691363078205 +QueriesRanked: 6980 +##################### +``` + +This corresponds to the effectiveness reported in the paper. 
\ No newline at end of file diff --git a/docs/regressions-msmarco-passage-unicoil-tilde-expansion.md b/docs/regressions-msmarco-passage-unicoil-tilde-expansion.md index d032e8ed84..5c2f34c575 100644 --- a/docs/regressions-msmarco-passage-unicoil-tilde-expansion.md +++ b/docs/regressions-msmarco-passage-unicoil-tilde-expansion.md @@ -38,31 +38,31 @@ After indexing has completed, you should be able to perform retrieval as follows ``` nohup target/appassembler/bin/SearchCollection -index indexes/lucene-index.msmarco-passage-unicoil-tilde-expansion \ -topicreader TsvInt -topics src/main/resources/topics-and-qrels/topics.msmarco-passage.dev-subset.unicoil-tilde-expansion.tsv.gz \ - -output runs/run.msmarco-passage-unicoil-tilde-expansion.unicoil.topics.msmarco-passage.dev-subset.unicoil-tilde-expansion.tsv.gz \ + -output runs/run.msmarco-passage-unicoil-tilde-expansion.unicoil-tilde-expansion.topics.msmarco-passage.dev-subset.unicoil-tilde-expansion.tsv.gz \ -impact -pretokenized & ``` Evaluation can be performed using `trec_eval`: ``` -tools/eval/trec_eval.9.0.4/trec_eval -m map -c -m recip_rank -c -m recall.1000 -c src/main/resources/topics-and-qrels/qrels.msmarco-passage.dev-subset.txt runs/run.msmarco-passage-unicoil-tilde-expansion.unicoil.topics.msmarco-passage.dev-subset.unicoil-tilde-expansion.tsv.gz +tools/eval/trec_eval.9.0.4/trec_eval -m map -c -m recip_rank -c -m recall.1000 -c src/main/resources/topics-and-qrels/qrels.msmarco-passage.dev-subset.txt runs/run.msmarco-passage-unicoil-tilde-expansion.unicoil-tilde-expansion.topics.msmarco-passage.dev-subset.unicoil-tilde-expansion.tsv.gz ``` ## Effectiveness With the above commands, you should be able to reproduce the following results: -MAP | uniCOIL | +MAP | uniCOIL w/ TILDE expansion| :---------------------------------------|-----------| -[MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.3560 | +[MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.3561 | -MRR | uniCOIL | +MRR | uniCOIL w/ TILDE expansion| :---------------------------------------|-----------| -[MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.3606 | +[MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.3607 | -R@1000 | uniCOIL | +R@1000 | uniCOIL w/ TILDE expansion| :---------------------------------------|-----------| [MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.9646 | diff --git a/docs/regressions-msmarco-passage-unicoil.md b/docs/regressions-msmarco-passage-unicoil.md index 255d846ca8..b2cd5a2860 100644 --- a/docs/regressions-msmarco-passage-unicoil.md +++ b/docs/regressions-msmarco-passage-unicoil.md @@ -5,7 +5,7 @@ The uniCOIL model is described in the following paper: > Jimmy Lin and Xueguang Ma. [A Few Brief Notes on DeepImpact, COIL, and a Conceptual Framework for Information Retrieval Techniques.](https://arxiv.org/abs/2106.14807) _arXiv:2106.14807_. -For more complete instructions on how to run end-to-end experiments, refer to [this page](experiments-msmarco-passage-unicoil.md). +For more complete instructions on how to run end-to-end experiments, refer to [this page](experiments-msmarco-unicoil.md). The exact configurations for these regressions are stored in [this YAML file](../src/main/resources/regression/msmarco-passage-unicoil.yaml). 
 Note that this page is automatically generated from [this template](../src/main/resources/docgen/templates/msmarco-passage-unicoil.template) as part of Anserini's regression pipeline, so do not modify this page directly; modify the template instead.
@@ -24,7 +24,7 @@ nohup sh target/appassembler/bin/IndexCollection -collection JsonVectorCollectio
 ```
 
 The directory `/path/to/msmarco-passage-unicoil/` should be a directory containing the compressed `jsonl` files that comprise the corpus.
-See [this page](experiments-msmarco-passage-unicoil.md) for additional details.
+See [this page](experiments-msmarco-unicoil.md) for additional details.
 
 For additional details, see explanation of [common indexing options](common-indexing-options.md).
 
@@ -52,17 +52,17 @@ tools/eval/trec_eval.9.0.4/trec_eval -m map -c -m recip_rank -c -m recall.1000 -
 
 With the above commands, you should be able to reproduce the following results:
 
-MAP | uniCOIL |
+MAP | uniCOIL w/ doc2query-T5 expansion|
 :---------------------------------------|-----------|
 [MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.3574 |
 
 
-MRR | uniCOIL |
+MRR | uniCOIL w/ doc2query-T5 expansion|
 :---------------------------------------|-----------|
 [MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.3625 |
 
 
-R@1000 | uniCOIL |
+R@1000 | uniCOIL w/ doc2query-T5 expansion|
 :---------------------------------------|-----------|
 [MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)| 0.9582 |
diff --git a/src/main/java/io/anserini/rerank/lib/ScoreTiesAdjusterReranker.java b/src/main/java/io/anserini/rerank/lib/ScoreTiesAdjusterReranker.java
index 3c1b84f2b4..c108bf3baa 100644
--- a/src/main/java/io/anserini/rerank/lib/ScoreTiesAdjusterReranker.java
+++ b/src/main/java/io/anserini/rerank/lib/ScoreTiesAdjusterReranker.java
@@ -34,7 +34,8 @@ public ScoredDocuments rerank(ScoredDocuments docs, RerankerContext context) {
     int dup = 0;
 
     for (int i=0; i<docs.documents.length; i++) {
diff --git a/src/main/resources/docgen/templates/msmarco-passage-distill-splade-max.template b/src/main/resources/docgen/templates/msmarco-passage-distill-splade-max.template
new file mode 100644
--- /dev/null
+++ b/src/main/resources/docgen/templates/msmarco-passage-distill-splade-max.template
@@ -0,0 +1,71 @@
+# Anserini: Regressions for SPLADEv2 on [MS MARCO Passage](https://github.com/microsoft/MSMARCO-Passage-Ranking)
+
+This page documents regression experiments for the DistilSPLADE-max model from SPLADEv2 on the MS MARCO Passage Ranking Task, which is integrated into Anserini's regression testing framework.
+The model is described in the following paper:
+
+> Thibault Formal, Carlos Lassance, Benjamin Piwowarski, Stéphane Clinchant. [SPLADE v2: Sparse Lexical and Expansion Model for Information Retrieval.](https://arxiv.org/abs/2109.10086) _arXiv:2109.10086_.
+
+For more complete instructions on how to run end-to-end experiments, refer to [this page](experiments-msmarco-passage-splade-v2.md).
+
+The exact configurations for these regressions are stored in [this YAML file](../src/main/resources/regression/msmarco-passage-distill-splade-max.yaml).
+Note that this page is automatically generated from [this template](../src/main/resources/docgen/templates/msmarco-passage-distill-splade-max.template) as part of Anserini's regression pipeline, so do not modify this page directly; modify the template instead.
+
+## Indexing
+
+Typical indexing command:
+
+```
+${index_cmds}
+```
+
+The directory `/path/to/msmarco-passage-distill-splade-max/` should be a directory containing the compressed `jsonl` files that comprise the corpus.
+See [this page](experiments-msmarco-passage-splade-v2.md) for additional details.
+
+For additional details, see explanation of [common indexing options](common-indexing-options.md).
+
+## Retrieval
+
+Topics and qrels are stored in [`src/main/resources/topics-and-qrels/`](../src/main/resources/topics-and-qrels/).
+The regression experiments here evaluate on the 6980 dev set questions; see [this page](experiments-msmarco-passage.md) for more details.
+ +After indexing has completed, you should be able to perform retrieval as follows: + +``` +${ranking_cmds} +``` + +Evaluation can be performed using `trec_eval`: + +``` +${eval_cmds} +``` + +## Effectiveness + +With the above commands, you should be able to reproduce the following results: + +${effectiveness} + +The above runs are in TREC output format and evaluated with `trec_eval`. +In order to reproduce results reported in the paper, we need to convert to MS MARCO output format and then evaluate: + +```bash +python tools/scripts/msmarco/convert_trec_to_msmarco_run.py \ + --input runs/run.msmarco-passage-distill-splade-max.distill-splade-max.topics.msmarco-passage.dev-subset.distill-splade-max.tsv.gz \ + --output runs/run.msmarco-passage-distill-splade-max.distill-splade-max.topics.msmarco-passage.dev-subset.distill-splade-max.tsv.gz.msmarco --quiet + +python tools/scripts/msmarco/msmarco_passage_eval.py \ + tools/topics-and-qrels/qrels.msmarco-passage.dev-subset.txt \ + runs/run.msmarco-passage-distill-splade-max.distill-splade-max.topics.msmarco-passage.dev-subset.distill-splade-max.tsv.gz.msmarco +``` + +The results should be as follows: + +``` +##################### +MRR @10: 0.36852691363078205 +QueriesRanked: 6980 +##################### +``` + +This corresponds to the effectiveness reported in the paper. \ No newline at end of file diff --git a/src/main/resources/docgen/templates/msmarco-passage-unicoil.template b/src/main/resources/docgen/templates/msmarco-passage-unicoil.template index 21964fbd62..9c9ac58b80 100644 --- a/src/main/resources/docgen/templates/msmarco-passage-unicoil.template +++ b/src/main/resources/docgen/templates/msmarco-passage-unicoil.template @@ -5,7 +5,7 @@ The uniCOIL model is described in the following paper: > Jimmy Lin and Xueguang Ma. [A Few Brief Notes on DeepImpact, COIL, and a Conceptual Framework for Information Retrieval Techniques.](https://arxiv.org/abs/2106.14807) _arXiv:2106.14807_. -For more complete instructions on how to run end-to-end experiments, refer to [this page](experiments-msmarco-passage-unicoil.md). +For more complete instructions on how to run end-to-end experiments, refer to [this page](experiments-msmarco-unicoil.md). The exact configurations for these regressions are stored in [this YAML file](../src/main/resources/regression/msmarco-passage-unicoil.yaml). Note that this page is automatically generated from [this template](../src/main/resources/docgen/templates/msmarco-passage-unicoil.template) as part of Anserini's regression pipeline, so do not modify this page directly; modify the template instead. @@ -19,7 +19,7 @@ ${index_cmds} ``` The directory `/path/to/msmarco-passage-unicoil/` should be a directory containing the compressed `jsonl` files that comprise the corpus. -See [this page](experiments-msmarco-passage-unicoil.md) for additional details. +See [this page](experiments-msmarco-unicoil.md) for additional details. For additional details, see explanation of [common indexing options](common-indexing-options.md). 
diff --git a/src/main/resources/regression/msmarco-passage-distill-splade-max.yaml b/src/main/resources/regression/msmarco-passage-distill-splade-max.yaml new file mode 100644 index 0000000000..b7acaf2c73 --- /dev/null +++ b/src/main/resources/regression/msmarco-passage-distill-splade-max.yaml @@ -0,0 +1,71 @@ +--- +name: msmarco-passage-distill-splade-max +index_command: target/appassembler/bin/IndexCollection +index_utils_command: target/appassembler/bin/IndexReaderUtils +search_command: target/appassembler/bin/SearchCollection +topic_root: src/main/resources/topics-and-qrels/ +qrels_root: src/main/resources/topics-and-qrels/ +index_root: +ranking_root: +collection: JsonVectorCollection +generator: DefaultLuceneDocumentGenerator +threads: 16 +index_options: + - -impact + - -pretokenized +topic_reader: TsvInt +evals: + - command: tools/eval/trec_eval.9.0.4/trec_eval + params: + - -m map + - -c + separator: "\t" + parse_index: 2 + metric: map + metric_precision: 4 + can_combine: true + - command: tools/eval/trec_eval.9.0.4/trec_eval + params: + - -m recip_rank + - -c + separator: "\t" + parse_index: 2 + metric: mrr + metric_precision: 4 + can_combine: true + - command: tools/eval/trec_eval.9.0.4/trec_eval + params: + - -m recall.1000 + - -c + separator: "\t" + parse_index: 2 + metric: R@1000 + metric_precision: 4 + can_combine: true +input_roots: + - /tuna1/ # on tuna + - /store/ # on orca + - /scratch2/ # on damiano +input: collections/msmarco/msmarco-passage-distill-splade-max/ +index_path: indexes/lucene-index.msmarco-passage-distill-splade-max +index_stats: + documents: 8841823 + documents (non-empty): 8841823 + total terms: 95445422483 +topics: + - name: "[MS MARCO Passage: Dev](https://github.com/microsoft/MSMARCO-Passage-Ranking)" + path: topics.msmarco-passage.dev-subset.distill-splade-max.tsv.gz + qrel: qrels.msmarco-passage.dev-subset.txt +models: + - name: distill-splade-max + display: DistilSPLADE-max + params: + - -impact -pretokenized + results: + map: + - 0.3746 + mrr: + - 0.3798 + R@1000: + - 0.9787 + diff --git a/src/main/resources/regression/msmarco-passage-unicoil-tilde-expansion.yaml b/src/main/resources/regression/msmarco-passage-unicoil-tilde-expansion.yaml index e5ccfb454d..d329566ef1 100644 --- a/src/main/resources/regression/msmarco-passage-unicoil-tilde-expansion.yaml +++ b/src/main/resources/regression/msmarco-passage-unicoil-tilde-expansion.yaml @@ -57,15 +57,15 @@ topics: path: topics.msmarco-passage.dev-subset.unicoil-tilde-expansion.tsv.gz qrel: qrels.msmarco-passage.dev-subset.txt models: - - name: unicoil - display: uniCOIL + - name: unicoil-tilde-expansion + display: uniCOIL w/ TILDE expansion params: - -impact -pretokenized results: map: - - 0.3560 + - 0.3561 mrr: - - 0.3606 + - 0.3607 R@1000: - 0.9646 diff --git a/src/main/resources/regression/msmarco-passage-unicoil.yaml b/src/main/resources/regression/msmarco-passage-unicoil.yaml index 527d4f2061..1784cf2c3c 100644 --- a/src/main/resources/regression/msmarco-passage-unicoil.yaml +++ b/src/main/resources/regression/msmarco-passage-unicoil.yaml @@ -58,7 +58,7 @@ topics: qrel: qrels.msmarco-passage.dev-subset.txt models: - name: unicoil - display: uniCOIL + display: uniCOIL w/ doc2query-T5 expansion params: - -impact -pretokenized results:
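The `ScoreTiesAdjusterReranker` change in this patch addresses the overflow described in the commit message: `Math.round(float)` returns an `int`, so once a retrieval score multiplied by the rounding scale exceeds `Integer.MAX_VALUE`, the rounded value is clamped and large scores collapse together. The order of documents in the run is unaffected, but `trec_eval` re-sorts TREC-format runs by score, which is presumably why only the TREC output path misbehaved while the MS MARCO output format (rank only, no scores) was fine. Below is a minimal sketch of the failure mode and the double-based workaround; the scale factor (`1e6`), the example score, and the class and variable names are illustrative assumptions rather than the exact constants or code in Anserini.

```java
// Minimal sketch (not Anserini code): why rounding large SPLADEv2 scores through
// Math.round(float) breaks, and how routing through double/long avoids it.
public class RoundingOverflowSketch {
  public static void main(String[] args) {
    // Hypothetical quantized SPLADEv2 score: impact weights are stored as round(weight * 100),
    // so summed document scores can reach the thousands.
    float score = 3000.0f;

    // Buggy pattern: Math.round(float) returns an int; 3000 * 1e6 = 3e9 exceeds
    // Integer.MAX_VALUE, so the rounded value is clamped to 2147483647.
    float clamped = Math.round(score * 1e6f) / 1e6f;

    // Safer pattern: promote to double so Math.round(double) returns a long.
    float safe = (float) (Math.round((double) score * 1e6) / 1e6);

    System.out.println(clamped);  // ~2147.48 -- every sufficiently large score collapses here
    System.out.println(safe);     // 3000.0

    // The quantization step mentioned in the experiment doc: fp16 weight -> integer impact.
    int impact = Math.round(0.8731f * 100);  // 87
    System.out.println(impact);
  }
}
```

Going through `double`/`long` preserves the full magnitude of SPLADEv2-sized scores, so TREC-format run files keep distinct score values for `trec_eval`.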