Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use poetry #46

Open
wants to merge 7 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# python cache
__pycache__

# Downloaded models and data
meteor-1.5.jar
stanford-corenlp-full-2018-10-05
deps.words
329 changes: 214 additions & 115 deletions README.md

Large diffs are not rendered by default.

3,003 changes: 3,003 additions & 0 deletions evaluation/poetry.lock

Large diffs are not rendered by default.

59 changes: 59 additions & 0 deletions evaluation/pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
[tool.poetry]
name = "summ-eval"
version = "0.893"
description = "Toolkit for summarization evaluation"
# Poetry requires one "Name <email>" string per author; a single string
# containing both authors would be parsed as one malformed author entry.
authors = [
    "Alex Fabbri <[email protected]>",
    "Wojciech Kryściński <[email protected]>",
]
license = "MIT"
readme = "../README.md"
packages = [{include = "summ_eval"}]


[tool.poetry.group.dev]
optional = true

[tool.poetry.group.dev.dependencies]
pytest = "^7.4.0"


# Poetry reads project URLs from [tool.poetry.urls];
# [tool.project.urls] is not a recognized table and is silently ignored.
[tool.poetry.urls]
home = "https://github.com/Yale-LILY/SummEval"
repo = "https://github.com/Yale-LILY/SummEval"

[tool.poetry.dependencies]
python = ">=3.9,<3.13"
bert-score = "^0.3.13"
gin-config = "^0.5.0"
moverscore = "^1.0.3"
pytorch-pretrained-bert = "^0.6.2"
psutil = "^5.9.5"
six = "^1.16.0"
numpy = ">=1.11.0"
stanza = "^1.5.0"
sacremoses = "^0.0.53"
transformers = ">=2.2.0"
spacy = ">=2.2.0"
sacrebleu = "^2.3.1"
pyemd = "^1.0.0"
click = "^8.1.6"
nltk = "^3.8.1"
cython = "^3.0.0"
scipy = "^1.11.1"
networkx = "^3.1"
blanc = "^0.3.3"
wmd = {git = "https://github.com/src-d/wmd-relax.git"} # Update to >1.3.2 when available
scikit-learn = "^1.3.0"
pyrouge = { git = "https://github.com/bheinzerling/pyrouge.git", optional = true }
# boto3 = "1.28.29" # Uncomment for faster dependency resolution then rerun `poetry lock --no-update` with it commented


[tool.poetry.extras]
rouge = [ "pyrouge" ]


[tool.poetry.scripts]
calc-scores = 'summ_eval.calc_scores:cli_main'


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
51 changes: 0 additions & 51 deletions evaluation/setup.py

This file was deleted.

Binary file modified evaluation/summ_eval/models/en/svr.pyr_score.en.pkl
Binary file not shown.
Binary file modified evaluation/summ_eval/models/en/svr.responsiveness.en.pkl
Binary file not shown.
18 changes: 15 additions & 3 deletions evaluation/summ_eval/supert_metric.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,22 @@
import os
from collections import Counter
from nltk.tokenize import sent_tokenize
import logging
import gin
import sys

from summ_eval.sentence_transformers import SentenceTransformer
from summ_eval.metric import Metric
from summ_eval.supert_utils import parse_documents, get_all_token_vecs, build_pseudo_ref, get_sbert_score, get_token_vecs
from summ_eval.supert_utils import (
parse_documents,
get_all_token_vecs,
build_pseudo_ref,
get_sbert_score,
get_token_vecs,
)


logger = logging.getLogger(__name__)

try:
PYTHONPATH = os.environ['PYTHONPATH']
Expand All @@ -18,8 +29,9 @@
dirname = os.path.dirname(__file__)

if dirname not in PYTHONPATH:
print(f'Please run the following command and add it to your startup script: \n export PYTHONPATH=$PYTHONPATH:{dirname}')
exit()
# This is a hack to make sure that the sentence_transformers folder can be found for loading models
logger.warning(f"{dirname} needs to be set in the `PYTHONPATH` environment variable. Setting it now.")
sys.path.append(dirname)

@gin.configurable
class SupertMetric(Metric):
Expand Down
27 changes: 27 additions & 0 deletions evaluation/tests/test_s3_metric.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# pylint: disable=C0103
import os
import unittest
from summ_eval.s3_metric import S3Metric
from summ_eval.test_util import CAND_R, REF_R, CANDS, REFS, EPS


class TestScore(unittest.TestCase):
    """Regression tests pinning S3 metric scores on the shared fixtures."""

    # Expected batch scores as (s3_pyr, s3_resp) pairs, aligned with CANDS/REFS.
    EXPECTED_BATCH = [
        (1.358148717958252, 1.5925579213409842),
        (1.1742432908208689, 1.4061338986807543),
        (0.6816419565604588, 0.7101254431464145),
    ]

    def test_score(self):
        """Single-example scoring matches the recorded reference values."""
        metric = S3Metric()
        score_dict = metric.evaluate_example(CAND_R, REF_R)
        # Use assertAlmostEqual(delta=EPS) instead of `(actual - expected) < EPS`:
        # the raw-difference check passed trivially whenever the score came out
        # *smaller* than expected (negative difference), hiding regressions.
        self.assertAlmostEqual(score_dict["s3_pyr"], 0.4402288438243088, delta=EPS)
        self.assertAlmostEqual(score_dict["s3_resp"], 0.5103094504071222, delta=EPS)

    def test_score_batch(self):
        """Batch scoring matches the recorded per-example reference values."""
        metric = S3Metric()
        score_dicts = metric.evaluate_batch(CANDS, REFS)
        for predicted, (pyr, resp) in zip(score_dicts, self.EXPECTED_BATCH):
            self.assertAlmostEqual(predicted["s3_pyr"], pyr, delta=EPS)
            self.assertAlmostEqual(predicted["s3_resp"], resp, delta=EPS)

# Allow running this test module directly: `python test_s3_metric.py`.
if __name__ == "__main__":
    unittest.main()