Skip to content

Commit

Permalink
remove leftovers from #371: is_training parameter in taskmodule implementations (#376)
Browse files Browse the repository at this point in the history
  • Loading branch information
ArneBinder authored Nov 13, 2023
1 parent 65c3d53 commit 1a481d6
Show file tree
Hide file tree
Showing 6 changed files with 0 additions and 6 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,6 @@ def _post_prepare(self):
def encode_input(
self,
document: DocumentType,
is_training: bool = False,
) -> TaskEncodingType:
"""
Create one or multiple task encodings for the given document.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -328,7 +328,6 @@ def _create_relation_candidates(
def encode_input(
self,
document: TextDocument,
is_training: bool = False,
) -> Optional[Union[TaskEncodingType, Sequence[TaskEncodingType],]]:
relations: Sequence[BinaryRelation]
if self.create_relation_candidates:
Expand Down
1 change: 0 additions & 1 deletion src/pytorch_ie/taskmodules/transformer_seq2seq.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,6 @@ def encode_text(self, text: str) -> InputEncodingType:
def encode_input(
self,
document: TextDocument,
is_training: bool = False,
) -> Optional[Union[TaskEncodingType, Sequence[TaskEncodingType],]]:
return TaskEncoding(
document=document,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,6 @@ def prepare(self, documents: Sequence[TextDocument]) -> None:
def encode_input(
self,
document: TextDocument,
is_training: bool = False,
) -> Optional[Union[TaskEncodingType, Sequence[TaskEncodingType],]]:
partitions: Sequence[Span]
if self.single_sentence:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,6 @@ def prepare(self, documents: Sequence[TextDocument]) -> None:
def encode_input(
self,
document: TextDocument,
is_training: bool = False,
) -> Optional[Union[TaskEncodingType, Sequence[TaskEncodingType],]]:
inputs = self.tokenizer(
document.text,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,6 @@ def encode_text(
def encode_input(
self,
document: TextDocument,
is_training: bool = False,
) -> Optional[Union[TaskEncodingType, Sequence[TaskEncodingType],]]:
partitions: Sequence[Optional[Span]]
if self.partition_annotation is not None:
Expand Down

0 comments on commit 1a481d6

Please sign in to comment.