diff --git a/pytext/config/component.py b/pytext/config/component.py
index 2a0dcd9b6..127bf27bb 100644
--- a/pytext/config/component.py
+++ b/pytext/config/component.py
@@ -203,7 +203,8 @@ def create_component(component_type: ComponentType, config: Any, *args, **kwargs
     try:
         return cls.from_config(config, *args, **kwargs)
     except TypeError as e:
-        raise Exception(f"Can't create component {cls}: {str(e)}")
+        raise e
+        # raise Exception(f"Can't create component {cls}: {str(e)}")
 
 
 def create_data_handler(data_handler_config, *args, **kwargs):
diff --git a/pytext/models/seq_models/seq2seq_model.py b/pytext/models/seq_models/seq2seq_model.py
index 09f0ab49c..c481706e9 100644
--- a/pytext/models/seq_models/seq2seq_model.py
+++ b/pytext/models/seq_models/seq2seq_model.py
@@ -107,8 +107,8 @@ def arrange_model_inputs(
         torch.Tensor,
         torch.Tensor,
     ]:
-        src_tokens, src_lengths, _ = tensor_dict["src_seq_tokens"]
-        trg_tokens, trg_lengths, _ = tensor_dict["trg_seq_tokens"]
+        src_tokens, src_lengths = tensor_dict["src_seq_tokens"]
+        trg_tokens, trg_lengths = tensor_dict["trg_seq_tokens"]
 
         def _shift_target(in_sequences, seq_lens, eos_idx, pad_idx):
             shifted_sequence = GetTensor(
@@ -136,7 +136,7 @@ def _shift_target(in_sequences, seq_lens, eos_idx, pad_idx):
         )
 
     def arrange_targets(self, tensor_dict):
-        trg_tokens, trg_lengths, _ = tensor_dict["trg_seq_tokens"]
+        trg_tokens, trg_lengths = tensor_dict["trg_seq_tokens"]
         return (trg_tokens, trg_lengths)
 
     def __init__(
@@ -196,7 +196,7 @@ def forward(
     ):
         additional_features: List[List[torch.Tensor]] = []
 
-        if dict_feats:
+        if dict_feats is not None:
             additional_features.append(list(dict_feats))
 
         if contextual_token_embedding is not None:
@@ -206,7 +206,7 @@ def forward(
             src_tokens, additional_features, src_lengths, trg_tokens
         )
 
-        if dict_feats:
+        if dict_feats is not None:
             (
                 output_dict["dict_tokens"],
                 output_dict["dict_weights"],