From 280a350912eeb53fd325cb74d3c7f76df64eb6ed Mon Sep 17 00:00:00 2001
From: Kamal Raj Kanakarajan
Date: Sun, 1 Feb 2026 16:30:50 +0530
Subject: [PATCH] Allow max_seq_len configuration in multimodal export

---
 optimum/exporters/executorch/tasks/multimodal_text_to_text.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/optimum/exporters/executorch/tasks/multimodal_text_to_text.py b/optimum/exporters/executorch/tasks/multimodal_text_to_text.py
index 7fc7811..91a4780 100644
--- a/optimum/exporters/executorch/tasks/multimodal_text_to_text.py
+++ b/optimum/exporters/executorch/tasks/multimodal_text_to_text.py
@@ -62,7 +62,8 @@ def load_multimodal_text_to_text_model(model_name_or_path: str, **kwargs):
     attn_implementation = kwargs.get("attn_implementation", "custom_sdpa" if use_custom_sdpa else "sdpa")
     cache_implementation = kwargs.get("cache_implementation", "static")
     use_custom_sdpa = use_custom_sdpa or attn_implementation == "custom_sdpa"
-    max_length = kwargs.get("max_length", 2048)
+    max_seq_len = kwargs.get("max_seq_len", None)
+    max_length = max_seq_len if max_seq_len is not None else kwargs.get("max_length", 2048)
     config = kwargs.get("config") or AutoConfig.from_pretrained(model_name_or_path)
 
     # Load preprocessor_config.json if it exists
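
Usage sketch: with this change, callers can pass max_seq_len through kwargs
and it takes precedence over the legacy max_length kwarg; if neither is
given, the previous default of 2048 still applies. The model id below is a
placeholder and the 4096 value is illustrative, not a recommended setting.

    from optimum.exporters.executorch.tasks.multimodal_text_to_text import (
        load_multimodal_text_to_text_model,
    )

    # max_seq_len (new in this patch) wins over the legacy max_length kwarg;
    # omitting both falls back to the default of 2048 shown in the hunk above.
    model = load_multimodal_text_to_text_model(
        "org/example-multimodal-model",  # placeholder model id
        max_seq_len=4096,                # illustrative value
    )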