From b60838f2511f9a73ceae0caafa5703f439821102 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Tue, 8 Sep 2020 22:17:09 +0000 Subject: [PATCH 01/15] detokenization parallelization --- fastseq_cli/transformers_generate.py | 95 ++++++++++++++++++++++------ 1 file changed, 76 insertions(+), 19 deletions(-) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index dda5eb56..e996ac65 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -1,22 +1,71 @@ """From Huggingface Transformers.""" import argparse import json +import time from pathlib import Path - import torch +import time from tqdm import tqdm - -from fastseq_cli.transformers_utils import use_task_specific_params, trim_batch, calculate_rouge, calculate_bleu_score +from multiprocessing import Process, Queue, JoinableQueue, cpu_count from transformers import AutoModelForSeq2SeqLM, AutoTokenizer +from fastseq_cli.transformers_utils import use_task_specific_params, trim_batch, calculate_rouge, calculate_bleu_score DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu" +GENERATE_FINISHED = 'done' +POSTPROCESS_FINISHED = None + def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i:i + n] +class IOProcess (Process) : + def __init__ (self, msg_queue, fout): + super(IOProcess, self).__init__() + self.msg_queue = msg_queue + self.fout = fout + def run (self) : + while (True) : + dec = self.msg_queue.get() + if dec == GENERATE_FINISHED : + break + else : + for hypothesis in dec: + self.fout.write(hypothesis + "\n") + self.fout.flush() + self.msg_queue.close() + self.msg_queue.join_thread() + +class PostProcess (Process) : + def __init__ (self, tokenizer, data_queue, msg_queue) : + super(PostProcess, self).__init__() + self.data_queue = data_queue + self.msg_queue = msg_queue + self.tokenizer = tokenizer + + def run (self) : + while True : + summaries = self.data_queue.get() + if summaries == GENERATE_FINISHED : + self.data_queue.put(POSTPROCESS_FINISHED) + break + elif summaries == POSTPROCESS_FINISHED : + self.data_queue.put(POSTPROCESS_FINISHED) + break + else : + dec = self.tokenizer.batch_decode(summaries, + skip_special_tokens=True, + clean_up_tokenization_spaces=False) + self.msg_queue.put(dec) + + self.data_queue.close() + self.data_queue.join_thread() + self.msg_queue.close() + self.msg_queue.join_thread() + self.msg_queue.join() + def generate_summaries_or_translations( examples: list, @@ -28,7 +77,6 @@ def generate_summaries_or_translations( task="summarization", decoder_start_token_id=None, fastseq_opt=True, - no_repeat_ngram_size=None, **gen_kwargs, ) -> None: """Run generation""" @@ -41,36 +89,48 @@ def generate_summaries_or_translations( model = model.half() if decoder_start_token_id is None: decoder_start_token_id = gen_kwargs.pop("decoder_start_token_id", None) - tokenizer = AutoTokenizer.from_pretrained(model_name) - + # update config with summarization specific params use_task_specific_params(model, task) - + data_queue = Queue() + msg_queue = Queue() + p_list = [] + threads = cpu_count() + + for i in range (threads) : + p = PostProcess(tokenizer, data_queue, msg_queue) + p_list.append(p) + p.start() + + io_process = IOProcess( msg_queue, fout) + io_process.start() + for batch in tqdm(list(chunks(examples, batch_size))): if "t5" in model_name: batch = [model.config.prefix + text for text in batch] + torch.cuda.nvtx.range_push("tokenization_step") batch = tokenizer(batch, 
return_tensors="pt", truncation=True, padding="max_length").to(device) input_ids, attention_mask = trim_batch( **batch, pad_token_id=tokenizer.pad_token_id) + torch.cuda.nvtx.range_pop() summaries = model.generate( input_ids=input_ids, attention_mask=attention_mask, decoder_start_token_id=decoder_start_token_id, - no_repeat_ngram_size=no_repeat_ngram_size, **gen_kwargs, ) - dec = tokenizer.batch_decode(summaries, - skip_special_tokens=True, - clean_up_tokenization_spaces=False) - for hypothesis in dec: - fout.write(hypothesis + "\n") - fout.flush() - - + summaries_cpu = summaries.cpu() + data_queue.put(summaries_cpu) + data_queue.put(GENERATE_FINISHED) + for p in p_list : + p.join() + msg_queue.put(GENERATE_FINISHED) + io_process.join() + def run_generate(): """Entrance is here.""" parser = argparse.ArgumentParser() @@ -116,8 +176,6 @@ def run_generate(): help="How many observations. Defaults to all.") parser.add_argument("--fp16", action="store_true") parser.add_argument("--without_fastseq_opt", action="store_true") - parser.add_argument("--no_repeat_ngram_size", type=int, default=None, - required=False, help="size of no repeat ngram") args = parser.parse_args() examples = [ " " + x.rstrip() if "t5" in args.model_name else x.rstrip() @@ -136,7 +194,6 @@ def run_generate(): task=args.task, decoder_start_token_id=args.decoder_start_token_id, fastseq_opt=not args.without_fastseq_opt, - no_repeat_ngram_size=args.no_repeat_ngram_size, ) if args.reference_path is None: return From 98ea20efbd0147fde7a013847ee39625b54f06a4 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Tue, 8 Sep 2020 22:44:04 +0000 Subject: [PATCH 02/15] minor changes --- fastseq_cli/transformers_generate.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index e996ac65..5f3ffe71 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -1,10 +1,8 @@ """From Huggingface Transformers.""" import argparse import json -import time from pathlib import Path import torch -import time from tqdm import tqdm from multiprocessing import Process, Queue, JoinableQueue, cpu_count from transformers import AutoModelForSeq2SeqLM, AutoTokenizer @@ -77,6 +75,7 @@ def generate_summaries_or_translations( task="summarization", decoder_start_token_id=None, fastseq_opt=True, + no_repeat_ngram_size=None, **gen_kwargs, ) -> None: """Run generation""" @@ -89,6 +88,7 @@ def generate_summaries_or_translations( model = model.half() if decoder_start_token_id is None: decoder_start_token_id = gen_kwargs.pop("decoder_start_token_id", None) + tokenizer = AutoTokenizer.from_pretrained(model_name) # update config with summarization specific params @@ -109,18 +109,17 @@ def generate_summaries_or_translations( for batch in tqdm(list(chunks(examples, batch_size))): if "t5" in model_name: batch = [model.config.prefix + text for text in batch] - torch.cuda.nvtx.range_push("tokenization_step") batch = tokenizer(batch, return_tensors="pt", truncation=True, padding="max_length").to(device) input_ids, attention_mask = trim_batch( **batch, pad_token_id=tokenizer.pad_token_id) - torch.cuda.nvtx.range_pop() summaries = model.generate( input_ids=input_ids, attention_mask=attention_mask, decoder_start_token_id=decoder_start_token_id, + no_repeat_ngram_size=no_repeat_ngram_size, **gen_kwargs, ) summaries_cpu = summaries.cpu() @@ -176,6 +175,8 @@ def run_generate(): help="How many observations. 
Defaults to all.") parser.add_argument("--fp16", action="store_true") parser.add_argument("--without_fastseq_opt", action="store_true") + parser.add_argument("--no_repeat_ngram_size", type=int, default=None, + required=False, help="size of no repeat ngram") args = parser.parse_args() examples = [ " " + x.rstrip() if "t5" in args.model_name else x.rstrip() @@ -194,6 +195,7 @@ def run_generate(): task=args.task, decoder_start_token_id=args.decoder_start_token_id, fastseq_opt=not args.without_fastseq_opt, + no_repeat_ngram_size=args.no_repeat_ngram_size, ) if args.reference_path is None: return From f40c8f61cbfad62cbee17cf398e008d090ad5a60 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Wed, 9 Sep 2020 00:21:22 +0000 Subject: [PATCH 03/15] adding arguments to Postprocess --- fastseq_cli/transformers_generate.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index 5f3ffe71..e6ddf9d0 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -37,11 +37,13 @@ def run (self) : self.msg_queue.join_thread() class PostProcess (Process) : - def __init__ (self, tokenizer, data_queue, msg_queue) : + def __init__ (self, tokenizer, data_queue, msg_queue, skip_special_tokens, clean_up_tokenization_spaces) : super(PostProcess, self).__init__() self.data_queue = data_queue self.msg_queue = msg_queue - self.tokenizer = tokenizer + self.tokenizer = tokenizer + self.clean_up_tokenization_spaces = clean_up_tokenization_spaces + self.skip_special_tokens = skip_special_tokens def run (self) : while True : @@ -54,15 +56,14 @@ def run (self) : break else : dec = self.tokenizer.batch_decode(summaries, - skip_special_tokens=True, - clean_up_tokenization_spaces=False) + self.skip_special_tokens, + self.clean_up_tokenization_spaces) self.msg_queue.put(dec) self.data_queue.close() self.data_queue.join_thread() self.msg_queue.close() self.msg_queue.join_thread() - self.msg_queue.join() def generate_summaries_or_translations( @@ -75,7 +76,9 @@ def generate_summaries_or_translations( task="summarization", decoder_start_token_id=None, fastseq_opt=True, - no_repeat_ngram_size=None, + no_repeat_ngram_size=None, + skip_special_tokens=True, + clean_up_tokenization_spaces=False, **gen_kwargs, ) -> None: """Run generation""" @@ -99,7 +102,7 @@ def generate_summaries_or_translations( threads = cpu_count() for i in range (threads) : - p = PostProcess(tokenizer, data_queue, msg_queue) + p = PostProcess(tokenizer, data_queue, msg_queue, skip_special_tokens, clean_up_tokenization_spaces) p_list.append(p) p.start() @@ -177,6 +180,9 @@ def run_generate(): parser.add_argument("--without_fastseq_opt", action="store_true") parser.add_argument("--no_repeat_ngram_size", type=int, default=None, required=False, help="size of no repeat ngram") + parser.add_argument("--include_special_tokens", action="store_true") + parser.add_argument("--leave_tokenization_spaces", action="store_false") + args = parser.parse_args() examples = [ " " + x.rstrip() if "t5" in args.model_name else x.rstrip() @@ -196,7 +202,9 @@ def run_generate(): decoder_start_token_id=args.decoder_start_token_id, fastseq_opt=not args.without_fastseq_opt, no_repeat_ngram_size=args.no_repeat_ngram_size, - ) + skip_special_tokens = not args.include_special_tokens, + clean_up_tokenization_spaces = not args.leave_tokenization_spaces, + ) if args.reference_path is None: return # Compute scores From 
e70078a64517d51a06a16e29468dcc4dbd18c403 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Wed, 9 Sep 2020 00:24:48 +0000 Subject: [PATCH 04/15] adding arguments to Postprocess --- fastseq_cli/transformers_generate.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index e6ddf9d0..53ebbdc4 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -56,8 +56,8 @@ def run (self) : break else : dec = self.tokenizer.batch_decode(summaries, - self.skip_special_tokens, - self.clean_up_tokenization_spaces) + skip_special_tokens = self.skip_special_tokens, + clean_up_tokenization_spaces = self.clean_up_tokenization_spaces) self.msg_queue.put(dec) self.data_queue.close() @@ -180,8 +180,6 @@ def run_generate(): parser.add_argument("--without_fastseq_opt", action="store_true") parser.add_argument("--no_repeat_ngram_size", type=int, default=None, required=False, help="size of no repeat ngram") - parser.add_argument("--include_special_tokens", action="store_true") - parser.add_argument("--leave_tokenization_spaces", action="store_false") args = parser.parse_args() examples = [ @@ -202,8 +200,8 @@ def run_generate(): decoder_start_token_id=args.decoder_start_token_id, fastseq_opt=not args.without_fastseq_opt, no_repeat_ngram_size=args.no_repeat_ngram_size, - skip_special_tokens = not args.include_special_tokens, - clean_up_tokenization_spaces = not args.leave_tokenization_spaces, + skip_special_tokens=True, + clean_up_tokenization_spaces=False, ) if args.reference_path is None: return From 63e66a0d2d8a422d3d0f5077f85043f7bba9a33f Mon Sep 17 00:00:00 2001 From: Jiusheng Chen Date: Wed, 9 Sep 2020 18:00:54 +0000 Subject: [PATCH 05/15] updating throughput --- benchmarks/models/hf_bart.sh | 6 +++--- benchmarks/models/hf_distibart.sh | 4 ++-- benchmarks/models/hf_mbart.sh | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/benchmarks/models/hf_bart.sh b/benchmarks/models/hf_bart.sh index 033ce696..d84ebcb1 100755 --- a/benchmarks/models/hf_bart.sh +++ b/benchmarks/models/hf_bart.sh @@ -20,9 +20,9 @@ source utils.sh grep "facebook/bart-large-cnn cnn_dm.1k/raw val " perf | awk '{print $9}' | awk -F'|' '{if($1!="NA"){c+=1;s+=$1}}END{print s/c}' | bash range.sh 34.8 35 # Speed on V100 16GB 250W grep -E "transformers_v3.0.2 facebook/bart-large-cnn cnn_dm.1k/raw val 32 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 3.2 3.4 -grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 32 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 5.2 100 -grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 6.2 100 -grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 128 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 6.4 100 +grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 32 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 6.8 100 +grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 8.7 100 +grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 128 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 9.1 100 ## Accuracy #grep "facebook/bart-large-cnn cnn_dm/raw val " perf | awk '{print $9}' | awk -F'|' '{if($1!="NA"){c+=1;s+=$1}}END{print s/c}' | 
bash range.sh 44.78 44.82 diff --git a/benchmarks/models/hf_distibart.sh b/benchmarks/models/hf_distibart.sh index 1ff95925..c310be3c 100755 --- a/benchmarks/models/hf_distibart.sh +++ b/benchmarks/models/hf_distibart.sh @@ -20,9 +20,9 @@ source utils.sh grep "sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val " perf | awk '{print $9}' | awk -F'|' '{if($1!="NA"){c+=1;s+=$1}}END{print s/c}' | bash range.sh 35.1 35.3 # Speed on V100 16GB 250W grep -E "transformers_v3.0.2 sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 3.9 4.2 -grep -E "transformers_v3.0.2\+fastseq_v.* sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 6.5 100 +grep -E "transformers_v3.0.2\+fastseq_v.* sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 9.5 100 # todo: bigger bs doesn't increase speed -grep -E "transformers_v3.0.2\+fastseq_v.* sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val 128 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 6.5 100 +grep -E "transformers_v3.0.2\+fastseq_v.* sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val 128 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 9.5 100 ## Accuracy #grep "sshleifer/distilbart-cnn-12-6 cnn_dm/raw val " perf | awk '{print $9}' | awk -F'|' '{if($1!="NA"){c+=1;s+=$1}}END{print s/c}' | bash range.sh 45 45.1 diff --git a/benchmarks/models/hf_mbart.sh b/benchmarks/models/hf_mbart.sh index 6b5393ab..dcb3d22f 100755 --- a/benchmarks/models/hf_mbart.sh +++ b/benchmarks/models/hf_mbart.sh @@ -15,4 +15,4 @@ source utils.sh grep "facebook/mbart-large-en-ro wmt_en_ro/raw val " perf | awk '{if($8!="NA"){c+=1;s+=$8}}END{print s/c}' | bash range.sh 27.79 27.95 # Speed on V100 16GB 250W grep -E "transformers_v3.0.2 facebook/mbart-large-en-ro wmt_en_ro/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 5.8 6.2 -grep -E "transformers_v3.0.2\+fastseq_v.* facebook/mbart-large-en-ro wmt_en_ro/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 6.0 100 +grep -E "transformers_v3.0.2\+fastseq_v.* facebook/mbart-large-en-ro wmt_en_ro/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 7.2 100 From d67bcd3f4414ad85c05332d4c0329aa08ac2d2a8 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Thu, 10 Sep 2020 20:06:32 +0000 Subject: [PATCH 06/15] Ensuring in-order writes --- fastseq_cli/transformers_generate.py | 46 +++++++++++++++++++--------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index 53ebbdc4..b09c7ced 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -24,15 +24,33 @@ def __init__ (self, msg_queue, fout): super(IOProcess, self).__init__() self.msg_queue = msg_queue self.fout = fout - def run (self) : + self.waiting_for=0 + self.dec_buf = {} + + def process_dec (self, dec) : + for hypothesis in dec: + self.fout.write(hypothesis + "\n") + self.fout.flush() + + def process_buffer (self): + while self.waiting_for in self.dec_buf : + self.process_dec(self.dec_buf[self.waiting_for]) + del self.dec_buf[self.waiting_for] + self.waiting_for+=1 + + def run (self) : while (True) : - dec = self.msg_queue.get() + ind, dec = self.msg_queue.get() if dec == GENERATE_FINISHED : break - else : - for hypothesis in dec: - self.fout.write(hypothesis + "\n") - self.fout.flush() + elif ind != self.waiting_for: + self.dec_buf[ind] = dec + else : + 
self.process_dec(dec) + self.waiting_for+=1 + self.process_buffer() + self.process_buffer() + assert not self.dec_buf, "IO Buffer not empty" self.msg_queue.close() self.msg_queue.join_thread() @@ -47,18 +65,18 @@ def __init__ (self, tokenizer, data_queue, msg_queue, skip_special_tokens, clean def run (self) : while True : - summaries = self.data_queue.get() + ind, summaries = self.data_queue.get() if summaries == GENERATE_FINISHED : - self.data_queue.put(POSTPROCESS_FINISHED) + self.data_queue.put((-1,POSTPROCESS_FINISHED)) break elif summaries == POSTPROCESS_FINISHED : - self.data_queue.put(POSTPROCESS_FINISHED) + self.data_queue.put((-1,POSTPROCESS_FINISHED)) break else : dec = self.tokenizer.batch_decode(summaries, skip_special_tokens = self.skip_special_tokens, clean_up_tokenization_spaces = self.clean_up_tokenization_spaces) - self.msg_queue.put(dec) + self.msg_queue.put((ind,dec)) self.data_queue.close() self.data_queue.join_thread() @@ -109,7 +127,7 @@ def generate_summaries_or_translations( io_process = IOProcess( msg_queue, fout) io_process.start() - for batch in tqdm(list(chunks(examples, batch_size))): + for ind, batch in tqdm(enumerate(list(chunks(examples, batch_size)))): if "t5" in model_name: batch = [model.config.prefix + text for text in batch] batch = tokenizer(batch, @@ -126,11 +144,11 @@ def generate_summaries_or_translations( **gen_kwargs, ) summaries_cpu = summaries.cpu() - data_queue.put(summaries_cpu) - data_queue.put(GENERATE_FINISHED) + data_queue.put((ind,summaries_cpu)) + data_queue.put((-1,GENERATE_FINISHED)) for p in p_list : p.join() - msg_queue.put(GENERATE_FINISHED) + msg_queue.put((-1,GENERATE_FINISHED)) io_process.join() def run_generate(): From ef1a19157dcfdd07054277a5ba7eaba197b088f5 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Thu, 10 Sep 2020 20:35:36 +0000 Subject: [PATCH 07/15] minor comments --- fastseq_cli/transformers_generate.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index b09c7ced..06b38d68 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -19,7 +19,8 @@ def chunks(lst, n): for i in range(0, len(lst), n): yield lst[i:i + n] -class IOProcess (Process) : +class IOProcess (Process) : + """ Write detokenized output to file in order.""" def __init__ (self, msg_queue, fout): super(IOProcess, self).__init__() self.msg_queue = msg_queue @@ -55,6 +56,7 @@ def run (self) : self.msg_queue.join_thread() class PostProcess (Process) : + """ Parallel detokenization """ def __init__ (self, tokenizer, data_queue, msg_queue, skip_special_tokens, clean_up_tokenization_spaces) : super(PostProcess, self).__init__() self.data_queue = data_queue @@ -119,7 +121,7 @@ def generate_summaries_or_translations( p_list = [] threads = cpu_count() - for i in range (threads) : + for i in range(threads) : p = PostProcess(tokenizer, data_queue, msg_queue, skip_special_tokens, clean_up_tokenization_spaces) p_list.append(p) p.start() From e9edc09a516f77329883040e42c3c813da8e9eac Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Sat, 12 Sep 2020 02:08:24 +0000 Subject: [PATCH 08/15] linting checks --- fastseq_cli/transformers_generate.py | 104 ++++++++++++++------------- 1 file changed, 54 insertions(+), 50 deletions(-) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index 06b38d68..4aa7a100 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ 
-2,9 +2,9 @@ import argparse import json from pathlib import Path -import torch -from tqdm import tqdm from multiprocessing import Process, Queue, JoinableQueue, cpu_count +from tqdm import tqdm +import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from fastseq_cli.transformers_utils import use_task_specific_params, trim_batch, calculate_rouge, calculate_bleu_score @@ -20,69 +20,71 @@ def chunks(lst, n): yield lst[i:i + n] class IOProcess (Process) : - """ Write detokenized output to file in order.""" - def __init__ (self, msg_queue, fout): - super(IOProcess, self).__init__() - self.msg_queue = msg_queue + """ Write detokenized output to file in order.""" + def __init__ (self, msg_queue, fout): + super(IOProcess, self).__init__() + self.msg_queue = msg_queue self.fout = fout self.waiting_for=0 self.dec_buf = {} - def process_dec (self, dec) : + def process_dec (self, dec) : for hypothesis in dec: self.fout.write(hypothesis + "\n") self.fout.flush() - + def process_buffer (self): - while self.waiting_for in self.dec_buf : + while self.waiting_for in self.dec_buf : self.process_dec(self.dec_buf[self.waiting_for]) del self.dec_buf[self.waiting_for] self.waiting_for+=1 def run (self) : - while (True) : - ind, dec = self.msg_queue.get() - if dec == GENERATE_FINISHED : - break - elif ind != self.waiting_for: + while True : + ind, dec = self.msg_queue.get() + if dec == GENERATE_FINISHED : + break + elif ind != self.waiting_for: self.dec_buf[ind] = dec - else : + else : self.process_dec(dec) self.waiting_for+=1 self.process_buffer() - self.process_buffer() - assert not self.dec_buf, "IO Buffer not empty" - self.msg_queue.close() - self.msg_queue.join_thread() + self.process_buffer() + assert not self.dec_buf, "IO Buffer not empty" + self.msg_queue.close() + self.msg_queue.join_thread() class PostProcess (Process) : """ Parallel detokenization """ - def __init__ (self, tokenizer, data_queue, msg_queue, skip_special_tokens, clean_up_tokenization_spaces) : - super(PostProcess, self).__init__() - self.data_queue = data_queue - self.msg_queue = msg_queue + def __init__ (self, tokenizer, data_queue, msg_queue, + skip_special_tokens, clean_up_tokenization_spaces) : + super(PostProcess, self).__init__() + self.data_queue = data_queue + self.msg_queue = msg_queue self.tokenizer = tokenizer self.clean_up_tokenization_spaces = clean_up_tokenization_spaces self.skip_special_tokens = skip_special_tokens - def run (self) : - while True : - ind, summaries = self.data_queue.get() - if summaries == GENERATE_FINISHED : - self.data_queue.put((-1,POSTPROCESS_FINISHED)) - break - elif summaries == POSTPROCESS_FINISHED : - self.data_queue.put((-1,POSTPROCESS_FINISHED)) + def run (self) : + while True : + ind, summaries = self.data_queue.get() + if summaries == GENERATE_FINISHED : + self.data_queue.put((-1,POSTPROCESS_FINISHED)) + break + elif summaries == POSTPROCESS_FINISHED : + self.data_queue.put((-1,POSTPROCESS_FINISHED)) break else : dec = self.tokenizer.batch_decode(summaries, - skip_special_tokens = self.skip_special_tokens, - clean_up_tokenization_spaces = self.clean_up_tokenization_spaces) - self.msg_queue.put((ind,dec)) + skip_special_tokens = self.skip_special_tokens, + clean_up_tokenization_spaces = + self.clean_up_tokenization_spaces) + self.msg_queue.put((ind,dec)) - self.data_queue.close() + self.data_queue.close() self.data_queue.join_thread() - self.msg_queue.close() + self.msg_queue.close() self.msg_queue.join_thread() @@ -111,24 +113,25 @@ def generate_summaries_or_translations( 
model = model.half() if decoder_start_token_id is None: decoder_start_token_id = gen_kwargs.pop("decoder_start_token_id", None) - + tokenizer = AutoTokenizer.from_pretrained(model_name) - + # update config with summarization specific params use_task_specific_params(model, task) - data_queue = Queue() - msg_queue = Queue() + data_queue = Queue() + msg_queue = Queue() p_list = [] threads = cpu_count() - - for i in range(threads) : - p = PostProcess(tokenizer, data_queue, msg_queue, skip_special_tokens, clean_up_tokenization_spaces) + + for i in range(threads) : + p = PostProcess(tokenizer, data_queue, msg_queue, + skip_special_tokens, clean_up_tokenization_spaces) p_list.append(p) p.start() - + io_process = IOProcess( msg_queue, fout) io_process.start() - + for ind, batch in tqdm(enumerate(list(chunks(examples, batch_size)))): if "t5" in model_name: batch = [model.config.prefix + text for text in batch] @@ -147,12 +150,13 @@ def generate_summaries_or_translations( ) summaries_cpu = summaries.cpu() data_queue.put((ind,summaries_cpu)) - data_queue.put((-1,GENERATE_FINISHED)) + data_queue.put((-1,GENERATE_FINISHED)) for p in p_list : - p.join() + p.join() msg_queue.put((-1,GENERATE_FINISHED)) io_process.join() - + fout.close() + def run_generate(): """Entrance is here.""" parser = argparse.ArgumentParser() @@ -198,9 +202,9 @@ def run_generate(): help="How many observations. Defaults to all.") parser.add_argument("--fp16", action="store_true") parser.add_argument("--without_fastseq_opt", action="store_true") - parser.add_argument("--no_repeat_ngram_size", type=int, default=None, + parser.add_argument("--no_repeat_ngram_size", type=int, default=None, required=False, help="size of no repeat ngram") - + args = parser.parse_args() examples = [ " " + x.rstrip() if "t5" in args.model_name else x.rstrip() From ee40b82fd805ee7d70bd7d0da18789abda4d716e Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Wed, 16 Sep 2020 17:31:05 +0000 Subject: [PATCH 09/15] formatting changes --- fastseq_cli/transformers_generate.py | 32 ++++++++++++++-------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index 4aa7a100..b236d326 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -2,7 +2,7 @@ import argparse import json from pathlib import Path -from multiprocessing import Process, Queue, JoinableQueue, cpu_count +from multiprocessing import Process, Queue, cpu_count from tqdm import tqdm import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer @@ -21,28 +21,28 @@ def chunks(lst, n): class IOProcess (Process) : """ Write detokenized output to file in order.""" - def __init__ (self, msg_queue, fout): + def __init__(self, msg_queue, fout): super(IOProcess, self).__init__() self.msg_queue = msg_queue self.fout = fout self.waiting_for=0 self.dec_buf = {} - def process_dec (self, dec) : + def process_dec(self, dec) : for hypothesis in dec: self.fout.write(hypothesis + "\n") self.fout.flush() - def process_buffer (self): + def process_buffer(self): while self.waiting_for in self.dec_buf : self.process_dec(self.dec_buf[self.waiting_for]) del self.dec_buf[self.waiting_for] self.waiting_for+=1 - def run (self) : + def run(self) : while True : ind, dec = self.msg_queue.get() - if dec == GENERATE_FINISHED : + if dec == GENERATE_FINISHED: break elif ind != self.waiting_for: self.dec_buf[ind] = dec @@ -55,9 +55,9 @@ def run (self) : self.msg_queue.close() 
self.msg_queue.join_thread() -class PostProcess (Process) : +class PostProcess(Process) : """ Parallel detokenization """ - def __init__ (self, tokenizer, data_queue, msg_queue, + def __init__(self, tokenizer, data_queue, msg_queue, skip_special_tokens, clean_up_tokenization_spaces) : super(PostProcess, self).__init__() self.data_queue = data_queue @@ -66,21 +66,21 @@ def __init__ (self, tokenizer, data_queue, msg_queue, self.clean_up_tokenization_spaces = clean_up_tokenization_spaces self.skip_special_tokens = skip_special_tokens - def run (self) : + def run(self) : while True : ind, summaries = self.data_queue.get() - if summaries == GENERATE_FINISHED : - self.data_queue.put((-1,POSTPROCESS_FINISHED)) + if summaries == GENERATE_FINISHED: + self.data_queue.put((-1, POSTPROCESS_FINISHED)) break elif summaries == POSTPROCESS_FINISHED : - self.data_queue.put((-1,POSTPROCESS_FINISHED)) + self.data_queue.put((-1, POSTPROCESS_FINISHED)) break else : dec = self.tokenizer.batch_decode(summaries, skip_special_tokens = self.skip_special_tokens, clean_up_tokenization_spaces = self.clean_up_tokenization_spaces) - self.msg_queue.put((ind,dec)) + self.msg_queue.put((ind, dec)) self.data_queue.close() self.data_queue.join_thread() @@ -149,11 +149,11 @@ def generate_summaries_or_translations( **gen_kwargs, ) summaries_cpu = summaries.cpu() - data_queue.put((ind,summaries_cpu)) - data_queue.put((-1,GENERATE_FINISHED)) + data_queue.put((ind, summaries_cpu)) + data_queue.put((-1, GENERATE_FINISHED)) for p in p_list : p.join() - msg_queue.put((-1,GENERATE_FINISHED)) + msg_queue.put((-1, GENERATE_FINISHED)) io_process.join() fout.close() From 5bf190b39dfa9341679ce394c41f313bd8197b88 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Fri, 25 Sep 2020 06:25:35 +0000 Subject: [PATCH 10/15] Multi-worker preprocess and fetch --- fastseq_cli/transformers_generate.py | 92 ++++++++++++++++++---------- 1 file changed, 60 insertions(+), 32 deletions(-) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index b236d326..d692e57c 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -2,7 +2,7 @@ import argparse import json from pathlib import Path -from multiprocessing import Process, Queue, cpu_count +from multiprocessing import Process, Queue from tqdm import tqdm import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer @@ -13,13 +13,28 @@ GENERATE_FINISHED = 'done' POSTPROCESS_FINISHED = None +class Dataset(torch.utils.data.Dataset): + """Characterizes a dataset for PyTorch""" + def __init__(self, examples, tokenizer, model_name, prefix): + self.examples = examples + self.tokenizer= tokenizer + self.model_name = model_name + self.prefix = prefix -def chunks(lst, n): - """Yield successive n-sized chunks from lst.""" - for i in range(0, len(lst), n): - yield lst[i:i + n] + def __len__(self): + return len(self.examples) -class IOProcess (Process) : + def __getitem__(self, index): + if "t5" in self.model_name: + batch = [self.prefix + text for text in batch] + batch = self.examples[index] + batch = self.tokenizer(batch, + return_tensors="pt", + truncation=True, + padding="max_length") + return batch['input_ids'], batch['attention_mask'] + +class IOProcess (Process): """ Write detokenized output to file in order.""" def __init__(self, msg_queue, fout): super(IOProcess, self).__init__() @@ -28,25 +43,25 @@ def __init__(self, msg_queue, fout): self.waiting_for=0 self.dec_buf = {} - def process_dec(self, dec) : + def 
process_dec(self, dec): for hypothesis in dec: self.fout.write(hypothesis + "\n") self.fout.flush() def process_buffer(self): - while self.waiting_for in self.dec_buf : + while self.waiting_for in self.dec_buf: self.process_dec(self.dec_buf[self.waiting_for]) del self.dec_buf[self.waiting_for] self.waiting_for+=1 - def run(self) : - while True : + def run(self): + while True: ind, dec = self.msg_queue.get() if dec == GENERATE_FINISHED: break elif ind != self.waiting_for: self.dec_buf[ind] = dec - else : + else: self.process_dec(dec) self.waiting_for+=1 self.process_buffer() @@ -55,10 +70,10 @@ def run(self) : self.msg_queue.close() self.msg_queue.join_thread() -class PostProcess(Process) : +class PostProcess(Process): """ Parallel detokenization """ def __init__(self, tokenizer, data_queue, msg_queue, - skip_special_tokens, clean_up_tokenization_spaces) : + skip_special_tokens, clean_up_tokenization_spaces): super(PostProcess, self).__init__() self.data_queue = data_queue self.msg_queue = msg_queue @@ -66,16 +81,16 @@ def __init__(self, tokenizer, data_queue, msg_queue, self.clean_up_tokenization_spaces = clean_up_tokenization_spaces self.skip_special_tokens = skip_special_tokens - def run(self) : - while True : + def run(self): + while True: ind, summaries = self.data_queue.get() if summaries == GENERATE_FINISHED: self.data_queue.put((-1, POSTPROCESS_FINISHED)) break - elif summaries == POSTPROCESS_FINISHED : + elif summaries == POSTPROCESS_FINISHED: self.data_queue.put((-1, POSTPROCESS_FINISHED)) break - else : + else: dec = self.tokenizer.batch_decode(summaries, skip_special_tokens = self.skip_special_tokens, clean_up_tokenization_spaces = @@ -87,7 +102,6 @@ def run(self) : self.msg_queue.close() self.msg_queue.join_thread() - def generate_summaries_or_translations( examples: list, out_file: str, @@ -101,6 +115,8 @@ def generate_summaries_or_translations( no_repeat_ngram_size=None, skip_special_tokens=True, clean_up_tokenization_spaces=False, + pre_process_threads=2, + post_process_threads=2, **gen_kwargs, ) -> None: """Run generation""" @@ -121,9 +137,8 @@ def generate_summaries_or_translations( data_queue = Queue() msg_queue = Queue() p_list = [] - threads = cpu_count() - for i in range(threads) : + for i in range(post_process_threads): p = PostProcess(tokenizer, data_queue, msg_queue, skip_special_tokens, clean_up_tokenization_spaces) p_list.append(p) @@ -131,16 +146,15 @@ def generate_summaries_or_translations( io_process = IOProcess( msg_queue, fout) io_process.start() - - for ind, batch in tqdm(enumerate(list(chunks(examples, batch_size)))): - if "t5" in model_name: - batch = [model.config.prefix + text for text in batch] - batch = tokenizer(batch, - return_tensors="pt", - truncation=True, - padding="max_length").to(device) + dataset = Dataset(examples, tokenizer, model_name, model.config.prefix) + training_generator = torch.utils.data.DataLoader(dataset, + batch_size=batch_size, num_workers = pre_process_threads) + for ind, batch in tqdm(enumerate(training_generator)): + input_ids, attention_mask = batch + input_ids = input_ids.view(batch_size, -1).to(device) + attention_mask = attention_mask.view(batch_size, -1).to(device) input_ids, attention_mask = trim_batch( - **batch, pad_token_id=tokenizer.pad_token_id) + input_ids, tokenizer.pad_token_id, attention_mask) summaries = model.generate( input_ids=input_ids, attention_mask=attention_mask, @@ -151,7 +165,7 @@ def generate_summaries_or_translations( summaries_cpu = summaries.cpu() data_queue.put((ind, summaries_cpu)) 
data_queue.put((-1, GENERATE_FINISHED)) - for p in p_list : + for p in p_list: p.join() msg_queue.put((-1, GENERATE_FINISHED)) io_process.join() @@ -204,6 +218,18 @@ def run_generate(): parser.add_argument("--without_fastseq_opt", action="store_true") parser.add_argument("--no_repeat_ngram_size", type=int, default=None, required=False, help="size of no repeat ngram") + parser.add_argument("--include_special_tokens", action="store_true") + parser.add_argument("--clean_up_tokenization_spaces", action="store_true") + parser.add_argument("--pre_process_threads", + type=int, + default=2, + required=False, + help="pre-processing worker threads") + parser.add_argument("--post_process_threads", + type=int, + default=2, + required=False, + help="post-processing worker threads") args = parser.parse_args() examples = [ @@ -224,8 +250,10 @@ def run_generate(): decoder_start_token_id=args.decoder_start_token_id, fastseq_opt=not args.without_fastseq_opt, no_repeat_ngram_size=args.no_repeat_ngram_size, - skip_special_tokens=True, - clean_up_tokenization_spaces=False, + skip_special_tokens=not args.include_special_tokens, + clean_up_tokenization_spaces=args.clean_up_tokenization_spaces, + pre_process_threads=args.pre_process_threads, + post_process_threads=args.post_process_threads, ) if args.reference_path is None: return From a595d0205b22057c83693c4d391204b9b4905311 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Tue, 29 Sep 2020 23:48:05 +0000 Subject: [PATCH 11/15] nitpicks --- fastseq_cli/transformers_generate.py | 30 ++++++++++++++++------------ 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index d692e57c..070bd3ca 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -13,13 +13,16 @@ GENERATE_FINISHED = 'done' POSTPROCESS_FINISHED = None -class Dataset(torch.utils.data.Dataset): +class TokenizeDataset(torch.utils.data.Dataset): """Characterizes a dataset for PyTorch""" def __init__(self, examples, tokenizer, model_name, prefix): self.examples = examples self.tokenizer= tokenizer self.model_name = model_name self.prefix = prefix + self.return_tensors="pt" + self.truncation=True + self.padding="max_length" def __len__(self): return len(self.examples) @@ -29,9 +32,9 @@ def __getitem__(self, index): batch = [self.prefix + text for text in batch] batch = self.examples[index] batch = self.tokenizer(batch, - return_tensors="pt", - truncation=True, - padding="max_length") + return_tensors=self.return_tensors, + truncation=self.truncation, + padding=self.padding) return batch['input_ids'], batch['attention_mask'] class IOProcess (Process): @@ -115,8 +118,8 @@ def generate_summaries_or_translations( no_repeat_ngram_size=None, skip_special_tokens=True, clean_up_tokenization_spaces=False, - pre_process_threads=2, - post_process_threads=2, + preprocess_cpu_num=2, + postprocess_cpu_num=2, **gen_kwargs, ) -> None: """Run generation""" @@ -138,7 +141,7 @@ def generate_summaries_or_translations( msg_queue = Queue() p_list = [] - for i in range(post_process_threads): + for i in range(postprocess_cpu_num): p = PostProcess(tokenizer, data_queue, msg_queue, skip_special_tokens, clean_up_tokenization_spaces) p_list.append(p) @@ -146,9 +149,10 @@ def generate_summaries_or_translations( io_process = IOProcess( msg_queue, fout) io_process.start() - dataset = Dataset(examples, tokenizer, model_name, model.config.prefix) + dataset = TokenizeDataset(examples, tokenizer, model_name, + 
model.config.prefix) training_generator = torch.utils.data.DataLoader(dataset, - batch_size=batch_size, num_workers = pre_process_threads) + batch_size=batch_size, num_workers = preprocess_cpu_num) for ind, batch in tqdm(enumerate(training_generator)): input_ids, attention_mask = batch input_ids = input_ids.view(batch_size, -1).to(device) @@ -220,12 +224,12 @@ def run_generate(): required=False, help="size of no repeat ngram") parser.add_argument("--include_special_tokens", action="store_true") parser.add_argument("--clean_up_tokenization_spaces", action="store_true") - parser.add_argument("--pre_process_threads", + parser.add_argument("--preprocess_cpu_num", type=int, default=2, required=False, help="pre-processing worker threads") - parser.add_argument("--post_process_threads", + parser.add_argument("--postprocess_cpu_num", type=int, default=2, required=False, @@ -252,8 +256,8 @@ def run_generate(): no_repeat_ngram_size=args.no_repeat_ngram_size, skip_special_tokens=not args.include_special_tokens, clean_up_tokenization_spaces=args.clean_up_tokenization_spaces, - pre_process_threads=args.pre_process_threads, - post_process_threads=args.post_process_threads, + preprocess_cpu_num=args.preprocess_cpu_num, + postprocess_cpu_num=args.postprocess_cpu_num, ) if args.reference_path is None: return From 9ce149a18b2c8cf5f6cf0ecea91bdd5dfabfe828 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Fri, 30 Oct 2020 00:59:53 +0000 Subject: [PATCH 12/15] argument description added --- fastseq_cli/transformers_generate.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index 070bd3ca..5638dbdc 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -16,6 +16,13 @@ class TokenizeDataset(torch.utils.data.Dataset): """Characterizes a dataset for PyTorch""" def __init__(self, examples, tokenizer, model_name, prefix): + """Multiprocess Dataloader. + Args: + examples (List(str)): a list of input sentences. + tokenizer (AutoTokenizer): instance of AutoTokenizer. + model_name (string): model name. + prefix (string): input example prefix if any. 
+ """ self.examples = examples self.tokenizer= tokenizer self.model_name = model_name From 143aeff2553c7ce5fed0523f58590c791c866445 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Wed, 11 Nov 2020 23:20:48 +0000 Subject: [PATCH 13/15] benchmarks updated --- benchmarks/models/hf_bart.sh | 6 +++--- benchmarks/models/hf_distibart.sh | 4 ++-- benchmarks/models/hf_mbart.sh | 4 ++-- benchmarks/models/hf_t5.sh | 4 ++-- fastseq_cli/transformers_generate.py | 7 ++++--- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/benchmarks/models/hf_bart.sh b/benchmarks/models/hf_bart.sh index d84ebcb1..893bf6a6 100755 --- a/benchmarks/models/hf_bart.sh +++ b/benchmarks/models/hf_bart.sh @@ -20,9 +20,9 @@ source utils.sh grep "facebook/bart-large-cnn cnn_dm.1k/raw val " perf | awk '{print $9}' | awk -F'|' '{if($1!="NA"){c+=1;s+=$1}}END{print s/c}' | bash range.sh 34.8 35 # Speed on V100 16GB 250W grep -E "transformers_v3.0.2 facebook/bart-large-cnn cnn_dm.1k/raw val 32 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 3.2 3.4 -grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 32 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 6.8 100 -grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 8.7 100 -grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 128 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 9.1 100 +grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 32 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 7.9 100 +grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 10.7 100 +grep -E "transformers_v3.0.2\+fastseq_v.* facebook/bart-large-cnn cnn_dm.1k/raw val 128 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 11.0 100 ## Accuracy #grep "facebook/bart-large-cnn cnn_dm/raw val " perf | awk '{print $9}' | awk -F'|' '{if($1!="NA"){c+=1;s+=$1}}END{print s/c}' | bash range.sh 44.78 44.82 diff --git a/benchmarks/models/hf_distibart.sh b/benchmarks/models/hf_distibart.sh index c310be3c..4a1b95c7 100755 --- a/benchmarks/models/hf_distibart.sh +++ b/benchmarks/models/hf_distibart.sh @@ -20,9 +20,9 @@ source utils.sh grep "sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val " perf | awk '{print $9}' | awk -F'|' '{if($1!="NA"){c+=1;s+=$1}}END{print s/c}' | bash range.sh 35.1 35.3 # Speed on V100 16GB 250W grep -E "transformers_v3.0.2 sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 3.9 4.2 -grep -E "transformers_v3.0.2\+fastseq_v.* sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 9.5 100 +grep -E "transformers_v3.0.2\+fastseq_v.* sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 13.3 100 # todo: bigger bs doesn't increase speed -grep -E "transformers_v3.0.2\+fastseq_v.* sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val 128 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 9.5 100 +grep -E "transformers_v3.0.2\+fastseq_v.* sshleifer/distilbart-cnn-12-6 cnn_dm.1k/raw val 128 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 13.5 100 ## Accuracy #grep "sshleifer/distilbart-cnn-12-6 cnn_dm/raw val " perf | awk '{print $9}' | awk -F'|' '{if($1!="NA"){c+=1;s+=$1}}END{print s/c}' | bash range.sh 45 45.1 diff --git 
a/benchmarks/models/hf_mbart.sh b/benchmarks/models/hf_mbart.sh index dcb3d22f..58891834 100755 --- a/benchmarks/models/hf_mbart.sh +++ b/benchmarks/models/hf_mbart.sh @@ -14,5 +14,5 @@ source utils.sh # Accuracy grep "facebook/mbart-large-en-ro wmt_en_ro/raw val " perf | awk '{if($8!="NA"){c+=1;s+=$8}}END{print s/c}' | bash range.sh 27.79 27.95 # Speed on V100 16GB 250W -grep -E "transformers_v3.0.2 facebook/mbart-large-en-ro wmt_en_ro/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 5.8 6.2 -grep -E "transformers_v3.0.2\+fastseq_v.* facebook/mbart-large-en-ro wmt_en_ro/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 7.2 100 +grep -E "transformers_v3.0.2 facebook/mbart-large-en-ro wmt_en_ro/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 7.6 7.7 +grep -E "transformers_v3.0.2\+fastseq_v.* facebook/mbart-large-en-ro wmt_en_ro/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 6.0 100 diff --git a/benchmarks/models/hf_t5.sh b/benchmarks/models/hf_t5.sh index fa0402c8..47c3b963 100755 --- a/benchmarks/models/hf_t5.sh +++ b/benchmarks/models/hf_t5.sh @@ -14,6 +14,6 @@ source utils.sh grep "t5-base wmt_en_ro/raw val " perf | awk '{if($8!="NA"){c+=1;s+=$8}}END{print s/c}' | bash range.sh 27.42 27.44 # Speed on V100 16GB 250W grep -E "transformers_v3.0.2 t5-base wmt_en_ro/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 4.6 5.2 -grep -E "transformers_v3.0.2\+fastseq_v.* t5-base wmt_en_ro/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 6 6.5 -grep -E "transformers_v3.0.2\+fastseq_v.* t5-base wmt_en_ro/raw val 128 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 7 7.5 +grep -E "transformers_v3.0.2\+fastseq_v.* t5-base wmt_en_ro/raw val 64 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 14.4 14.8 +grep -E "transformers_v3.0.2\+fastseq_v.* t5-base wmt_en_ro/raw val 128 " perf | awk '{s+=$13}END{print s/NR}' | bash range.sh 16.8 17.0 diff --git a/fastseq_cli/transformers_generate.py b/fastseq_cli/transformers_generate.py index 5638dbdc..bf30c28a 100644 --- a/fastseq_cli/transformers_generate.py +++ b/fastseq_cli/transformers_generate.py @@ -35,9 +35,9 @@ def __len__(self): return len(self.examples) def __getitem__(self, index): - if "t5" in self.model_name: - batch = [self.prefix + text for text in batch] batch = self.examples[index] + if "t5" in self.model_name: + batch = self.prefix + batch batch = self.tokenizer(batch, return_tensors=self.return_tensors, truncation=self.truncation, @@ -159,7 +159,8 @@ def generate_summaries_or_translations( dataset = TokenizeDataset(examples, tokenizer, model_name, model.config.prefix) training_generator = torch.utils.data.DataLoader(dataset, - batch_size=batch_size, num_workers = preprocess_cpu_num) + batch_size=batch_size, num_workers = preprocess_cpu_num, + drop_last=True) for ind, batch in tqdm(enumerate(training_generator)): input_ids, attention_mask = batch input_ids = input_ids.view(batch_size, -1).to(device) From 47c5941c77a5b6335bb3d6280a6640c5639a397a Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Wed, 11 Nov 2020 23:49:49 +0000 Subject: [PATCH 14/15] readme Updated --- README.md | 167 ++++++++++++------------------------------------------ 1 file changed, 37 insertions(+), 130 deletions(-) diff --git a/README.md b/README.md index 64c38c2d..9afa04d3 100644 --- a/README.md +++ b/README.md @@ -1,129 +1,43 @@

FastSeq

-# Introduction +## Introduction -FastSeq provides efficient implementations of the popular sequence models with high performance for text generation, summarization, and translation tasks. It can automatically optimize the performance of the pupular NLP toolkits (e.g. [FairSeq](https://github.com/pytorch/fairseq)) by simply `import fastseq`. +FastSeq provides efficient implementations of popular sequence models (e.g. [Bart](https://arxiv.org/pdf/1910.13461.pdf), [ProphetNet](https://github.com/microsoft/ProphetNet)) for text generation, summarization, and translation tasks. It automatically optimizes inference speed for popular NLP toolkits (e.g. [FairSeq](https://github.com/pytorch/fairseq) and [HuggingFace-Transformers](https://github.com/huggingface/transformers)) without accuracy loss. All of this requires no changes to code, models, or data when using our command line tool, and only a one-line `import fastseq` when using the source code. -# Supported Models +## Speed Gain +The table below shows the generation speed gains from using FastSeq. -## Supported models in [fairseq](https://github.com/pytorch/fairseq) +| Model | W/O FastSeq (in samples/s) | W/ FastSeq (in samples/s) | Speedup | +|------------------|:--------------------------:|:-------------------------:|:-----:| +| [ProphetNet](examples/prophetnet/README.md) | 2.7 | 10.3 | 3.8x | +| [Bart (`fs`)](examples/bart/README.md) | 2.7 | 14.5 | 5.4x | +| [Bart (`hf`)](examples/bart/README.md#speedup-bart-huggingface-transformers-version-by-using-fastseq) | 3.4 | 6.4 | 1.9x | +| [DistilBart (`hf`)](examples/distilbart/README.md) | 4.0 | 6.5 | 1.6x | +| [T5 (`hf`)](examples/t5/README.md) | 4.8 | 7.5 | 1.6x | +| [WMT16 En-De (`fs`)](examples/wmt/README.md) | 84.0 | 135.0 | 1.6x | -- [x] [BART](https://arxiv.org/pdf/1910.13461.pdf) -- [x] [Scaling Neural Machine Translation (Ott et al., 2018)](https://github.com/pytorch/fairseq/blob/master/examples/scaling_nmt/README.md) -- [x] [Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)](https://github.com/pytorch/fairseq/blob/master/examples/translation_moe/README.md) -- [x] [Pay Less Attention with Lightweight and Dynamic Convolutions (Wu et al., 2019)](https://github.com/pytorch/fairseq/blob/master/examples/pay_less_attention_paper/README.md) +- All the following benchmarking experiments run on NVIDIA-V100-16GB with [docker](docker/Dockerfile). Highest speed recorded for each model by tuning batch size. For parameter setting details, click link of corresponding model. +- `fs` stands for [Fairseq](https://github.com/pytorch/fairseq) 0.9.0 version, `hf` stands for [Huggingface Transformers](https://github.com/huggingface/transformers) 3.0.2 version. +- Optimizations were automatically applied to all generation/sequence models in Fairseq & Huggingface Transformers. Above only lists a subset of them. +## How it works? +We developed a wide range of speedup techniques, including more efficient beam search, a smaller memory footprint, faster computation for key operations, and faster I/O. To connect seamlessly with the community, these optimizations are applied in the backend to the existing models from Fairseq and Huggingface Transformers, while keeping the model interface and usage the same as before.
-## Supported models in [HuggingFace-Transformers](https://github.com/huggingface/transformers) +## Installation -- [x] [BART](https://huggingface.co/transformers/model_doc/bart.html) -- [ ] [GPT-2](https://huggingface.co/transformers/model_doc/gpt2.html) -- [ ] [UniLM-V1](https://github.com/microsoft/unilm) -- [ ] [UniLM-V2](https://github.com/microsoft/unilm) -- [ ] [ProphetNet](https://github.com/microsoft/ProphetNet) -- [x] [T5](https://huggingface.co/transformers/model_doc/t5.html) - -# Benchmarks - -## BART from Fairseq - -- CNN daily mail val data, NVIDIA-V100-16GB - - | BatchSize | 32 | 64 | 128 | - |:----------------:|:-------------:|:---------------:|:--------------:| - | fairseq-0.9.0 | 2.7 samples/s | OOM | OOM | - | above + fastseq | 9.0 samples/s | 12.5 samples/s | 14.5 samples/s | - -with setting: - -```bash -$ fastseq-generate-for-fairseq \ - cnn_dm.1k/len-1024.bin \ - --path bart.large.cnn/model.pt \ - --fp16 \ - --task translation \ - --batch-size BATCH_SIZE \ - --gen-subset valid \ - --truncate-source \ - --bpe gpt2 \ - --beam 4 \ - --num-workers 4 \ - --min-len 55 \ - --max-len-b 140 \ - --no-repeat-ngram-size 3 \ - --lenpen 2.0 -``` - -To get the baseline fairseq's speed number, replace `fastseq-generate-for-fairseq` by `fairseq-generate`. - -## BART from Transformers - -- CNN daily mail val data, NVIDIA-V100-16GB - - | BatchSize | 32 | 64 | 128 | - |:-------------------:|:-------------:|:--------------:|:--------------:| - | transformers-3.0.2 | 3.4 samples/s | OOM | OOM | - | above + fastseq | 5.2 samples/s | 6.2 samples/s | 6.4 samples/s | - | transformers-2.11.0 | 2.5 samples/s | OOM | OOM | - | above + fastseq | 4.4 samples/s | 5.3 samples/s | >5.3 samples/s | - -(numbers for 2.11.0 needs to be updated based on docker env.) - -with setting: - -```bash -$ fastseq-generate-for-transformers \ - facebook/bart-large-cnn \ - cnn_dm.1k/val.source \ - out.summary \ - --reference_path cnn_dm/val.target \ - --device cuda \ - --bs 128 \ - --fp16 \ - --score_path out.score \ - --task summarization -``` - -To get the baseline transformers' speed number, we can either add option `--without_fastseq_opt` or use [tool](https://github.com/huggingface/transformers/tree/master/examples/seq2seq) provided in Transformers GitHub repository. - -## WMT from Fairseq -- [WMT16 En-De](https://github.com/pytorch/fairseq/tree/master/examples/scaling_nmt) model - - | BatchSize | 256 | 512 | 1024 | - |:----------------:|:--------------:|:--------------:|:--------------:| - | fairseq-0.9.0 | 84 samples/s | OOM | OOM | - | above + fastseq | 129 samples/s | 131 samples/s | 135 samples/s | - - -with setting: - -```bash -$ fastseq-generate-for-fairseq \ - wmt14.en-fr.joined-dict.newstest2014/ \ - --path wmt14.en-fr.joined-dict.transformer/model.pt \ - --beam 4 \ - --lenpen 0.6 \ - --remove-bpe \ - --batch-size 32 -``` - -To get the fairseq's speed number, replace `fastseq-generate-for-fairseq` by `fairseq-generate`. 
- -# Installation - -## Requirements +### Requirements - Python version >= 3.6 - [torch](http://pytorch.org/) >= 1.4.0 -- [fairseq](https://github.com/pytorch/fairseq) >= 0.9.0 -- [transformers](https://github.com/huggingface/transformers) >= 3.0.2 +- [fairseq](https://github.com/pytorch/fairseq) == 0.9.0 +- [transformers](https://github.com/huggingface/transformers) == 3.0.2 - [requets](https://pypi.org/project/requests/) >= 2.24.0 - [absl-py](https://pypi.org/project/absl-py/) >= 0.9.0 -- [rouge-score](https://pypi.org/project/rouge-score/) +- [rouge-score](https://pypi.org/project/rouge-score/) >= 0.0.4 If you use fairseq or transformers, you only need to install one of them. If you use both, you need to install both. -## Python package +### Install from PIP package `fastseq` Python package can be directly installed with pip using @@ -131,7 +45,7 @@ If you use fairseq or transformers, you only need to install one of them. If you $ pip install fastseq ``` -## Install from the source +### Install from the source ```bash $ git clone https://github.com/microsoft/fastseq @@ -139,9 +53,9 @@ $ cd fastseq $ pip install --editable ./ ``` -# Usage +## Usage -## Example +### Use source code for speedup Only one line of code change is needed to use the optimizations provided by `FastSeq`. @@ -164,8 +78,9 @@ hypotheses = bart.sample( print(hypotheses) ``` -## Command line tool for fairseq models -Example + +### Use command line tool to speedup fairseq models +Example usage for bart model on cnn daily mail task. ```bash $ fastseq-generate-for-fairseq \ @@ -184,9 +99,10 @@ $ fastseq-generate-for-fairseq \ --no-repeat-ngram-size 3 \ --lenpen 2.0 ``` +Both model file and task data file are the same as original Fairseq version. -## Command line tool for transformers models -Example +### Use command line tool to speedup transformers models +Example usage for bart model on cnn daily mail task. ```bash $ fastseq-generate-for-transformers \ @@ -200,16 +116,14 @@ $ fastseq-generate-for-transformers \ --score_path out.score \ --task summarization ``` +Both model file and task data file are the same as original Transformers version. -## Run tests +### Run tests ```bash # run a single test. $ python tests/optimizer/fairseq/test_fairseq_optimizer.py -# run benchmark. -$ python tests/optimizer/fairseq/benchmark_fairseq_optimizer.py - # run all the tests. $ python -m unittest discover -s tests/ -p '*.py' @@ -217,16 +131,9 @@ $ python -m unittest discover -s tests/ -p '*.py' $ cd benchmarks && bash run_all_benchmarks.sh ``` -## Build - -```bash -# build package -$ python setup.py sdist bdist_wheel -``` - -# Code Style +## Code Style -## Python coding style +### Python coding style Changes to Python code should conform to [PEP 8](https://www.python.org/dev/peps/pep-0008/). `yapf` can be used to help format the python code, and use `pylint` to check your Python changes. @@ -238,7 +145,7 @@ $ yapf --style pep8 -i -r PYTHON_FILE/PACKAGE $ pylint --rcfile=.pylintrc PYTHON_FILE/PACKAGE ``` -# Contributing +## Contributing This project welcomes contributions and suggestions. 
Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us From 3ed00066c6b374bd1643071c960308a3efcacfc9 Mon Sep 17 00:00:00 2001 From: NickNickGo Date: Thu, 12 Nov 2020 01:00:35 +0000 Subject: [PATCH 15/15] readme update --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 9afa04d3..ce753d86 100644 --- a/README.md +++ b/README.md @@ -10,11 +10,11 @@ Below shows the generation speed gain by using FastSeq. | Model | W/O FastSeq (in samples/s) | W/ FastSeq (in samples/s) | Speedup | |------------------|:--------------------------:|:-------------------------:|:-----:| | [ProphetNet](examples/prophetnet/README.md) | 2.7 | 10.3 | 3.8x | -| [Bart (`fs`)](examples/bart/README.md) | 2.7 | 14.5 | 5.4x | -| [Bart (`hf`)](examples/bart/README.md#speedup-bart-huggingface-transformers-version-by-using-fastseq) | 3.4 | 6.4 | 1.9x | -| [DistilBart (`hf`)](examples/distilbart/README.md) | 4.0 | 6.5 | 1.6x | -| [T5 (`hf`)](examples/t5/README.md) | 4.8 | 7.5 | 1.6x | -| [WMT16 En-De (`fs`)](examples/wmt/README.md) | 84.0 | 135.0 | 1.6x | +| [Bart (`fs`)](examples/bart/README.md) | 2.7 | 13.3 | 5x | +| [Bart (`hf`)](examples/bart/README.md#speedup-bart-huggingface-transformers-version-by-using-fastseq) | 3.4 | 11.0 | 3.2x | +| [DistilBart (`hf`)](examples/distilbart/README.md) | 4.0 | 13.5 | 3.4x | +| [T5 (`hf`)](examples/t5/README.md) | 4.8 | 17.0 | 3.5x | +| [WMT16 En-De (`fs`)](examples/wmt/README.md) | 84.0 | 124.0 | 1.5x | - All the following benchmarking experiments run on NVIDIA-V100-16GB with [docker](docker/Dockerfile). Highest speed recorded for each model by tuning batch size. For parameter setting details, click link of corresponding model. - `fs` stands for [Fairseq](https://github.com/pytorch/fairseq) 0.9.0 version, `hf` stands for [Huggingface Transformers](https://github.com/huggingface/transformers) 3.0.2 version.
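For reference, below is a minimal, self-contained sketch of the in-order write pattern that the `PostProcess`/`IOProcess` pair in these patches implements: decoding workers may finish batches out of order, so a single writer process buffers results by batch index and flushes them only once they are contiguous, keeping the output file aligned with the input. The class name, output path, and toy batches here are illustrative and are not part of the patches.

```python
from multiprocessing import Process, Queue

GENERATE_FINISHED = "done"

class OrderedWriter(Process):
    """Buffer out-of-order (index, lines) results and write them in index order."""
    def __init__(self, msg_queue, path):
        super().__init__()
        self.msg_queue = msg_queue
        self.path = path

    def run(self):
        waiting_for = 0   # next batch index expected on disk
        buffered = {}     # batches that arrived early, keyed by batch index
        with open(self.path, "w") as fout:
            while True:
                ind, lines = self.msg_queue.get()
                if lines == GENERATE_FINISHED:
                    break
                buffered[ind] = lines
                # Flush every batch that is now contiguous with what was written.
                while waiting_for in buffered:
                    fout.write("\n".join(buffered.pop(waiting_for)) + "\n")
                    waiting_for += 1
        assert not buffered, "writer exited with unflushed batches"

if __name__ == "__main__":
    q = Queue()
    writer = OrderedWriter(q, "out.txt")
    writer.start()
    # Decoded batches may arrive out of order from several post-processing workers.
    q.put((1, ["second batch, line 1"]))
    q.put((0, ["first batch, line 1"]))
    q.put((-1, GENERATE_FINISHED))
    writer.join()
```

Keying each queue item by its batch index is what lets the number of post-processing workers vary freely while the written output stays deterministic.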