from time import time

from aphrodite import LLM, SamplingParams

# Common prefix shared by all prompts.
prefix = (
    "You are an expert school principal, skilled in effectively managing "
    "faculty and staff. Draft 10-15 questions for a potential first grade "
    "Head Teacher for my K-12, all-girls', independent school that emphasizes "
    "community, joyful discovery, and life-long learning. The candidate is "
    "coming in for a first-round panel interview for an 8th grade Math "
    "teaching role. They have 5 years of previous teaching experience "
    "as an assistant teacher at a co-ed, public school with experience "
    "in middle school math teaching. Based on this information, complete "
    "the following paragraph: ")
# Sample prompts.
prompts = [
    "Once upon a time,",
    "In a galaxy far, far away,",
    "The quick brown fox jumps over the lazy dog.",
    "The meaning of life is",
]

generating_prompts = [prefix + prompt for prompt in prompts]

# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.0)
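# Note: temperature=0.0 means greedy decoding, so both runs produce
# deterministic outputs that can be compared for exact equality below.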
# Create two LLM engines: one without prefix caching and one with it enabled.
regular_llm = LLM(model="NousResearch/Meta-Llama-3.1-8B-Instruct")

prefix_cached_llm = LLM(model="NousResearch/Meta-Llama-3.1-8B-Instruct",
                        enable_prefix_caching=True,
                        gpu_memory_utilization=0.4)
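# The reduced gpu_memory_utilization leaves room for the first engine, which
# is still resident on the GPU. Depending on your available VRAM, you may also
# need to cap gpu_memory_utilization on regular_llm; the value here is
# illustrative, not tuned.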
- print("Results without `enable_prefix_caching`")
- # Generate texts from the prompts. The output is a list of RequestOutput objects
- # that contain the prompt, generated text, and other information.
- start_time_regular = time()
- outputs = regular_llm.generate(generating_prompts, sampling_params)
- duration_regular = time() - start_time_regular
- regular_generated_texts = []
- # Print the outputs.
- for output in outputs:
- prompt = output.prompt
- generated_text = output.outputs[0].text
- regular_generated_texts.append(generated_text)
- print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
- print("-" * 80)
# Warmup so that the shared prefix's KV cache is computed.
prefix_cached_llm.generate(generating_prompts[0], sampling_params)
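# After this warmup request, the KV-cache blocks covering the shared prefix
# stay in the cache, so subsequent requests that start with the same prefix
# can reuse them instead of recomputing those prompt tokens.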
# Generate with prefix caching.
start_time_cached = time()
outputs = prefix_cached_llm.generate(generating_prompts, sampling_params)
duration_cached = time() - start_time_cached

print("Results with `enable_prefix_caching`")

cached_generated_texts = []
# Print the outputs. You should see the same outputs as before.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    cached_generated_texts.append(generated_text)
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")

print("-" * 80)
# Compare the results and display the speedup.
generated_same = all(
    regular_generated_texts[i] == cached_generated_texts[i]
    for i in range(len(prompts))
)
print(f"Generated answers are the same: {generated_same}")

speedup = round(duration_regular / duration_cached, 2)
print(f"Speedup of cached generation over regular generation: {speedup}x")