pip install huggingface_hub
!pip install 'transformers[torch]'
mamba install accelerate pytorch transformers -y
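A quick sanity check that the install worked (any of the commands above should pull in both transformers and torch):
python -c "import torch, transformers; print(transformers.__version__, torch.__version__)"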
Downloaded models are cached under ~/.cache/huggingface/ by default.
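The cache location can be overridden, e.g. via the HF_HOME environment variable or the cache_dir argument of from_pretrained. A minimal sketch (the path and model name are just examples):
import os
os.environ["HF_HOME"] = "/data/hf-cache"  # example path; set before importing transformers
# or per call:
# model = AutoModel.from_pretrained("bert-base-uncased", cache_dir="/data/hf-cache")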
from transformers import DetrFeatureExtractor, DetrForSegmentation
from PIL import Image
import requests

# Example image
url = 'https://huggingface.co/tasks/assets/image-segmentation/image-segmentation-input.jpeg'
image = Image.open(requests.get(url, stream=True).raw)

# Pretrained DETR panoptic segmentation model and its preprocessor
# (DetrFeatureExtractor is deprecated in newer releases in favor of DetrImageProcessor)
feature_extractor = DetrFeatureExtractor.from_pretrained('facebook/detr-resnet-50-panoptic')
model = DetrForSegmentation.from_pretrained('facebook/detr-resnet-50-panoptic')

# Preprocess and run inference
inputs = feature_extractor(images=image, return_tensors="pt")
outputs = model(**inputs)
pred_masks = outputs.pred_masks
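pred_masks are raw per-query mask logits. To get an actual panoptic segmentation map, recent transformers versions expose a post-processing helper on the preprocessor; a minimal sketch, assuming post_process_panoptic_segmentation is available in your version:
result = feature_extractor.post_process_panoptic_segmentation(
    outputs, target_sizes=[image.size[::-1]]  # (height, width) of the original image
)[0]
segmentation = result["segmentation"]    # tensor of segment ids, shape (H, W)
segments_info = result["segments_info"]  # per-segment label ids and scores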
import torch
from transformers import pipeline
generate_text = pipeline(
    model="databricks/dolly-v2-12b",
    # torch_dtype=torch.bfloat16,  # optional; lowers memory use if RAM/VRAM is tight
    trust_remote_code=True,  # Dolly ships its own pipeline code in the model repo
    device_map="auto",
)
# Downloading and running the 12B model takes a while...
res = generate_text("Explain to me the difference between nuclear fission and fusion.")
print(res[0]["generated_text"])
import torch
from instruct_pipeline import InstructionTextGenerationPipeline
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b", padding_side="left")
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b", device_map="auto", torch_dtype=torch.bfloat16)
generate_text = InstructionTextGenerationPipeline(model=model, tokenizer=tokenizer)
res = generate_text("How do I build a campfire?")
print(res[0]["generated_text"])
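instruct_pipeline is not a PyPI package; it is the instruct_pipeline.py file shipped in the Dolly model repo. One way to fetch it next to your script (a sketch using huggingface_hub):
from huggingface_hub import hf_hub_download
import shutil
# Download instruct_pipeline.py from the Dolly repo and copy it into the working directory
path = hf_hub_download(repo_id="databricks/dolly-v2-12b", filename="instruct_pipeline.py")
shutil.copy(path, "instruct_pipeline.py")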
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m-deduped")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-70m-deduped")
prompt = "Hi, how are you?"
inputs = tokenizer(prompt, return_tensors="pt")
Generate the next part of the sentence:
generate_ids = model.generate(inputs.input_ids, max_length=30)
print(tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0])
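generate defaults to greedy decoding; for more varied output you can sample instead. A sketch with standard generate arguments (the values are illustrative):
sample_ids = model.generate(
    inputs.input_ids,
    do_sample=True,
    temperature=0.8,
    top_p=0.95,
    max_new_tokens=30,
)
print(tokenizer.batch_decode(sample_ids, skip_special_tokens=True)[0])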
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen2.5-7B-Instruct"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

prompt = "Give me a short introduction to large language models."
messages = [
    {"role": "user", "content": prompt}
]
# Render the messages with the model's chat template and append the assistant
# turn marker so the model knows to start generating
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=512
)
# Strip the prompt tokens so only the newly generated tokens are decoded
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
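To continue the chat, append the assistant reply and the next user turn to messages, then repeat the template/generate steps. A sketch reusing the names above (the follow-up question is just an example):
messages.append({"role": "assistant", "content": response})
messages.append({"role": "user", "content": "Now summarize that in one sentence."})
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(**model_inputs, max_new_tokens=128)
generated_ids = [out[len(inp):] for inp, out in zip(model_inputs.input_ids, generated_ids)]
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])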