Are you a fan of machine learning looking to expand your knowledge? The Telegram channel 'Machine learning books and papers', curated by admin @Raminmousa, shares a large collection of machine learning books and research papers to help members keep up with the latest developments in the field. Whether you are a beginner learning the basics or an experienced professional after advanced material, the channel covers machine learning algorithms, techniques, and applications. To join, visit: https://t.me/Machine_learn
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model
model_id = "CohereForAI/aya-expanse-8b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Format the message with the chat template
messages = [{"role": "user", "content": " %prompt% "}]
input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>%prompt%<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

# Generate a response (sampling with a low temperature keeps output focused)
gen_tokens = model.generate(
    input_ids,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.3,
)

# Decode and print the full sequence, prompt included
gen_text = tokenizer.decode(gen_tokens[0])
print(gen_text)
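A small follow-up sketch, not from the original post: gen_tokens still contains the prompt tokens, so to print only the model's reply you can slice them off before decoding. This assumes input_ids is the tensor returned by apply_chat_template above.

# Sketch: decode only the newly generated tokens.
# gen_tokens[0] begins with the prompt, so skip the first input_ids.shape[-1] ids.
new_tokens = gen_tokens[0][input_ids.shape[-1]:]
reply = tokenizer.decode(new_tokens, skip_special_tokens=True)
print(reply)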
31 Oct, 10:48
# Install Diffusers
pip install -U diffusers

# Inference
import torch
from diffusers import StableDiffusion3Pipeline

# Load the SD 3.5 Large pipeline in bfloat16 and move it to the GPU
pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-large", torch_dtype=torch.bfloat16)
pipe = pipe.to("cuda")

# Generate an image from a text prompt
image = pipe(
    "A happy woman lying on the grass",
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("woman.png")
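A hedged note, not part of the original post: SD 3.5 Large is a heavy model, so on GPUs with limited VRAM you can trade some speed for memory with diffusers' CPU offloading (requires pip install accelerate). A minimal sketch, assuming the same pipeline object as above:

# Call this instead of pipe.to("cuda"): submodules are moved to the GPU
# only while they execute, lowering peak VRAM use at some speed cost.
pipe.enable_model_cpu_offload()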
31 Oct, 08:55
# Clone the repo
git clone https://github.com/Zyphra/transformers_zamba2.git
cd transformers_zamba2

# Install the repository & accelerate
pip install -e .
pip install accelerate

# Inference
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the tokenizer and model on the GPU in bfloat16
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-2.7B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-2.7B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)

# Build a multi-turn chat and render it with the model's chat template
user_turn_1 = "user_prompt1."
assistant_turn_1 = "assistant_prompt."
user_turn_2 = "user_prompt2."
sample = [
    {'role': 'user', 'content': user_turn_1},
    {'role': 'assistant', 'content': assistant_turn_1},
    {'role': 'user', 'content': user_turn_2},
]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)

# Tokenize (the template already added special tokens) and generate greedily
inputs = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**inputs, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print(tokenizer.decode(outputs[0]))
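A hypothetical variation, not from the original post: instead of waiting for the full reply, you can stream tokens to stdout as they are generated with transformers' TextStreamer, assuming the Zamba2 fork's generate supports the streamer argument as upstream transformers does. skip_prompt hides the echoed chat history.

from transformers import TextStreamer

# Stream decoded tokens to stdout as generation proceeds
streamer = TextStreamer(tokenizer, skip_prompt=True)
model.generate(**inputs, max_new_tokens=150, do_sample=False, streamer=streamer)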
29 Oct, 19:14