Reinforcement Learning from Human Feedback (RLHF)
Mina Parham
AI Engineer
from transformers import AutoModelForCausalLM
from peft import prepare_model_for_int8_training

# Load the base model in 8-bit precision to reduce memory usage
pretrained_model = AutoModelForCausalLM.from_pretrained(model_name, load_in_8bit=True)
# Prepare the quantized model for training (casts norm layers to fp32, enables gradient checkpointing)
pretrained_model_8bit = prepare_model_for_int8_training(pretrained_model)
from peft import LoraConfig, get_peft_model

lora_config = LoraConfig(
    r=32,                  # Rank of the low-rank update matrices
    lora_alpha=32,         # Scaling factor for the LoRA updates
    lora_dropout=0.1,      # Dropout rate applied to the LoRA layers
    bias="lora_only",      # Only train the bias terms of the LoRA layers; all other biases stay frozen
    task_type="CAUSAL_LM"  # We are adapting a causal language model
)
from trl import AutoModelForCausalLMWithValueHead

lora_model = get_peft_model(pretrained_model_8bit, lora_config)
model = AutoModelForCausalLMWithValueHead.from_pretrained(lora_model)  # add a value head so PPO can estimate state values
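The PPOTrainer below expects a PPOConfig from trl, which is distinct from the LoraConfig above. A minimal sketch follows; the hyperparameter values are illustrative, not prescribed:

from trl import PPOConfig, PPOTrainer

ppo_config = PPOConfig(
    model_name=model_name,   # same base model name used above
    learning_rate=1.41e-5,   # illustrative value
    batch_size=16,           # illustrative value
    mini_batch_size=4        # illustrative value
)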
ppo_trainer = PPOTrainer(
    ppo_config,              # The PPOConfig defined above (not the LoraConfig)
    model,                   # Our policy model with the value head
    ref_model=None,          # Let trl manage the frozen reference model internally
    tokenizer=tokenizer,
    dataset=dataset,
    data_collator=collator,
    optimizer=optimizer
)
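The trainer by itself does not run RLHF: a training loop has to generate responses, score them with a reward model, and take PPO steps. A minimal sketch, assuming the dataset provides both tokenized "input_ids" and decoded "query" fields (as in the trl examples) and a hypothetical reward_fn standing in for your reward model:

import torch

generation_kwargs = {"max_new_tokens": 32, "do_sample": True, "pad_token_id": tokenizer.eos_token_id}

for batch in ppo_trainer.dataloader:
    query_tensors = batch["input_ids"]

    # Generate a response for each query with the current policy
    response_tensors = []
    for query in query_tensors:
        output = ppo_trainer.generate(query, **generation_kwargs)
        response_tensors.append(output.squeeze()[len(query):])  # keep only the generated tokens
    batch["response"] = tokenizer.batch_decode(response_tensors)

    # Score each (query, response) pair -- reward_fn is a stand-in for the reward model
    texts = [q + r for q, r in zip(batch["query"], batch["response"])]
    rewards = [torch.tensor(reward_fn(t)) for t in texts]

    # One PPO optimization step on this batch, then log the statistics
    stats = ppo_trainer.step(query_tensors, response_tensors, rewards)
    ppo_trainer.log_stats(stats, batch, rewards)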