LoRA on Qwen3-0.6B

### Load all the necessary components
from peft import LoraConfig, get_peft_model, TaskType
from transformers import AutoModelForCausalLM, TrainingArguments, Trainer, DataCollatorForSeq2Seq
from modelscope import AutoTokenizer
from datasets import Dataset
import torch
import swanlab
import os
import pandas as pd

os.environ["SWANLAB_PROJECT"] = 'qwen3-0.6b'
# PROMPT is logged to SwanLab for record-keeping; process_func below feeds the raw marker string to the model.
PROMPT = 'As a cell biologist, you can find out the name of the cell from its markers.'
MAX_LENGTH = 2048
swanlab.config.update(
    {
        'model': 'Qwen/Qwen3-0.6B',
        'prompt': PROMPT,
        'data_max_length': MAX_LENGTH,
    }
)
### Load the model and tokenizer
model_path = '/path/to/the/models/qwen/qwen3-0.6b/'
model = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', torch_dtype='auto')
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, trust_remote_code=True)
model.enable_input_require_grads()  # needed so gradients flow into the frozen base model when gradient checkpointing is enabled
### Configure LoRA
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    inference_mode=False,
    r=64,  # LoRA rank; common choices are 8, 16, 32, or 64. Larger ranks are more expressive but use more memory
    lora_alpha=128,  # LoRA scaling factor; often set to 2 * r
    lora_dropout=0.05,  # dropout applied to the LoRA layers
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    # attention projections: Qwen3DecoderLayer.self_attn.q_proj, k_proj, v_proj, o_proj
    # MLP projections: Qwen3MLP.gate_proj, up_proj, down_proj
)
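Before wrapping the model, it can be worth confirming that the `target_modules` names actually exist in the loaded checkpoint. A minimal sanity check, assuming `model` is the Qwen3-0.6B instance loaded above:

# List the distinct leaf-module names that match the LoRA targets (expect all seven for Qwen3).
targets = {"q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"}
found = sorted({name.split(".")[-1] for name, _ in model.named_modules() if name.split(".")[-1] in targets})
print(found)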
### Turn the model into a PEFT model
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
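print_trainable_parameters reports how few weights LoRA actually trains. A rough cross-check (a sketch assuming the PEFT convention that adapter weights carry "lora_" in their parameter names):

# Each adapted Linear of shape (out_features, in_features) contributes r * (in_features + out_features) trainable weights.
lora_params = sum(p.numel() for n, p in model.named_parameters() if "lora_" in n)
total_params = sum(p.numel() for p in model.parameters())
print(f"LoRA parameters: {lora_params} ({100 * lora_params / total_params:.2f}% of {total_params})")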
### Load and preprocess the dataset from a JSON file
def process_func(example):
    # Tokenize the marker string (prompt) and the cell name (response) separately, then concatenate them.
    instruction = tokenizer(f"{example['marker']}", add_special_tokens=True)
    response = tokenizer(f"{example['cell_name']}", add_special_tokens=False)
    input_ids = instruction["input_ids"] + response["input_ids"] + [tokenizer.eos_token_id]  # append EOS so the model learns where to stop
    attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]
    # Mask the prompt tokens with -100 so only the response contributes to the loss.
    labels = [-100] * len(instruction["input_ids"]) + response["input_ids"] + [tokenizer.eos_token_id]
    # Truncate overly long examples to MAX_LENGTH.
    if len(input_ids) > MAX_LENGTH:
        input_ids, attention_mask, labels = input_ids[:MAX_LENGTH], attention_mask[:MAX_LENGTH], labels[:MAX_LENGTH]
    return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}
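process_func expects each JSON record to carry a 'marker' field and a 'cell_name' field. A hypothetical record (the marker genes and label here are purely illustrative):

example_record = {
    "marker": "CD3D, CD3E, CD8A",   # comma-separated marker genes (illustrative)
    "cell_name": "CD8+ T cell",     # the answer the model is trained to generate (illustrative)
}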

df = pd.read_json('/path/to/dataset.json')
dataset = Dataset.from_pandas(df)
dataset = dataset.map(process_func, remove_columns=dataset.column_names)
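It can help to decode one processed example and confirm that the concatenation and the -100 masking behave as intended; a quick check sketch:

sample = dataset[0]
print(tokenizer.decode(sample["input_ids"]))  # marker string followed by the cell name
print(sum(l != -100 for l in sample["labels"]), "tokens contribute to the loss")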
### Configure the training parameters and start training
args = TrainingArguments(
    output_dir="./qwen-0.6b-lora",
    label_names=['labels'],
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=4,  # effective batch size = 1 * 4 = 4
    eval_steps=100,  # only takes effect if an eval dataset and eval strategy are configured
    logging_steps=100,
    num_train_epochs=2,
    learning_rate=5e-4,
    save_on_each_node=True,
    gradient_checkpointing=True,
    report_to="swanlab",
    run_name="qwen-0.6b-lora",
)
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=dataset,
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
)
trainer.train()
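Once training finishes, the adapter can be sanity-checked by prompting it in the same raw marker format used by process_func. A minimal generation sketch (the marker list is a hypothetical example):

model.eval()
query = tokenizer("CD19, MS4A1, CD79A", return_tensors="pt").to(model.device)  # illustrative B-cell markers
with torch.no_grad():
    output = model.generate(**query, max_new_tokens=32)
print(tokenizer.decode(output[0][query["input_ids"].shape[1]:], skip_special_tokens=True))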
### Merge and save the LoRA model
model = model.merge_and_unload()
model.save_pretrained('./qwen-0.6b-lora')
tokenizer.save_pretrained('./qwen-0.6b-lora')
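The merged checkpoint behaves like a standalone model, so it can later be reloaded without PEFT; a minimal sketch, assuming the save directory above:

merged_model = AutoModelForCausalLM.from_pretrained('./qwen-0.6b-lora', device_map='auto', torch_dtype='auto')
merged_tokenizer = AutoTokenizer.from_pretrained('./qwen-0.6b-lora')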