python-3.x IndexError: index out of range in self

brqmpdu1 · posted 2023-05-19 in Python

gpt2_fine_tune.py

from datasets import load_dataset
from transformers import GPT2Tokenizer, GPT2LMHeadModel, Trainer, TrainingArguments

# Step 1: Load the pre-trained GPT-2 model
model = GPT2LMHeadModel.from_pretrained('gpt2')

# Step 2: Tokenize the training data
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
train_file_path = 'shakespeare.txt'

# Step 3: Prepare the training data
# train_dataset = TextDataset(tokenizer, file_path=train_file_path, block_size=512)
extension = "text"
data_files = train_file_path
raw_datasets = load_dataset(
    extension,
    data_files=data_files,
)
text_column_name = "text"
padding = "max_length"

def tokenize_function(examples):
    return tokenizer(examples['text'], padding='max_length', truncation=True, max_length=512)

column_names = list(raw_datasets["train"].features)
tokenized_datasets = raw_datasets.map(
    tokenize_function,
    batched=True,
    remove_columns=column_names,
)

# Step 4: Create a TrainingArguments object
training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=3,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir='./logs',
    logging_steps=1000,
    save_steps=5000,
    evaluation_strategy='steps',
    eval_steps=5000,
    load_best_model_at_end=True
)

# Step 5: Instantiate a Trainer object
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"]
)

# Step 6: Train the model
trainer.train()

Questions:

What does "index" mean here?
How do I resolve this error?

My error:

Traceback (most recent call last):
  File "/Users/sarit/study/gpt4all/gpt2_fine_tune.py", line 58, in <module>
    trainer.train()
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/transformers/trainer.py", line 1662, in train
    return inner_training_loop(
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/transformers/trainer.py", line 1929, in _inner_training_loop
    tr_loss_step = self.training_step(model, inputs)
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/transformers/trainer.py", line 2699, in training_step
    loss = self.compute_loss(model, inputs)
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/transformers/trainer.py", line 2731, in compute_loss
    outputs = model(**inputs)
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 1075, in forward
    transformer_outputs = self.transformer(
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 842, in forward
    inputs_embeds = self.wte(input_ids)
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/torch/nn/modules/sparse.py", line 162, in forward
    return F.embedding(
  File "/Users/sarit/miniforge3/lib/python3.10/site-packages/torch/nn/functional.py", line 2210, in embedding
    return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
IndexError: index out of range in self

uttx8gqw1#

The problem came from the dataset. I found another example that works; this is the complete version. I also shortened the text file to speed up my training run.
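
For reference: the "index" in the error message is a token id being looked up in the model's word-embedding table (the "self" is the nn.Embedding module that raised it). tokenizer.add_special_tokens({'pad_token': '[PAD]'}) grows the tokenizer's vocabulary to 50258 entries, but the pre-trained GPT-2 embedding matrix has only 50257 rows, so every padded batch feeds the new id 50257 into F.embedding and the lookup goes out of range. A minimal sketch of the direct fix, keeping the rest of the original load_dataset script unchanged:

from transformers import GPT2Tokenizer, GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained('gpt2')
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')

# Adding a special token enlarges the tokenizer's vocabulary to 50258...
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
# ...so the embedding matrix must grow to match, otherwise id 50257 is out of range.
model.resize_token_embeddings(len(tokenizer))

# Alternative that needs no resizing: reuse the end-of-text token for padding.
# tokenizer.pad_token = tokenizer.eos_token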

import torch
from torch.utils.data import random_split
from transformers import GPT2Tokenizer, GPT2LMHeadModel, Trainer, TrainingArguments, TextDataset, \
    DataCollatorForLanguageModeling

# Step 1: Load the pre-trained GPT-2 model
model = GPT2LMHeadModel.from_pretrained('gpt2')

# Step 2: Tokenize the training data
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
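# Note: adding '[PAD]' without model.resize_token_embeddings(len(tokenizer))
# is only safe here because TextDataset emits fixed-size 128-token blocks,
# so the data collator never actually inserts the new pad token.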

# Step 3: Prepare the training data
dataset = TextDataset(
    tokenizer=tokenizer,
    file_path="elspeare.txt",
    block_size=128
)
train_size = int(0.9 * len(dataset))
eval_size = len(dataset) - train_size

train_dataset, eval_dataset = random_split(dataset, [train_size, eval_size])

data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer, mlm=False
)

print(f"torch.backends.mps.is_available(): {torch.backends.mps.is_available()}")

# Step 4: Create a TrainingArguments object
training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=3,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir='./logs',
    logging_steps=1000,
    save_steps=5000,
    evaluation_strategy='steps',
    eval_steps=5000,
    load_best_model_at_end=True,
    use_mps_device=torch.backends.mps.is_available(),
)

# Step 5: Instantiate a Trainer object
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    data_collator=data_collator,
)

# Step 6: Train the model
trainer.train()
trainer.save_model("gpt2_fine_tune")
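
To sanity-check the result, a minimal sketch (assuming the script above has run and written the "gpt2_fine_tune" directory; the stock tokenizer is reloaded because the script does not save it):

from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2_fine_tune')

prompt = 'Shall I compare thee'
inputs = tokenizer(prompt, return_tensors='pt')
# Sample a short continuation from the fine-tuned weights.
outputs = model.generate(**inputs, max_new_tokens=40, do_sample=True, top_k=50,
                         pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))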
