
When I try to fine-tune a Huggingface GPT2LMHeadModel for causal language modeling (predicting the next word given a sequence of words) with PyTorch Lightning, I get an error during training:

AttributeError: 'str' object has no attribute 'size'

What is wrong with the training code? Is this caused by incorrect usage of DataCollatorForLanguageModeling together with PyTorch's DataLoader?
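For reference, here is a minimal sketch of how I understand the collator is meant to be driven by a DataLoader in isolation. It swaps in the pretrained "gpt2" tokenizer for my custom one, purely so the snippet is self-contained:

# Sketch: inspect what DataCollatorForLanguageModeling actually yields.
# NOTE: uses the pretrained "gpt2" tokenizer instead of my custom one.
import torch
from torch.utils.data import DataLoader
from transformers import GPT2Tokenizer, DataCollatorForLanguageModeling

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

examples = [torch.tensor(tokenizer.encode(t)) for t in ["hello world", "foo bar"]]
loader = DataLoader(examples, batch_size=1, collate_fn=collator)

batch = next(iter(loader))
print(type(batch), list(batch.keys()))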

Reproducible example:

import os
from pathlib import Path
import torch
import pytorch_lightning as pl
from transformers import (
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    DataCollatorForLanguageModeling,
)
from transformers.optimization import AdamW
from tokenizers import ByteLevelBPETokenizer
from torch.utils.data import (
    DataLoader,
    Dataset,
)

TOKENIZER_DIRPATH = os.path.join("..", "data")


def tokenize_data():
    # Train a byte-level BPE tokenizer on the corpus and write vocab.json / merges.txt
    tokenizer = ByteLevelBPETokenizer()
    tokenizer.train(
        files=os.path.join(TOKENIZER_DIRPATH, "words.txt"),
        vocab_size=50000,
        min_frequency=2,
        special_tokens=["<s>", "</s>", "<unk>", "<mask>", "<pad>"],
    )
    tokenizer.save_model(TOKENIZER_DIRPATH)


class MyDataset(Dataset):
    def __init__(self):
        tokenizer = GPT2Tokenizer(
            os.path.join(TOKENIZER_DIRPATH, "vocab.json"),
            os.path.join(TOKENIZER_DIRPATH, "merges.txt"),
        )

        # Tokenize every line of the corpus up front
        src_file = Path(os.path.join(TOKENIZER_DIRPATH, "words.txt"))
        lines = src_file.read_text(encoding="utf-8").splitlines()
        self.examples = [tokenizer.encode(line) for line in lines]

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return torch.tensor(self.examples[i])


class MyDataModule(pl.LightningDataModule):
    def __init__(self):
        super().__init__()
        self.tokenizer = GPT2Tokenizer(
            os.path.join(TOKENIZER_DIRPATH, "vocab.json"),
            os.path.join(TOKENIZER_DIRPATH, "merges.txt"),
        )

    def setup(self, stage):
        self.train_dataset = MyDataset()

    def train_dataloader(self):
        # mlm=False: the collator batches the examples and adds the labels
        # needed for causal language modeling
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer, mlm=False
        )
        return DataLoader(self.train_dataset, collate_fn=data_collator)


class MyModel(pl.LightningModule):
    def __init__(self, learning_rate, adam_beta1, adam_beta2, adam_epsilon):
        super().__init__()
        self.save_hyperparameters()
        config = GPT2Config()
        self.model = GPT2LMHeadModel(config)

    def forward(self, x):
        return self.model(x).logits

    def training_step(self, batch, batch_idx):
        input_ids, labels = batch
        # The traceback below points at this forward call
        loss = self.model(input_ids, labels=labels).loss
        self.log("train_loss", loss, on_epoch=True)
        return loss

    def configure_optimizers(self):
        optimizer = AdamW(
            self.parameters(),
            self.hparams.learning_rate,
            betas=(self.hparams.adam_beta1, self.hparams.adam_beta2),
            eps=self.hparams.adam_epsilon,
        )
        return optimizer


tokenize_data()
dm = MyDataModule()
model = MyModel(
    learning_rate=5e-5, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8,
)

trainer = pl.Trainer()
trainer.fit(model, dm)

Error traceback:

Epoch 0:   0%|                                                                                                                                                                                                                                             | 0/9 [00:00<?, ?it/s]
Traceback (most recent call last):
  File "test_gpt.py", line 102, in <module>
    trainer.fit(model, dm)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 499, in fit
    self.dispatch()
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 546, in dispatch
    self.accelerator.start_training(self)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 73, in start_training
    self.training_type_plugin.start_training(trainer)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py", line 114, in start_training
    self._results = trainer.run_train()
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 637, in run_train
    self.train_loop.run_training_epoch()
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 493, in run_training_epoch
    batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 655, in run_training_batch
    self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 426, in optimizer_step
    model_ref.optimizer_step(
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py", line 1387, in optimizer_step
    optimizer.step(closure=optimizer_closure)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py", line 214, in step
    self.__optimizer_step(*args, closure=closure, profiler_name=profiler_name, **kwargs)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py", line 134, in __optimizer_step
    trainer.accelerator.optimizer_step(optimizer, self._optimizer_idx, lambda_closure=closure, **kwargs)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 277, in optimizer_step
    self.run_optimizer_step(optimizer, opt_idx, lambda_closure, **kwargs)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 282, in run_optimizer_step
    self.training_type_plugin.optimizer_step(optimizer, lambda_closure=lambda_closure, **kwargs)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py", line 163, in optimizer_step
    optimizer.step(closure=lambda_closure, **kwargs)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/transformers/optimization.py", line 318, in step
    loss = closure()
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 649, in train_step_and_backward_closure
    result = self.training_step_and_backward(
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 743, in training_step_and_backward
    result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 293, in training_step
    training_step_output = self.trainer.accelerator.training_step(args)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 156, in training_step
    return self.training_type_plugin.training_step(*args)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py", line 125, in training_step
    return self.lightning_module.training_step(*args, **kwargs)
  File "test_gpt.py", line 81, in training_step
    loss = self.model(input_ids, labels=labels).loss
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 904, in forward
    transformer_outputs = self.transformer(
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 633, in forward
    input_shape = input_ids.size()
AttributeError: 'str' object has no attribute 'size'
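Reading the traceback, the `input_ids` reaching GPT-2's forward is a plain Python string. My (unverified) guess at the mechanism: if the collator returns a dict, then tuple-unpacking it in `training_step` yields the keys rather than the tensors, e.g.:

# Hypothetical illustration of the suspected failure mode:
# tuple-unpacking a dict iterates over its KEYS.
import torch

batch = {"input_ids": torch.tensor([[0, 1]]), "labels": torch.tensor([[0, 1]])}
input_ids, labels = batch
print(input_ids, labels)  # -> input_ids labels  (two strings, not tensors)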

Conda packages:

pytorch                   1.7.0           py3.8_cuda10.2.89_cudnn7.6.5_0    pytorch
pytorch-lightning         1.2.5              pyhd8ed1ab_0    conda-forge
tokenizers                0.10.1                   pypi_0    pypi
transformers              4.4.2                    pypi_0    pypi
