Deep Learning für Text mit PyTorch
Shubham Jain
Instructor
- Wichtige Anwendungsbereiche: Chatbots, Übersetzung von Sprachen, technisches Schreiben
"`python
import torch
import torch.nn as nn

# Training corpus: a short German greeting used as a character-level dataset.
data = "Hallo, wie geht es dir?"

# Character vocabulary and the two index mappings used for encoding/decoding.
chars = list(set(data))
char_to_ix = {char: i for i, char in enumerate(chars)}
ix_to_char = {i: char for i, char in enumerate(chars)}


class RNNModel(nn.Module):
    """Single-layer RNN that maps a one-hot character sequence to
    next-character logits.

    Args:
        input_size: size of each one-hot input vector (vocabulary size).
        hidden_size: number of hidden units in the RNN.
        output_size: number of output classes (vocabulary size).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNNModel, self).__init__()
        self.hidden_size = hidden_size
        # batch_first=True: inputs are (batch, seq_len, input_size).
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Zero initial hidden state: (num_layers, batch, hidden_size).
        h0 = torch.zeros(1, x.size(0), self.hidden_size)
        out, _ = self.rnn(x, h0)
        # Classify from the last time step only.
        out = self.fc(out[:, -1, :])
        return out


# BUG FIX: the original called RNNmodel(1, 16, 1) — wrong capitalization
# (NameError: the class is RNNModel) and wrong sizes: the inputs are
# one-hot vectors of length len(chars) and CrossEntropyLoss expects one
# logit per character class.
model = RNNModel(len(chars), 16, len(chars))
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# Build next-character training pairs: each input character predicts the
# character that follows it in the corpus.
input_ids = [char_to_ix[ch] for ch in data[:-1]]
target_ids = [char_to_ix[ch] for ch in data[1:]]

# Shape (N, 1): N "sequences" of length 1, then one-hot encode each
# character so the RNN sees float vectors of size len(chars).
inputs = nn.functional.one_hot(
    torch.tensor(input_ids, dtype=torch.long).view(-1, 1),
    num_classes=len(chars),
).float()

# CrossEntropyLoss expects class indices as long integers.
targets = torch.tensor(target_ids, dtype=torch.long)
- Indizes erstellen
# Full-batch training: 100 epochs over the entire character sequence.
for epoch in range(100):
    model.train()
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()

    # Log progress every 10th epoch.
    if (epoch + 1) % 10 == 0:
        print(f'Epoch {epoch+1}/100, Loss: {loss.item()}')
# Inference: one-hot encode a single test character and decode the
# model's most likely next character.
model.eval()
test_input = char_to_ix['h']
test_input = nn.functional.one_hot(
    torch.tensor(test_input).view(-1, 1), num_classes=len(chars)
).float()
with torch.no_grad():
    predicted_output = model(test_input)
predicted_char_ix = torch.argmax(predicted_output, 1).item()
# BUG FIX: the original printed a hard-coded 'Test Input: 10' and called
# .item() on the full (1, len(chars)) logit tensor, which raises
# RuntimeError; it also never used predicted_char_ix. Report the actual
# test character and the decoded prediction, matching the sample output
# ("Testeingabe: h, Vorhergesagte Ausgabe: e").
print(f'Test Input: h, Vorhergesagter Output: {ix_to_char[predicted_char_ix]}')
Epoche 10/100, Verlust: 3090.861572265625
Epoche 20/100, Verlust: 2935.4580078125
...
Epoche 100/100, Verlust: 1922.44140625
out
Testeingabe: h, Vorhergesagte Ausgabe: e
Deep Learning für Text mit PyTorch