model.py
import torch
from torch import nn
from torch.distributions import normal


class Actor(nn.Module):
    """Gaussian policy network: maps states to a Normal distribution over actions."""

    def __init__(self, n_states, n_actions):
        super(Actor, self).__init__()
        self.n_states = n_states
        self.n_actions = n_actions

        self.fc1 = nn.Linear(in_features=self.n_states, out_features=64)
        self.fc2 = nn.Linear(in_features=64, out_features=64)
        self.mu = nn.Linear(in_features=64, out_features=self.n_actions)
        # State-independent log standard deviation, learned as a free parameter.
        self.log_std = nn.Parameter(torch.zeros(1, self.n_actions))

        # Orthogonal weight initialization with zero biases for every linear layer.
        for layer in self.modules():
            if isinstance(layer, nn.Linear):
                nn.init.orthogonal_(layer.weight)
                layer.bias.data.zero_()

    def forward(self, inputs):
        x = inputs
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        mu = self.mu(x)
        std = self.log_std.exp()
        dist = normal.Normal(mu, std)
        return dist


class Critic(nn.Module):
    """State-value network: maps states to a scalar value estimate V(s)."""

    def __init__(self, n_states):
        super(Critic, self).__init__()
        self.n_states = n_states

        self.fc1 = nn.Linear(in_features=self.n_states, out_features=64)
        self.fc2 = nn.Linear(in_features=64, out_features=64)
        self.value = nn.Linear(in_features=64, out_features=1)

        # Orthogonal weight initialization with zero biases for every linear layer.
        for layer in self.modules():
            if isinstance(layer, nn.Linear):
                nn.init.orthogonal_(layer.weight)
                layer.bias.data.zero_()

    def forward(self, inputs):
        x = inputs
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        value = self.value(x)
        return value