Writing Poetry with an LSTM Neural Network in PyTorch

2020-04-24 10:42 · ColdCabbage · Python

This post shares a worked example of writing poetry with an LSTM neural network in PyTorch. It makes a useful reference; follow along below.

Working in PyTorch, we train a two-layer LSTM network on a corpus of tens of thousands of Tang poems, so that it learns to write poems in the Tang style.

The code is organized into four parts:

1. model.py defines the two-layer LSTM model (a sketch follows this list);

2. data.py defines how the Tang-poetry data obtained from the web is processed (also sketched below);

3. utils.py defines the loss-visualization helper;

4. main.py defines the model parameters and the training and poem-generation functions.

Reference: chapter 9 of 《深度学习框架PyTorch:入门与实践》 (Publishing House of Electronics Industry).
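Neither model.py nor data.py is reproduced in this post, so here is a minimal sketch of what model.py might contain, written only to be consistent with how main.py calls it below (PoetryModel(vocab_size, 128, 256); model(input, hidden) returning the output flattened to (seq_len * batch, vocab_size)). Treat it as an illustration under those assumptions, not the book's exact code:

import torch.nn as nn

class PoetryModel(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim):
        super(PoetryModel, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        # The two-layer LSTM at the heart of the model.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=2)
        self.linear = nn.Linear(hidden_dim, vocab_size)

    def forward(self, input, hidden=None):
        # input: (seq_len, batch) matrix of character indices
        seq_len, batch_size = input.size()
        embeds = self.embeddings(input)             # (seq_len, batch, embedding_dim)
        output, hidden = self.lstm(embeds, hidden)  # hidden defaults to zeros when None
        # Flatten so that each time step becomes one row of vocabulary scores.
        output = self.linear(output.view(seq_len * batch_size, -1))
        return output, hidden

Similarly, get_data in data.py presumably just unpacks a preprocessed tang.npz archive. The exact layout assumed below (an index matrix plus the two vocabulary dictionaries) is inferred from how main.py uses the return values:

import numpy as np

def get_data(opt):
    datas = np.load(opt.pickle_path, allow_pickle=True)  # allow_pickle is required on newer numpy
    data = datas['data']               # (num_poems, maxlen) matrix of character indices
    word2ix = datas['word2ix'].item()  # dict: character -> index
    ix2word = datas['ix2word'].item()  # dict: index -> character
    return data, word2ix, ix2word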

The main.py code, with comments, follows.

import sys, os
import torch as t
from data import get_data
from model import PoetryModel
from torch import nn
from torch.autograd import Variable
from utils import Visualizer
import tqdm
from torchnet import meter
import ipdb
 
class Config(object):
    data_path = 'data/'
    pickle_path = 'tang.npz'
    author = None
    constrain = None
    category = 'poet.tang'  # or 'poet.song'
    lr = 1e-3
    weight_decay = 1e-4
    use_gpu = True
    epoch = 20
    batch_size = 128
    maxlen = 125
    plot_every = 20
    # use_env = True  # whether to use visdom
    env = 'poety'  # visdom environment name
    max_gen_len = 200
    debug_file = '/tmp/debugp'
    model_path = None
    prefix_words = '细雨鱼儿出,微风燕子斜。'  # sets the mood; not part of the generated poem
    start_words = '闲云潭影日悠悠'  # opening words of the poem
    acrostic = False  # whether to generate an acrostic poem
    model_prefix = 'checkpoints/tang'  # prefix for saved model checkpoints
opt = Config()
 
def generate(model, start_words, ix2word, word2ix, prefix_words=None):
    '''
    Given a few opening words, generate a complete poem that continues them.
    '''
    results = list(start_words)
    start_word_len = len(start_words)
    # Manually set the first input token to <START>.
    input = Variable(t.Tensor([word2ix['<START>']]).view(1, 1).long())
    if opt.use_gpu: input = input.cuda()
    hidden = None

    if prefix_words:
        # Feed the mood-setting prefix through the model to warm up the hidden state.
        for word in prefix_words:
            output, hidden = model(input, hidden)
            # .view(1, 1) reshapes the input to (seq_len=1, batch=1).
            input = Variable(input.data.new([word2ix[word]])).view(1, 1)
    for i in range(opt.max_gen_len):
        output, hidden = model(input, hidden)

        if i < start_word_len:
            # Still within the user-supplied opening: feed those words in order.
            w = results[i]
            input = Variable(input.data.new([word2ix[w]])).view(1, 1)
        else:
            # Greedy decoding: take the highest-scoring next character.
            top_index = output.data[0].topk(1)[1][0]
            w = ix2word[top_index]
            results.append(w)
            input = Variable(input.data.new([top_index])).view(1, 1)
        if w == '<EOP>':
            del results[-1]  # drop the end-of-poem marker
            break
    return results
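# An illustrative call (hypothetical; it assumes a trained model and the
# vocabulary mappings returned by get_data):
#   poem = generate(model, opt.start_words, ix2word, word2ix, opt.prefix_words)
#   print(''.join(poem))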
 
def gen_acrostic(model, start_words, ix2word, word2ix, prefix_words=None):
    '''
    Generate an acrostic poem.
    start_words : u'深度学习'
    generates, for example:
    深木通中岳,青苔半日脂。
    度山分地险,逆浪到南巴。
    学道兵犹毒,当时燕不移。
    习根通古岸,开镜出清羸。
    '''
    results = []
    start_word_len = len(start_words)
    input = Variable(t.Tensor([word2ix['<START>']]).view(1, 1).long())
    if opt.use_gpu: input = input.cuda()
    hidden = None

    index = 0  # how many of the acrostic head characters have been consumed
    pre_word = '<START>'  # the previous character

    if prefix_words:
        for word in prefix_words:
            output, hidden = model(input, hidden)
            input = Variable(input.data.new([word2ix[word]])).view(1, 1)

    for i in range(opt.max_gen_len):
        output, hidden = model(input, hidden)
        top_index = output.data[0].topk(1)[1][0]
        w = ix2word[top_index]

        if pre_word in {u'。', u'!', '<START>'}:
            # At the start of a line (after a full stop), feed in a head character.
            if index == start_word_len:
                # The poem already contains all head characters, so stop.
                break
            else:
                # Use the next head character as the model input.
                w = start_words[index]
                index += 1
                input = Variable(input.data.new([word2ix[w]])).view(1, 1)
        else:
            # Otherwise feed the previously predicted character back in as input.
            input = Variable(input.data.new([word2ix[w]])).view(1, 1)
        results.append(w)
        pre_word = w
    return results
 
def train(**kwargs):

    for k, v in kwargs.items():
        setattr(opt, k, v)  # override the attributes of opt from the command line
    vis = Visualizer(env=opt.env)

    # Load the data (get_data is defined in data.py).
    data, word2ix, ix2word = get_data(opt)
    data = t.from_numpy(data)
    # Note: DataLoader is spelled with a capital L.
    dataloader = t.utils.data.DataLoader(data,
                                         batch_size=opt.batch_size,
                                         shuffle=True,
                                         num_workers=1)

    # Model definition.
    model = PoetryModel(len(word2ix), 128, 256)
    optimizer = t.optim.Adam(model.parameters(), lr=opt.lr)
    criterion = nn.CrossEntropyLoss()

    if opt.model_path:
        model.load_state_dict(t.load(opt.model_path))
    if opt.use_gpu:
        model.cuda()
        criterion.cuda()

    # meter.AverageValueMeter tracks the mean (and standard deviation) of the
    # values added to it -- useful for averaging the loss over many batches.
    # add(value, n=1) adds a value with an optional weight n; the meter takes
    # no arguments at construction time.
    loss_meter = meter.AverageValueMeter()

    for epoch in range(opt.epoch):
        loss_meter.reset()
        for ii, data_ in tqdm.tqdm(enumerate(dataloader)):
            # tqdm draws a progress bar.
            # Training step: cast data_ to long, swap dimensions 0 and 1 so the
            # sequence dimension comes first, and make the memory contiguous.
            data_ = data_.long().transpose(1, 0).contiguous()
            if opt.use_gpu: data_ = data_.cuda()
            optimizer.zero_grad()
            # Shift the poem by one character to form input and target,
            # so the model learns to predict each next character.
            input_, target = Variable(data_[:-1, :]), Variable(data_[1:, :])
            output, _ = model(input_)
            loss = criterion(output, target.view(-1))
            loss.backward()
            optimizer.step()

            # loss.data[0] extracts the scalar loss in old PyTorch versions;
            # from PyTorch 0.4 on, use loss.item() instead.
            loss_meter.add(loss.data[0])

            # Visualization uses the helpers in utils.py.
            if (1 + ii) % opt.plot_every == 0:

                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()
                vis.plot('loss', loss_meter.value()[0])

                # Show how the model is doing so far: first the original poems.
                # The nested comprehension maps the first 16 poems in the batch
                # from indices back to characters.
                poetrys = [[ix2word[_word] for _word in data_[:, _iii]]
                           for _iii in range(data_.size(1))][:16]
                vis.text('</br>'.join([''.join(poetry) for poetry in
                                       poetrys]), win=u'origin_poem')
                gen_poetries = []
                # Generate 8 poems, each starting with one of these characters.
                for word in list(u'春江花月夜凉如水'):
                    gen_poetry = ''.join(generate(model, word, ix2word, word2ix))
                    gen_poetries.append(gen_poetry)
                vis.text('</br>'.join([''.join(poetry) for poetry in
                                       gen_poetries]), win=u'gen_poem')
        t.save(model.state_dict(), '%s_%s.pth' % (opt.model_prefix, epoch))
 
def gen(**kwargs):
    '''
    Command-line interface for generating a poem.
    '''

    for k, v in kwargs.items():
        setattr(opt, k, v)
    data, word2ix, ix2word = get_data(opt)
    model = PoetryModel(len(word2ix), 128, 256)
    map_location = lambda s, l: s
    # map_location tells t.load which device to put the tensors on; returning
    # the storage unchanged loads everything onto the CPU.
    state_dict = t.load(opt.model_path, map_location=map_location)
    model.load_state_dict(state_dict)

    if opt.use_gpu:
        model.cuda()
    if sys.version_info.major == 3:
        if opt.start_words.isprintable():
            start_words = opt.start_words
            prefix_words = opt.prefix_words if opt.prefix_words else None
        else:
            start_words = opt.start_words.encode('ascii',
                'surrogateescape').decode('utf8')
            prefix_words = opt.prefix_words.encode('ascii',
                'surrogateescape').decode('utf8') if opt.prefix_words else None
        # Normalize ASCII punctuation to the full-width forms used in the corpus.
        start_words = start_words.replace(',', u',') \
                                 .replace('.', u'。') \
                                 .replace('?', u'?')
        gen_poetry = gen_acrostic if opt.acrostic else generate
        result = gen_poetry(model, start_words, ix2word, word2ix, prefix_words)
        print(''.join(result))
if __name__ == '__main__':
    import fire
    fire.Fire()

The code above taught me a few things:

1. How Python programs are laid out (whitespace, line breaks, and so on), and more about Python's basic modules;

2. The errors to watch for: misspelled function names, wrong capitalization, misspelled variable names, unbalanced brackets;

3. A better understanding of how cuda() is used;

4. How to expose functions as a command-line interface with fire (see the example below) and drop into the debugger with ipdb;

5. How to visualize training results with visdom;

6. A deeper understanding of LSTMs, and a high-level grasp of how a deep-learning project is structured and implemented.
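For reference, fire.Fire() exposes the module-level functions as subcommands, so training and generation might be invoked roughly as follows. These command lines are illustrative, and the checkpoint file name is hypothetical (it depends on model_prefix and the epoch at which training stops):

python main.py train --use_gpu=True --batch_size=128
python main.py gen --model_path='checkpoints/tang_19.pth' --start_words='深度学习' --acrostic=True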

That is the whole of this example of writing poetry with an LSTM neural network in PyTorch. I hope it gives you a useful reference.

Original article: https://blog.csdn.net/weixin_39845112/article/details/80045091
