forked from mtanana/torchneuralconvo
beam.lua · 61 lines (44 loc) · 1.25 KB
--
-- Created by IntelliJ IDEA.
-- User: user
-- Date: 7/1/2016
-- Time: 8:47 PM
-- To change this template use File | Settings | File Templates.
--
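-- beam.lua: interactive beam-search console for a trained neuralconvo model.
-- It loads a saved vocabulary and model, then reads prompts from stdin and
-- prints a beam-search reply (beam width 5) until the user types "end".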
require 'neuralconvo'
require 'util.Tester'
local tokenizer = require "tokenizer"
local list = require "pl.List"
require 'nn'
local WordSplitLMMinibatchLoader = require 'util.WordSplitLMMinibatchLoader'
local options = {}

if loader == nil then
    cmd = torch.CmdLine()
    cmd:text('Options:')
    cmd:option('--cuda', false, 'use CUDA. Training must be done on CUDA')
    cmd:option('--debug', false, 'show debug info')
    cmd:option('--dataset', "model.t7", 'trained model file to load (resolved relative to data/)')
    cmd:option('--vocablocation', "data/opensubssmall/vocabwords.t7", 'path to the saved vocabulary file')
    cmd:text()
    options = cmd:parse(arg)

    -- Enable CUDA
    if options.cuda then
        require 'cutorch'
        require 'cunn'
    end

    -- Data: build the loader from the saved vocabulary
    loader = WordSplitLMMinibatchLoader.createFromJustVocab(options.vocablocation)
end
if model == nil then
    print("-- Loading model")
    model = torch.load("data/"..options.dataset)
end
-- Generate a reply for `text` with beam search (beam width 5) and print it
function say(text)
    print(getResponseBeam(text, loader, model, options.debug, 5))
end
repeat
    io.write("Ask: ")
    io.flush()
    answer = io.read()
    say(answer)  -- say() prints the reply itself; io.write(say(...)) would fail on the nil return
until answer == "end"
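-- Example invocation (a sketch, not part of the original file: it assumes
-- Torch's `th` runner and the default paths wired into the options above;
-- --dataset is resolved relative to data/):
--
--   th beam.lua --dataset model.t7 --vocablocation data/opensubssmall/vocabwords.t7
--   th beam.lua --cuda --dataset model.t7
--
-- Type "end" at the "Ask: " prompt to quit.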