diff --git a/README.md b/README.md
index fd6afa8d..aeb1d86b 100644
--- a/README.md
+++ b/README.md
@@ -142,6 +142,30 @@ to run in OpenCL mode add the flag `-gpu_backend opencl`.
 
 There are more flags you can use to configure sampling; [read about them here](doc/flags.md#sampling).
 
+## Step 4 (optional): Serve samples from an HTTP server
+If you want to generate new text on demand without reloading the model for every sample, you can use the script `server.lua`.
+You will need to install the [Turbo](https://github.com/kernelsauce/turbo) framework first:
+```bash
+luarocks install turbo
+```
+
+See the [Turbo installation manual](https://github.com/kernelsauce/turbo#installation) if you run into problems.
+Then run the server:
+
+```bash
+th server.lua -checkpoint cv/checkpoint_10000.t7 -port 8888
+```
+
+Now you can generate a new sample by sending an HTTP GET request:
+```bash
+curl -G -d "length=100&temperature=0.9" http://localhost:8888/sample
+```
+
+The following command line arguments of `sample.lua` work the same way for `server.lua`:
+`-checkpoint`, `-gpu`, `-gpu_backend`, and `-verbose`. The `-port` argument sets the HTTP port to listen on.
+The remaining arguments (`length`, `start_text`, `temperature`, and `sample`) should be passed as GET parameters.
+
+
 # Benchmarks
 To benchmark `torch-rnn` against `char-rnn`, we use each to train LSTM language models for the tiny-shakespeare
 dataset with 1, 2 or 3 layers and with an RNN size of 64, 128, 256, or 512. For each we use a minibatch size of 50, a sequence
diff --git a/server.lua b/server.lua
new file mode 100644
index 00000000..d0f7978c
--- /dev/null
+++ b/server.lua
@@ -0,0 +1,59 @@
+require 'torch'
+require 'nn'
+
+require 'LanguageModel'
+
+local turbo = require("turbo")
+
+local cmd = torch.CmdLine()
+cmd:option('-checkpoint', 'cv/checkpoint_4000.t7')
+cmd:option('-gpu', 0)
+cmd:option('-gpu_backend', 'cuda')
+cmd:option('-verbose', 0)
+cmd:option('-port', 8888) -- HTTP port to listen on
+local opt = cmd:parse(arg)
+
+
+local checkpoint = torch.load(opt.checkpoint)
+local model = checkpoint.model
+
+local msg
+if opt.gpu >= 0 and opt.gpu_backend == 'cuda' then
+  require 'cutorch'
+  require 'cunn'
+  cutorch.setDevice(opt.gpu + 1)
+  model:cuda()
+  msg = string.format('Running with CUDA on GPU %d', opt.gpu)
+elseif opt.gpu >= 0 and opt.gpu_backend == 'opencl' then
+  require 'cltorch'
+  require 'clnn'
+  model:cl()
+  msg = string.format('Running with OpenCL on GPU %d', opt.gpu)
+else
+  msg = 'Running in CPU mode'
+end
+if opt.verbose == 1 then print(msg) end
+
+model:evaluate()
+
+
+local SampleHandler = class("SampleHandler", turbo.web.RequestHandler)
+
+function SampleHandler:get()
+  -- Read sampling options from the query string; GET arguments arrive as strings, so convert the numeric ones
+  opt['length'] = tonumber(self:get_argument("length", 2000))
+  opt['start_text'] = self:get_argument("start_text", "")
+  opt['sample'] = tonumber(self:get_argument("sample", 1))
+  opt['temperature'] = tonumber(self:get_argument("temperature", 1))
+
+  local sample = model:sample(opt)
+
+  self:write(sample)
+end
+
+local app = turbo.web.Application:new({
+  {"/sample", SampleHandler}
+})
+
+app:listen(opt.port)
+turbo.ioloop.instance():start()
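
A quick usage sketch for the new endpoint, assuming the server from `server.lua` above is running on port 8888 with your own checkpoint: `start_text` often contains spaces, colons, or newlines, so it is easiest to let curl URL-encode it rather than building the query string by hand. The prompt text, length, and temperature below are illustrative values, not defaults from the patch.

```bash
# Hypothetical example: prime the sampler with some start text and fetch 200 characters.
# The /sample route and the length/start_text/temperature parameters come from server.lua;
# -G sends the data as query-string parameters, and --data-urlencode handles the encoding.
curl -G \
  --data-urlencode "start_text=ROMEO:" \
  -d "length=200" \
  -d "temperature=0.7" \
  http://localhost:8888/sample
```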