Sample Classification Code of CIFAR-10 in Torch


from: http://torch.ch/blog/2015/07/30/cifar.html 

 

require 'xlua'
require 'optim'
require 'nn'
require 'image'
local c = require 'trepl.colorize'

opt = lapp[[
   -s,--save                  (default "logs")      subdirectory to save logs
   -b,--batchSize             (default 128)          batch size
   -r,--learningRate          (default 1)        learning rate
   --learningRateDecay        (default 1e-7)      learning rate decay
   --weightDecay              (default 0.0005)      weightDecay
   -m,--momentum              (default 0.9)         momentum
   --epoch_step               (default 25)          epoch step
   --model                    (default vgg_bn_drop)     model name
   --max_epoch                (default 300)           maximum number of iterations
   --backend                  (default nn)            backend
   --type                     (default cuda)          cuda/float/cl
]]
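-- lapp (Penlight's option parser, available as a global under the th
-- interpreter) turns the spec above into the table opt; note that the
-- defaults assume a CUDA-capable GPU (--type cuda)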

print(opt)

do -- data augmentation module
  local BatchFlip,parent = torch.class('nn.BatchFlip', 'nn.Module')
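  -- BatchFlip mirrors a random half of each training batch horizontally, a
  -- cheap data augmentation; model:evaluate() clears self.train, so the
  -- module is a no-op at test time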

  function BatchFlip:__init()
    parent.__init(self)
    self.train = true
  end

  function BatchFlip:updateOutput(input)
    if self.train then
      local bs = input:size(1)
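      -- mark a random half of the batch (permutation values <= bs/2) for flipping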
      local flip_mask = torch.randperm(bs):le(bs/2)
      for i=1,input:size(1) do
        if flip_mask[i] == 1 then image.hflip(input[i], input[i]) end
      end
    end
    self.output:set(input)
    return self.output
  end
end
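
-- cast() converts a tensor or module to the backend selected by --type:
-- torch.CudaTensor, torch.FloatTensor, or torch.ClTensor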

local function cast(t)
   if opt.type == 'cuda' then
      require 'cunn'
      return t:cuda()
   elseif opt.type == 'float' then
      return t:float()
   elseif opt.type == 'cl' then
      require 'clnn'
      return t:cl()
   else
      error('Unknown type '..opt.type)
   end
end

print(c.blue '==>' ..' configuring model')
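-- the model is a three-stage pipeline: (1) BatchFlip augmentation in float
-- on the CPU, (2) a Copy layer that casts to the training tensor type,
-- (3) the network itself, loaded from models/<opt.model>.lua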
local model = nn.Sequential()
model:add(nn.BatchFlip():float())
model:add(cast(nn.Copy('torch.FloatTensor', torch.type(cast(torch.Tensor())))))
model:add(cast(dofile('models/'..opt.model..'.lua')))
model:get(2).updateGradInput = function(input) return end
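-- BatchFlip has no parameters, so nothing below the Copy layer ever needs
-- gradients; stubbing updateGradInput on stage 2 skips that backward work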

if opt.backend == 'cudnn' then
   require 'cudnn'
   cudnn.benchmark=true
   cudnn.convert(model:get(3), cudnn)
end

print(model)

print(c.blue '==>' ..' loading data')

-------------------------------------------------------------------------------------------
----------------------------  Load the Train and Test data  -------------------------------
-------------------------------------------------------------------------------------------

local trsize = 50000
local tesize = 10000
-- load dataset
trainData = {
   data = torch.Tensor(50000, 3072),
   labels = torch.Tensor(50000),
   size = function() return trsize end
}
local trainData = trainData -- keep a local alias: Lua locals are faster to access than globals
for i = 0,4 do
   local subset = torch.load('cifar-10-batches-t7/data_batch_' .. (i+1) .. '.t7', 'ascii')
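   -- each t7 batch stores images as a 3072x10000 matrix (one image per
   -- column), so transpose to get one 3072-dimensional row per image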
   trainData.data[{ {i*10000+1, (i+1)*10000} }] = subset.data:t()
   trainData.labels[{ {i*10000+1, (i+1)*10000} }] = subset.labels
end
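-- the t7 labels run 0-9; Torch criteria expect 1-based class indices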
trainData.labels = trainData.labels + 1

local subset = torch.load('cifar-10-batches-t7/test_batch.t7', 'ascii')
testData = {
   data = subset.data:t():double(),
   labels = subset.labels[1]:double(),
   size = function() return tesize end
}
local testData = testData -- local alias, as above
testData.labels = testData.labels + 1

-- resize dataset (if using small version)
trainData.data = trainData.data[{ {1,trsize} }]
trainData.labels = trainData.labels[{ {1,trsize} }]

testData.data = testData.data[{ {1,tesize} }]
testData.labels = testData.labels[{ {1,tesize} }]

-- reshape data
trainData.data = trainData.data:reshape(trsize,3,32,32)
testData.data = testData.data:reshape(tesize,3,32,32)

----------------------------------------------------------------------------------
----------------------------------------------------------------------------------
--    preprocessing data (color space + normalization) 
----------------------------------------------------------------------------------
----------------------------------------------------------------------------------
print '<trainer> preprocessing data (color space + normalization)'
collectgarbage()

-- preprocess trainSet
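-- convert each image to YUV, contrast-normalize the luminance (Y) channel
-- locally with a 7-point Gaussian kernel, then normalize U and V globally
-- by their train-set mean and standard deviation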
local normalization = nn.SpatialContrastiveNormalization(1, image.gaussian1D(7))
for i = 1,trainData:size() do
   xlua.progress(i, trainData:size())
   -- rgb -> yuv
   local rgb = trainData.data[i]
   local yuv = image.rgb2yuv(rgb)
   -- normalize y locally:
   yuv[1] = normalization(yuv[{{1}}])
   trainData.data[i] = yuv
end
-- normalize u globally:
local mean_u = trainData.data:select(2,2):mean()
local std_u = trainData.data:select(2,2):std()
trainData.data:select(2,2):add(-mean_u)
trainData.data:select(2,2):div(std_u)
-- normalize v globally:
local mean_v = trainData.data:select(2,3):mean()
local std_v = trainData.data:select(2,3):std()
trainData.data:select(2,3):add(-mean_v)
trainData.data:select(2,3):div(std_v)

trainData.mean_u = mean_u
trainData.std_u = std_u
trainData.mean_v = mean_v
trainData.std_v = std_v
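
-- the same train-set U/V statistics are applied to the test set below, so
-- both splits pass through an identical transform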

-- preprocess testSet
for i = 1,testData:size() do
   xlua.progress(i, testData:size())
   -- rgb -> yuv
   local rgb = testData.data[i]
   local yuv = image.rgb2yuv(rgb)
   -- normalize y locally:
   yuv[1] = normalization(yuv[{{1}}])
   testData.data[i] = yuv
end
-- normalize u globally:
testData.data:select(2,2):add(-mean_u)
testData.data:select(2,2):div(std_u)
-- normalize v globally:
testData.data:select(2,3):add(-mean_v)
testData.data:select(2,3):div(std_v)

----------------------------------------------------------------------------------
-----------------------------   END  ---------------------------------------------

trainData.data = trainData.data:float()
testData.data = testData.data:float()

confusion = optim.ConfusionMatrix(10)

print('Will save at '..opt.save)
paths.mkdir(opt.save)
testLogger = optim.Logger(paths.concat(opt.save, 'test.log'))
testLogger:setNames{'% mean class accuracy (train set)', '% mean class accuracy (test set)'}
testLogger.showPlot = false

parameters,gradParameters = model:getParameters()
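-- getParameters() flattens all learnable weights into one contiguous vector
-- (plus a matching gradient vector), the form the optim routines expect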


print(c.blue'==>' ..' setting criterion')
criterion = cast(nn.CrossEntropyCriterion())


print(c.blue'==>' ..' configuring optimizer')
optimState = {
  learningRate = opt.learningRate,
  weightDecay = opt.weightDecay,
  momentum = opt.momentum,
  learningRateDecay = opt.learningRateDecay,
}


function train()
  model:training()
  epoch = epoch or 1

  -- drop learning rate every "epoch_step" epochs
  if epoch % opt.epoch_step == 0 then optimState.learningRate = optimState.learningRate/2 end
  
  print(c.blue '==>'.." online epoch # " .. epoch .. ' [batchSize = ' .. opt.batchSize .. ']')

  local targets = cast(torch.FloatTensor(opt.batchSize))
  local indices = torch.randperm(trainData.data:size(1)):long():split(opt.batchSize)
  -- remove last element so that all the batches have equal size
  indices[#indices] = nil

  local tic = torch.tic()
  for t,v in ipairs(indices) do
    xlua.progress(t, #indices)

    local inputs = trainData.data:index(1,v)
    targets:copy(trainData.labels:index(1,v))

    local feval = function(x)
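      -- returns the loss and d(loss)/d(parameters) for this minibatch;
      -- optim.sgd calls this closure and applies the update in place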
      if x ~= parameters then parameters:copy(x) end
      gradParameters:zero()
      
      local outputs = model:forward(inputs)
      local f = criterion:forward(outputs, targets)
      local df_do = criterion:backward(outputs, targets)
      model:backward(inputs, df_do)

      confusion:batchAdd(outputs, targets)

      return f,gradParameters
    end
    optim.sgd(feval, parameters, optimState)
  end

  confusion:updateValids()
  print(('Train accuracy: '..c.cyan'%.2f'..' %%\t time: %.2f s'):format(
        confusion.totalValid * 100, torch.toc(tic)))

  train_acc = confusion.totalValid * 100

  confusion:zero()
  epoch = epoch + 1
end


function test()
  -- disable flips, dropouts and batch normalization
  model:evaluate()
  print(c.blue '==>'.." testing")
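  -- 125 evenly divides the 10,000 test images into 80 batches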
  local bs = 125
  for i=1,testData.data:size(1),bs do
    local outputs = model:forward(testData.data:narrow(1,i,bs))
    confusion:batchAdd(outputs, testData.labels:narrow(1,i,bs))
  end

  confusion:updateValids()
  print('Test accuracy:', confusion.totalValid * 100)
  
  if testLogger then
    paths.mkdir(opt.save)
    testLogger:add{train_acc, confusion.totalValid * 100}
    testLogger:style{'-','-'}
    testLogger:plot()

    if paths.filep(opt.save..'/test.log.eps') then
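      -- rasterize the gnuplot .eps with ImageMagick's convert, base64-encode
      -- the png, and inline it into a small self-contained HTML report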
      local base64im
      do
        os.execute(('convert -density 200 %s/test.log.eps %s/test.png'):format(opt.save,opt.save))
        os.execute(('openssl base64 -in %s/test.png -out %s/test.base64'):format(opt.save,opt.save))
        local f = io.open(opt.save..'/test.base64')
        if f then base64im = f:read'*all' end
      end

      local file = io.open(opt.save..'/report.html','w')
      file:write(([[
      <!DOCTYPE html>
      <html>
      <body>
      <title>%s - %s</title>
      <img src="data:image/png;base64,%s">
      <h4>optimState:</h4>
      <table>
      ]]):format(opt.save,epoch,base64im))
      for k,v in pairs(optimState) do
        if torch.type(v) == 'number' then
          file:write('<tr><td>'..k..'</td><td>'..v..'</td></tr>\n')
        end
      end
      file:write'</table><pre>\n'
      file:write(tostring(confusion)..'\n')
      file:write(tostring(model)..'\n')
      file:write'</pre></body></html>'
      file:close()
    end
  end

  -- save model every 50 epochs
  if epoch % 50 == 0 then
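    -- clearState() drops intermediate buffers so the checkpoint stays small;
    -- only stage 3 (the network itself) is saved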
    local filename = paths.concat(opt.save, 'model.net')
    print('==> saving model to '..filename)
    torch.save(filename, model:get(3):clearState())
  end

  confusion:zero()
end


for i=1,opt.max_epoch do
  train()
  test()
end

This is the original version of the code.

Why was it written like this? As given, it does not run.
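
A few likely culprits, judging from the script itself rather than anything in the original post: the loader expects the CIFAR-10 batches already converted to Torch format under cifar-10-batches-t7/, the model definition must exist as models/<name>.lua (vgg_bn_drop by default), and --type defaults to cuda, which requires cunn and an NVIDIA GPU. Assuming those files are in place and the script is saved as train.lua (a hypothetical name), a CPU-only run would look like:

th train.lua --type float --model vgg_bn_drop --max_epoch 10

The flags map one-to-one onto the lapp options at the top of the script.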

 
