
Commit 25dafd2

initial commit

hellbell committed Jul 14, 2017
1 parent 84fdd13
Showing 572 changed files with 34,222 additions and 0 deletions.
42 changes: 42 additions & 0 deletions +solver/adadelta.m
@@ -0,0 +1,42 @@
function [w, state] = adadelta(w, state, grad, opts, ~)
%ADADELTA
% Example AdaDelta solver, for use with CNN_TRAIN and CNN_TRAIN_DAG.
%
% AdaDelta sets its own learning rate, so any learning rate set in the
% options of CNN_TRAIN and CNN_TRAIN_DAG will be ignored.
%
% If called without any input argument, returns the default options
% structure.
%
% Solver options: (opts.train.solverOpts)
%
% `epsilon`:: 1e-6
% Small additive constant to regularize variance estimate.
%
% `rho`:: 0.9
% Moving average window for variance update, between 0 and 1 (larger
% values result in slower/more stable updating).

% Copyright (C) 2016 Joao F. Henriques.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).

if nargin == 0  % Return the default solver options
  w = struct('epsilon', 1e-6, 'rho', 0.9) ;
  return ;
end

if isequal(state, 0)  % First iteration, initialize state struct
  state = struct('g_sqr', 0, 'delta_sqr', 0) ;
end

rho = opts.rho ;

state.g_sqr = state.g_sqr * rho + grad.^2 * (1 - rho) ;
new_delta = -sqrt((state.delta_sqr + opts.epsilon) ./ ...
                  (state.g_sqr + opts.epsilon)) .* grad ;
state.delta_sqr = state.delta_sqr * rho + new_delta.^2 * (1 - rho) ;

w = w + new_delta ;
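
A minimal usage sketch for this solver (a sketch, not part of the commit; it assumes the `opts.train.solverOpts` convention named in the header comment, with the rest of the `cnn_train` setup elided):

```
% Hedged sketch: selecting AdaDelta for cnn_train / cnn_train_dag.
opts.train.solver = @solver.adadelta ;        % solver function handle
opts.train.solverOpts = solver.adadelta() ;   % defaults: epsilon = 1e-6, rho = 0.9
opts.train.solverOpts.rho = 0.95 ;            % larger rho = slower, more stable updates
% AdaDelta sets its own learning rate, so any configured learning rate is ignored.
```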
43 changes: 43 additions & 0 deletions +solver/adagrad.m
@@ -0,0 +1,43 @@
function [w, g_sqr] = adagrad(w, g_sqr, grad, opts, lr)
%ADAGRAD
% Example AdaGrad solver, for use with CNN_TRAIN and CNN_TRAIN_DAG.
%
% Set the initial learning rate for AdaGrad in the options for
% CNN_TRAIN and CNN_TRAIN_DAG. Note that a learning rate that works for
% SGD may be inappropriate for AdaGrad; the default is 0.001.
%
% If called without any input argument, returns the default options
% structure.
%
% Solver options: (opts.train.solverOpts)
%
% `epsilon`:: 1e-10
% Small additive constant to regularize variance estimate.
%
% `rho`:: 1
% Moving average window for variance update, between 0 and 1 (larger
% values result in slower/more stable updating). This is similar to
% RHO in AdaDelta and RMSProp. Standard AdaGrad is obtained with a RHO
% value of 1 (use total average instead of a moving average).
%
% A possibly undesirable effect of standard AdaGrad is that the update
% monotonically decreases to 0, until training eventually stalls. This
% is because the AdaGrad update is inversely proportional to the square
% root of the accumulated squared gradients seen so far.
% With RHO smaller than 1, a moving average is used instead, which
% prevents the update from monotonically decreasing to 0.

% Copyright (C) 2016 Joao F. Henriques.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).

if nargin == 0  % Return the default solver options
  w = struct('epsilon', 1e-10, 'rho', 1) ;
  return ;
end

g_sqr = g_sqr * opts.rho + grad.^2 ;

w = w - lr * grad ./ (sqrt(g_sqr) + opts.epsilon) ;
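
To illustrate the RHO option documented above, a small standalone sketch (illustrative values, not part of the commit): with rho = 1 the accumulator grows without bound so the step keeps shrinking, while rho < 1 converges to a bounded moving average.

```
% Sketch: effective AdaGrad step size after 1000 iterations of a
% constant gradient of 1, for rho = 1 (standard) vs. rho = 0.9.
lr = 0.001 ; epsilon = 1e-10 ;
for rho = [1, 0.9]
  g_sqr = 0 ;
  for t = 1:1000
    g_sqr = g_sqr * rho + 1 ;  % grad.^2 == 1 at every step
  end
  % rho = 1.0: g_sqr = 1000, so the step has shrunk to ~3.2e-05.
  % rho = 0.9: g_sqr converges to ~10, so the step stabilizes at ~3.2e-04.
  fprintf('rho = %.1f: step = %g\n', rho, lr / (sqrt(g_sqr) + epsilon)) ;
end
```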
75 changes: 75 additions & 0 deletions +solver/adam.m
@@ -0,0 +1,75 @@
function [w, state] = adam(w, state, grad, opts, lr)
%ADAM
% Adam solver, for use with CNN_TRAIN and CNN_TRAIN_DAG.
%
% See [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
% | ([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
%
% If called without any input argument, returns the default options
% structure. Otherwise provide all input arguments.
%
% W is the vector/matrix/tensor of parameters. It can be single/double
% precision and can be a `gpuArray`.
%
% STATE is the solver state defined below; the supported OPTS are also
% described below.
%
% GRAD is the gradient of the objective w.r.t. W.
%
% LR is the learning rate, referred to as \alpha by Algorithm 1 in
% [Kingma et al., 2014].
%
% Solver options: (opts.train.solverOpts)
%
% `beta1`:: 0.9
% Decay rate for the 1st moment vector. See Algorithm 1 in [Kingma et al., 2014].
%
% `beta2`:: 0.999
% Decay rate for the 2nd moment vector.
%
% `eps`:: 1e-8
% Small additive offset when dividing by sqrt(state.v).
%
% The state is initialized to the number 0. The first call to this
% function replaces it with the default state struct, consisting of:
%
% `m`:: 0
% First moment vector
%
% `v`:: 0
% Second moment vector
%
% `t`:: 0
% Global iteration number across epochs
%
% This implementation is borrowed from Torch's optim.adam.

% Copyright (C) 2016 Aravindh Mahendran.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).

if nargin == 0  % Return the default solver options
  w = struct('beta1', 0.9, 'beta2', 0.999, 'eps', 1e-8) ;
  return ;
end

if isequal(state, 0)  % First call: state arrives as 0, so build the default state
  state = struct('m', 0, 'v', 0, 't', 0) ;
end

% update first moment vector `m`
state.m = opts.beta1 * state.m + (1 - opts.beta1) * grad ;

% update second moment vector `v`
state.v = opts.beta2 * state.v + (1 - opts.beta2) * grad.^2 ;

% update the time step
state.t = state.t + 1 ;

% This implicitly corrects for biased estimates of first and second moment
% vectors
lr_t = lr * (((1 - opts.beta2^state.t)^0.5) / (1 - opts.beta1^state.t)) ;

% Update `w`
w = w - lr_t * state.m ./ (state.v.^0.5 + opts.eps) ;
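
As a sanity check on the folded LR_T above, a sketch of the explicit bias-corrected update from Algorithm 1 of [Kingma et al., 2014] (not part of the commit); starting from the same pre-update W and state, the two forms agree except that EPS is added to sqrt(v) in the code above rather than to sqrt(v_hat):

```
% Sketch: explicit bias correction, for comparison with the folded form.
m_hat = state.m / (1 - opts.beta1^state.t) ;         % bias-corrected 1st moment
v_hat = state.v / (1 - opts.beta2^state.t) ;         % bias-corrected 2nd moment
w_ref = w - lr * m_hat ./ (sqrt(v_hat) + opts.eps) ; % reference Adam update
```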
34 changes: 34 additions & 0 deletions +solver/rmsprop.m
@@ -0,0 +1,34 @@
function [w, g_sqr] = rmsprop(w, g_sqr, grad, opts, lr)
%RMSPROP
% Example RMSProp solver, for use with CNN_TRAIN and CNN_TRAIN_DAG.
%
% Set the initial learning rate for RMSProp in the options for
% CNN_TRAIN and CNN_TRAIN_DAG. Note that a learning rate that works for
% SGD may be inappropriate for RMSProp; the default is 0.001.
%
% If called without any input argument, returns the default options
% structure.
%
% Solver options: (opts.train.solverOpts)
%
% `epsilon`:: 1e-8
% Small additive constant to regularize variance estimate.
%
% `rho`:: 0.99
% Moving average window for variance update, between 0 and 1 (larger
% values result in slower/more stable updating).

% Copyright (C) 2016 Joao F. Henriques.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).

if nargin == 0  % Return the default solver options
  w = struct('epsilon', 1e-8, 'rho', 0.99) ;
  return ;
end

g_sqr = g_sqr * opts.rho + grad.^2 * (1 - opts.rho) ;

w = w - lr * grad ./ (sqrt(g_sqr) + opts.epsilon) ;
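
Worth noting against `+solver/adagrad.m` from this same commit: the only difference in the accumulator is the (1 - rho) weight on the incoming squared gradient (both lines below are copied from the files above):

```
g_sqr = g_sqr * opts.rho + grad.^2 ;                  % AdaGrad (rho defaults to 1)
g_sqr = g_sqr * opts.rho + grad.^2 * (1 - opts.rho) ; % RMSProp (rho defaults to 0.99)
```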
28 changes: 28 additions & 0 deletions README.md
@@ -0,0 +1,28 @@
# ADNet-cvpr2017

- ADNet: Action-Decision Networks for Visual Tracking with Deep Reinforcement Learning
- [[Project Page]](https://sites.google.com/view/cvpr2017-adnet) [[Paper]](https://drive.google.com/open?id=0B34VXh5mZ22cZUs2Umc1cjlBMFU)
- [[Homepage]](https://sites.google.com/view/sdyunhome/)

### News
Test code was uploaded (15 July 2017).

### Citation
```
@InProceedings{yun2017adnet,
title={Action-Decision Networks for Visual Tracking with Deep Reinforcement Learning},
author={Yun, Sangdoo and Choi, Jongwon and Yoo, Youngjoon and Yun, Kimin and Young Choi, Jin},
booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year={2017}
}
```

### Installation
- This code was tested on 64-bit Linux (Ubuntu 14.04), MATLAB R2017a, and CUDA 8.0 with an NVIDIA GTX 1080 Ti.
- Run `adnet_compile.m` to compile MatConvNet.

### Run tracking
- Run `adnet_demo.m`.
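
A short usage sketch based on the `adnet_demo.m` signature in this commit (the second sequence path is a hypothetical example):

```
% Track the bundled default sequence (data/Freeman1):
[t, p, results] = adnet_demo() ;
% Or pass the path of another sequence directory (hypothetical example):
[t, p, results] = adnet_demo('data/MySequence') ;
```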



28 changes: 28 additions & 0 deletions adnet_compile.m
@@ -0,0 +1,28 @@
function adnet_compile
% ADNET_COMPILE
%
% Sangdoo Yun, 2017.
if ispc
  cudaRoot = 'C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5';
else
  cudaRoot = '/usr/local/cuda-8.0';
end

fprintf('compile matconvnet\n');
run matconvnet/matlab/vl_setupnn.m
cd matconvnet/
vl_compilenn('enableGpu', true, ...
             'cudaRoot', cudaRoot, ...
             'cudaMethod', 'nvcc');
% vl_compilenn('enableGpu', true);
cd ..

cd utils/
addpath cropRectanglesMex
if ispc
  run build_cropRectanglesMex_on_windows.m
else
  run build_cropRectanglesMex.m
end
cd ..
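
If CUDA is installed elsewhere, adjust `cudaRoot` above; MatConvNet can also be built without GPU support (a sketch using `vl_compilenn`'s `enableGpu` option):

```
% Hypothetical CPU-only fallback (no CUDA toolkit required):
vl_compilenn('enableGpu', false) ;
```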

25 changes: 25 additions & 0 deletions adnet_demo.m
@@ -0,0 +1,25 @@
function [t, p, results] = adnet_demo(vid_path)
% ADNET_DEMO Demonstrate `action-decision network'
%
% Sangdoo Yun, 2017.

if nargin < 1
  vid_path = 'data/Freeman1';
end

addpath('test/');
addpath(genpath('utils/'));

init_settings;

run(matconvnet_path);   % set up MatConvNet (path defined in init_settings)

load('models/net_rl.mat');   % provides the trained tracking network `net`

opts.visualize = true;
opts.printscreen = true;

rng(1004);   % fix the random seed for reproducibility
[results, t, p] = test_demo(net, vid_path, opts);
% p(20): precision at a 20-pixel location-error threshold.
fprintf('precision: %f, fps: %f\n', p(20), size(results, 1)/t);
