Test code for the PETA pedestrian-attribute datasets (a modified Caffe tools/caffe.cpp).
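The file keeps Caffe's standard command-line interface (train / test / device_query / time); the PETA-specific part is the test command below, which runs -iterations forward passes (7615 by default, one batch at a time over the test images), thresholds each of the 43 attribute scores at 0.4, accumulates per-attribute TP/TN/FP/FN counts, appends the raw scores to a text file, and reports the average of the per-attribute balanced accuracies. A typical invocation, with placeholder file names (deploy.prototxt and peta.caffemodel stand in for your own files):

caffe test -model deploy.prototxt -weights peta.caffemodel -gpu 0 -iterations 7615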
#ifdef WITH_PYTHON_LAYER
#include "boost/python.hpp"
namespace bp = boost::python;
#endif

#include <glog/logging.h>

#include <cstring>
#include <map>
#include <string>
#include <vector>

#include "boost/algorithm/string.hpp"
#include "boost/lexical_cast.hpp"
#include "caffe/caffe.hpp"
#include "caffe/util/signal_handler.h"
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>

using caffe::Blob;
using caffe::Caffe;
using caffe::Net;
using caffe::Layer;
using caffe::Solver;
using caffe::shared_ptr;
using caffe::string;
using caffe::Timer;
using caffe::vector;
using std::ostringstream;
using std::ofstream;
using std::ios;
using std::endl;

DEFINE_string(gpu, "",
    "Optional; run in GPU mode on given device IDs separated by ','."
    "Use '-gpu all' to run on all available GPUs. The effective training "
    "batch size is multiplied by the number of devices.");
DEFINE_string(solver, "",
    "The solver definition protocol buffer text file.");
DEFINE_string(model, "",
    "The model definition protocol buffer text file.");
DEFINE_string(snapshot, "",
    "Optional; the snapshot solver state to resume training.");
DEFINE_string(weights, "",
    "Optional; the pretrained weights to initialize finetuning, "
    "separated by ','. Cannot be set simultaneously with snapshot.");
DEFINE_int32(iterations, 7615,
    "The number of iterations to run.");
DEFINE_string(sigint_effect, "stop",
    "Optional; action to take when a SIGINT signal is received: "
    "snapshot, stop or none.");
DEFINE_string(sighup_effect, "snapshot",
    "Optional; action to take when a SIGHUP signal is received: "
    "snapshot, stop or none.");

// A simple registry for caffe commands.
typedef int (*BrewFunction)();
typedef std::map<caffe::string, BrewFunction> BrewMap;
BrewMap g_brew_map;

#define RegisterBrewFunction(func) \
namespace { \
class __Registerer_##func { \
 public: /* NOLINT */ \
  __Registerer_##func() { \
    g_brew_map[#func] = &func; \
  } \
}; \
__Registerer_##func g_registerer_##func; \
}

static BrewFunction GetBrewFunction(const caffe::string& name) {
  if (g_brew_map.count(name)) {
    return g_brew_map[name];
  } else {
    LOG(ERROR) << "Available caffe actions:";
    for (BrewMap::iterator it = g_brew_map.begin();
         it != g_brew_map.end(); ++it) {
      LOG(ERROR) << "\t" << it->first;
    }
    LOG(FATAL) << "Unknown action: " << name;
    return NULL;  // not reachable, just to suppress old compiler warnings.
  }
}

// Parse GPU ids or use all available devices
static void get_gpus(vector<int>* gpus) {
  if (FLAGS_gpu == "all") {
    int count = 0;
#ifndef CPU_ONLY
    CUDA_CHECK(cudaGetDeviceCount(&count));
#else
    NO_GPU;
#endif
    for (int i = 0; i < count; ++i) {
      gpus->push_back(i);
    }
  } else if (FLAGS_gpu.size()) {
    vector<string> strings;
    boost::split(strings, FLAGS_gpu, boost::is_any_of(","));
    for (int i = 0; i < strings.size(); ++i) {
      gpus->push_back(boost::lexical_cast<int>(strings[i]));
    }
  } else {
    CHECK_EQ(gpus->size(), 0);
  }
}
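// The accepted forms of the -gpu flag, all handled above: "" (fall back to
// CPU, or to the solver's own setting in train()), a single id such as "0",
// a comma-separated list such as "0,2", or "all" for every visible device.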

// caffe commands to call by
// caffe <command> <args>
//
// To add a command, define a function "int command()" and register it with
// RegisterBrewFunction(command);

// Device Query: show diagnostic information for a GPU device.
int device_query() {
  LOG(INFO) << "Querying GPUs " << FLAGS_gpu;
  vector<int> gpus;
  get_gpus(&gpus);
  for (int i = 0; i < gpus.size(); ++i) {
    caffe::Caffe::SetDevice(gpus[i]);
    caffe::Caffe::DeviceQuery();
  }
  return 0;
}
RegisterBrewFunction(device_query);

// Load the weights from the specified caffemodel(s) into the train and
// test nets.
void CopyLayers(caffe::Solver<float>* solver, const std::string& model_list) {
  std::vector<std::string> model_names;
  boost::split(model_names, model_list, boost::is_any_of(","));
  for (int i = 0; i < model_names.size(); ++i) {
    LOG(INFO) << "Finetuning from " << model_names[i];
    solver->net()->CopyTrainedLayersFrom(model_names[i]);
    for (int j = 0; j < solver->test_nets().size(); ++j) {
      solver->test_nets()[j]->CopyTrainedLayersFrom(model_names[i]);
    }
  }
}
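//
// So, for example, -weights "a.caffemodel,b.caffemodel" (placeholder names)
// loads both files in order; parameters of layers present in both files end
// up with the values from the later one.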

// Translate the signal effect the user specified on the command-line to the
// corresponding enumeration.
caffe::SolverAction::Enum GetRequestedAction(
    const std::string& flag_value) {
  if (flag_value == "stop") {
    return caffe::SolverAction::STOP;
  }
  if (flag_value == "snapshot") {
    return caffe::SolverAction::SNAPSHOT;
  }
  if (flag_value == "none") {
    return caffe::SolverAction::NONE;
  }
  LOG(FATAL) << "Invalid signal effect \"" << flag_value << "\" was specified";
  return caffe::SolverAction::NONE;  // not reachable; LOG(FATAL) aborts.
}

// Train / Finetune a model.
int train() {
  CHECK_GT(FLAGS_solver.size(), 0) << "Need a solver definition to train.";
  CHECK(!FLAGS_snapshot.size() || !FLAGS_weights.size())
      << "Give a snapshot to resume training or weights to finetune "
      "but not both.";

  caffe::SolverParameter solver_param;
  caffe::ReadProtoFromTextFileOrDie(FLAGS_solver, &solver_param);

  // If the gpus flag is not provided, allow the mode and device to be set
  // in the solver prototxt.
  if (FLAGS_gpu.size() == 0
      && solver_param.solver_mode() == caffe::SolverParameter_SolverMode_GPU) {
    if (solver_param.has_device_id()) {
      FLAGS_gpu = boost::lexical_cast<string>(solver_param.device_id());
    } else {  // Set default GPU if unspecified
      FLAGS_gpu = boost::lexical_cast<string>(0);
    }
  }

  vector<int> gpus;
  get_gpus(&gpus);
  if (gpus.size() == 0) {
    LOG(INFO) << "Use CPU.";
    Caffe::set_mode(Caffe::CPU);
  } else {
    ostringstream s;
    for (int i = 0; i < gpus.size(); ++i) {
      s << (i ? ", " : "") << gpus[i];
    }
    LOG(INFO) << "Using GPUs " << s.str();

    solver_param.set_device_id(gpus[0]);
    Caffe::SetDevice(gpus[0]);
    Caffe::set_mode(Caffe::GPU);
    Caffe::set_solver_count(gpus.size());
  }

  caffe::SignalHandler signal_handler(
      GetRequestedAction(FLAGS_sigint_effect),
      GetRequestedAction(FLAGS_sighup_effect));

  shared_ptr<caffe::Solver<float> >
      solver(caffe::GetSolver<float>(solver_param));

  solver->SetActionFunction(signal_handler.GetActionFunction());

  if (FLAGS_snapshot.size()) {
    LOG(INFO) << "Resuming from " << FLAGS_snapshot;
    solver->Restore(FLAGS_snapshot.c_str());
  } else if (FLAGS_weights.size()) {
    CopyLayers(solver.get(), FLAGS_weights);
  }

  if (gpus.size() > 1) {
    caffe::P2PSync<float> sync(solver, NULL, solver->param());
    sync.run(gpus);
  } else {
    LOG(INFO) << "Starting Optimization";
    solver->Solve();
  }
  LOG(INFO) << "Optimization Done.";
  return 0;
}
RegisterBrewFunction(train);
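//
// For reference, a minimal solver definition passed via -solver might look
// like the sketch below (standard caffe::SolverParameter fields; the file
// name and values are placeholders, not part of this tool):
//   net: "train_val.prototxt"
//   base_lr: 0.001
//   lr_policy: "step"
//   gamma: 0.1
//   stepsize: 10000
//   max_iter: 50000
//   snapshot: 10000
//   snapshot_prefix: "peta"
//   solver_mode: GPU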

// Test: score a model.
int test() {
  CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to score.";
  CHECK_GT(FLAGS_weights.size(), 0) << "Need model weights to score.";

  // Set device id and mode
  vector<int> gpus;
  get_gpus(&gpus);
  if (gpus.size() != 0) {
    LOG(INFO) << "Use GPU with device ID " << gpus[0];
    Caffe::SetDevice(gpus[0]);
    Caffe::set_mode(Caffe::GPU);
  } else {
    LOG(INFO) << "Use CPU.";
    Caffe::set_mode(Caffe::CPU);
  }

  // Instantiate the caffe net.
  Net<float> caffe_net(FLAGS_model, caffe::TEST);
  caffe_net.CopyTrainedLayersFrom(FLAGS_weights);
  LOG(INFO) << "Running for " << FLAGS_iterations << " iterations.";

  vector<Blob<float>*> bottom_vec;
  vector<int> test_score_output_id;
  vector<float> test_score;

  // Per-attribute confusion-matrix counters for the 43 PETA attributes.
  const int att_num = 43;
  static int TP[att_num] = {0};
  static int TN[att_num] = {0};
  static int FP[att_num] = {0};
  static int FN[att_num] = {0};

  // Append the predicted scores to a text file, one row of att_num scores
  // per test image. The stream is opened once, outside the loops.
  ofstream score_file(
      "/home/wangxiao/Downloads/whole_benchmark/Sec_Batch_/sec_Batch_unlabel.txt",
      ios::app);
  if (!score_file) return -1;

  for (int i = 0; i < FLAGS_iterations; ++i) {  // one batch per iteration over the test images
    LOG(INFO) << "batch " << i << "/" << FLAGS_iterations << ", waiting...";

    float iter_loss;
    const vector<Blob<float>*>& result =
        caffe_net.Forward(bottom_vec, &iter_loss);  // output blobs

    LOG(INFO) << "result.size: " << result.size() << " " << result[0]->count()
              << " " << result[1]->count() << " " << result[2]->count();

    const float* result_score = result[1]->cpu_data();  // predicted scores
    const float* result_label = result[2]->cpu_data();  // ground-truth labels
    // result[0] holds the image index; it and the ground-truth labels can be
    // dumped to their own files in the same way as the scores below.

    const float threshold_ = 0.4;
    for (int k = 0; k < att_num; ++k) {  // for each of the 43 attributes
      const float Predict_score = result_score[k];
      const float GT_label = result_label[k];

      // Scores at or above the threshold count as positive predictions, so
      // a score exactly equal to the threshold is not dropped.
      if (Predict_score >= threshold_) {
        if (int(GT_label) == 1) ++TP[k]; else ++FP[k];
      } else {
        if (int(GT_label) == 1) ++FN[k]; else ++TN[k];
      }

      score_file << Predict_score << " ";
    }
    score_file << endl;  // one row per image
  }
  score_file.close();

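  // What the loop above accumulates feeds the mean-accuracy (mA) criterion
  // commonly used for pedestrian-attribute benchmarks such as PETA:
  //   acc_k = 0.5 * (TP_k / (TP_k + FN_k) + TN_k / (TN_k + FP_k))
  //   mA    = (1 / att_num) * sum_k acc_k
  // i.e. each attribute's balanced accuracy (mean of the true-positive and
  // true-negative rates), averaged over the 43 attributes. The denominators
  // assume every attribute has at least one positive and one negative test
  // example.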
  double aver_accuracy = 0;

  for (int k = 0; k < att_num; ++k) {
    const double acc = 0.5 * (double(TP[k]) / (TP[k] + FN[k])
                              + double(TN[k]) / (TN[k] + FP[k]));
    aver_accuracy += acc;
    LOG(INFO) << "Accuracy: " << k << " " << acc;
    LOG(INFO) << "TP = " << TP[k];
    LOG(INFO) << "FN = " << FN[k];
    LOG(INFO) << "FP = " << FP[k];
    LOG(INFO) << "TN = " << TN[k];
  }

  LOG(INFO) << "####################";
  LOG(INFO) << " ";
  LOG(INFO) << "Average_accuracy: " << aver_accuracy / att_num;
  LOG(INFO) << " ";
  LOG(INFO) << "####################";

  return 0;
}
RegisterBrewFunction(test);

// Time: benchmark the execution time of a model.
int time() {
  CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to time.";

  // Set device id and mode
  vector<int> gpus;
  get_gpus(&gpus);
  if (gpus.size() != 0) {
    LOG(INFO) << "Use GPU with device ID " << gpus[0];
    Caffe::SetDevice(gpus[0]);
    Caffe::set_mode(Caffe::GPU);
  } else {
    LOG(INFO) << "Use CPU.";
    Caffe::set_mode(Caffe::CPU);
  }
  // Instantiate the caffe net.
  Net<float> caffe_net(FLAGS_model, caffe::TRAIN);

  // Do a clean forward and backward pass, so that memory allocation is done
  // and future iterations will be more stable.
  LOG(INFO) << "Performing Forward";
  // Note that for the speed benchmark, we will assume that the network does
  // not take any input blobs.
  float initial_loss;
  caffe_net.Forward(vector<Blob<float>*>(), &initial_loss);
  LOG(INFO) << "Initial loss: " << initial_loss;
  LOG(INFO) << "Performing Backward";
  caffe_net.Backward();

  const vector<shared_ptr<Layer<float> > >& layers = caffe_net.layers();
  const vector<vector<Blob<float>*> >& bottom_vecs = caffe_net.bottom_vecs();
  const vector<vector<Blob<float>*> >& top_vecs = caffe_net.top_vecs();
  const vector<vector<bool> >& bottom_need_backward =
      caffe_net.bottom_need_backward();
  LOG(INFO) << "*** Benchmark begins ***";
  LOG(INFO) << "Testing for " << FLAGS_iterations << " iterations.";
  Timer total_timer;
  total_timer.Start();
  Timer forward_timer;
  Timer backward_timer;
  Timer timer;
  std::vector<double> forward_time_per_layer(layers.size(), 0.0);
  std::vector<double> backward_time_per_layer(layers.size(), 0.0);
  double forward_time = 0.0;
  double backward_time = 0.0;
  for (int j = 0; j < FLAGS_iterations; ++j) {
    Timer iter_timer;
    iter_timer.Start();
    forward_timer.Start();
    for (int i = 0; i < layers.size(); ++i) {
      timer.Start();
      layers[i]->Forward(bottom_vecs[i], top_vecs[i]);
      forward_time_per_layer[i] += timer.MicroSeconds();
    }
    forward_time += forward_timer.MicroSeconds();
    backward_timer.Start();
    for (int i = layers.size() - 1; i >= 0; --i) {
      timer.Start();
      layers[i]->Backward(top_vecs[i], bottom_need_backward[i],
                          bottom_vecs[i]);
      backward_time_per_layer[i] += timer.MicroSeconds();
    }
    backward_time += backward_timer.MicroSeconds();
    LOG(INFO) << "Iteration: " << j + 1 << " forward-backward time: "
              << iter_timer.MilliSeconds() << " ms.";
  }
  LOG(INFO) << "Average time per layer: ";
  for (int i = 0; i < layers.size(); ++i) {
    const caffe::string& layername = layers[i]->layer_param().name();
    LOG(INFO) << std::setfill(' ') << std::setw(10) << layername <<
        "\tforward: " << forward_time_per_layer[i] / 1000 /
        FLAGS_iterations << " ms.";
    LOG(INFO) << std::setfill(' ') << std::setw(10) << layername <<
        "\tbackward: " << backward_time_per_layer[i] / 1000 /
        FLAGS_iterations << " ms.";
  }
  total_timer.Stop();
  LOG(INFO) << "Average Forward pass: " << forward_time / 1000 /
      FLAGS_iterations << " ms.";
  LOG(INFO) << "Average Backward pass: " << backward_time / 1000 /
      FLAGS_iterations << " ms.";
  LOG(INFO) << "Average Forward-Backward: " << total_timer.MilliSeconds() /
      FLAGS_iterations << " ms.";
  LOG(INFO) << "Total Time: " << total_timer.MilliSeconds() << " ms.";
  LOG(INFO) << "*** Benchmark ends ***";
  return 0;
}
RegisterBrewFunction(time);
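//
// Example benchmark run (placeholder file name; -iterations here overrides
// the PETA-specific default of 7615, which would make timing very slow):
//   caffe time -model train_val.prototxt -iterations 50 -gpu 0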

int main(int argc, char** argv) {
  // Print output to stderr (while still logging).
  FLAGS_alsologtostderr = 1;
  // Usage message.
  gflags::SetUsageMessage("command line brew\n"
      "usage: caffe <command> <args>\n\n"
      "commands:\n"
      "  train           train or finetune a model\n"
      "  test            score a model\n"
      "  device_query    show GPU diagnostic information\n"
      "  time            benchmark model execution time");
  // Run tool or show usage.
  caffe::GlobalInit(&argc, &argv);
  if (argc == 2) {
#ifdef WITH_PYTHON_LAYER
    try {
#endif
      return GetBrewFunction(caffe::string(argv[1]))();
#ifdef WITH_PYTHON_LAYER
    } catch (bp::error_already_set) {
      PyErr_Print();
      return 1;
    }
#endif
  } else {
    gflags::ShowUsageWithFlagsRestrict(argv[0], "tools/caffe");
  }
}