// ocr_det.h

// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <include/postprocess_op.h>
#include <include/preprocess_op.h>

#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"

#include "paddle_api.h"
#include "paddle_inference_api.h"

#include <chrono>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <numeric>
#include <ostream>
#include <string>
#include <vector>
  30. using namespace paddle_infer;
  31. namespace PaddleOCR {
  32. class DBDetector {
  33. public:
  34. explicit DBDetector(const std::string &model_dir, const bool &use_gpu,
  35. const int &gpu_id, const int &gpu_mem,
  36. const int &cpu_math_library_num_threads,
  37. const bool &use_mkldnn, const int &max_side_len,
  38. const double &det_db_thresh,
  39. const double &det_db_box_thresh,
  40. const double &det_db_unclip_ratio, const bool &visualize,
  41. const bool &use_tensorrt, const bool &use_fp16) {
  42. this->use_gpu_ = use_gpu;
  43. this->gpu_id_ = gpu_id;
  44. this->gpu_mem_ = gpu_mem;
  45. this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
  46. this->use_mkldnn_ = use_mkldnn;
  47. this->max_side_len_ = max_side_len;
  48. this->det_db_thresh_ = det_db_thresh;
  49. this->det_db_box_thresh_ = det_db_box_thresh;
  50. this->det_db_unclip_ratio_ = det_db_unclip_ratio;
  51. this->visualize_ = visualize;
  52. this->use_tensorrt_ = use_tensorrt;
  53. this->use_fp16_ = use_fp16;
  54. LoadModel(model_dir);
  55. }
  56. // Load Paddle inference model
  57. void LoadModel(const std::string &model_dir);
  58. // Run predictor
  59. void Run(cv::Mat &img, std::vector<std::vector<std::vector<int>>> &boxes);
  60. private:
  61. std::shared_ptr<Predictor> predictor_;
  62. bool use_gpu_ = false;
  63. int gpu_id_ = 0;
  64. int gpu_mem_ = 4000;
  65. int cpu_math_library_num_threads_ = 4;
  66. bool use_mkldnn_ = false;
  67. int max_side_len_ = 960;
  68. double det_db_thresh_ = 0.3;
  69. double det_db_box_thresh_ = 0.5;
  70. double det_db_unclip_ratio_ = 2.0;
  71. bool visualize_ = true;
  72. bool use_tensorrt_ = false;
  73. bool use_fp16_ = false;
  74. std::vector<float> mean_ = {0.485f, 0.456f, 0.406f};
  75. std::vector<float> scale_ = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
  76. bool is_scale_ = true;
  77. // pre-process
  78. ResizeImgType0 resize_op_;
  79. Normalize normalize_op_;
  80. Permute permute_op_;
  81. // post-process
  82. PostProcessor post_processor_;
  83. };
  84. } // namespace PaddleOCR