infer_rec.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import os
import sys

__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))

os.environ["FLAGS_allocator_strategy"] = 'auto_growth'

import paddle

from ppocr.data import create_operators, transform
from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.utils.save_load import init_model
from ppocr.utils.utility import get_image_file_list
import tools.program as program
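
# Text-recognition inference entry point: builds the post-process class and model
# from the YAML config, patches the eval transforms for label-free inference, then
# runs recognition on every image under Global.infer_img and logs the decoded results.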
def main():
    global_config = config['Global']

    # build post process
    post_process_class = build_post_process(config['PostProcess'],
                                            global_config)

    # build model
    if hasattr(post_process_class, 'character'):
        config['Architecture']["Head"]['out_channels'] = len(
            getattr(post_process_class, 'character'))
    model = build_model(config['Architecture'])

    init_model(config, model, logger)

    # create data ops
    transforms = []
    for op in config['Eval']['dataset']['transforms']:
        op_name = list(op)[0]
        if 'Label' in op_name:
            continue
        elif op_name in ['RecResizeImg']:
            op[op_name]['infer_mode'] = True
        elif op_name == 'KeepKeys':
            if config['Architecture']['algorithm'] == "SRN":
                op[op_name]['keep_keys'] = [
                    'image', 'encoder_word_pos', 'gsrm_word_pos',
                    'gsrm_slf_attn_bias1', 'gsrm_slf_attn_bias2'
                ]
            else:
                op[op_name]['keep_keys'] = ['image']
        transforms.append(op)
    global_config['infer_mode'] = True
    ops = create_operators(transforms, global_config)
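
    # Run inference image by image; SRN additionally consumes the word-position
    # encodings and self-attention bias tensors produced by the transforms above.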
    model.eval()
    for file in get_image_file_list(config['Global']['infer_img']):
        logger.info("infer_img: {}".format(file))
        with open(file, 'rb') as f:
            img = f.read()
            data = {'image': img}
        batch = transform(data, ops)
        if config['Architecture']['algorithm'] == "SRN":
            encoder_word_pos_list = np.expand_dims(batch[1], axis=0)
            gsrm_word_pos_list = np.expand_dims(batch[2], axis=0)
            gsrm_slf_attn_bias1_list = np.expand_dims(batch[3], axis=0)
            gsrm_slf_attn_bias2_list = np.expand_dims(batch[4], axis=0)

            others = [
                paddle.to_tensor(encoder_word_pos_list),
                paddle.to_tensor(gsrm_word_pos_list),
                paddle.to_tensor(gsrm_slf_attn_bias1_list),
                paddle.to_tensor(gsrm_slf_attn_bias2_list)
            ]

        images = np.expand_dims(batch[0], axis=0)
        images = paddle.to_tensor(images)
        if config['Architecture']['algorithm'] == "SRN":
            preds = model(images, others)
        else:
            preds = model(images)
        post_result = post_process_class(preds)
        for rec_result in post_result:
            logger.info('\t result: {}'.format(rec_result))
    logger.info("success!")

if __name__ == '__main__':
    config, device, logger, vdl_writer = program.preprocess()
    main()