# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import cv2
import its.caps
import its.device
import its.error
import its.image
import its.objects
import numpy
CHART_FILE = os.path.join(os.environ['CAMERA_ITS_TOP'], 'pymodules', 'its',
                          'test_images', 'ISO12233.png')
CHART_HEIGHT = 13.5  # cm
CHART_DISTANCE_RFOV = 30.0  # cm
CHART_DISTANCE_WFOV = 22.0  # cm
CHART_SCALE_START = 0.65
CHART_SCALE_STOP = 1.35
CHART_SCALE_STEP = 0.025

FOV_THRESH_TELE = 60  # degrees
FOV_THRESH_WFOV = 90  # degrees

SCALE_RFOV_IN_WFOV_BOX = 0.67
SCALE_TELE_IN_RFOV_BOX = 0.67
SCALE_TELE_IN_WFOV_BOX = 0.5

VGA_HEIGHT = 480
VGA_WIDTH = 640


def calc_chart_scaling(chart_distance, camera_fov):
    """Calculate the chart scale factor from camera FoV and chart distance.

    Args:
        chart_distance: float; distance in cm from camera to displayed chart
        camera_fov:     float; camera field of view in degrees

    Returns:
        chart_scaling:  float; scaling factor for the chart
    """
    chart_scaling = 1.0
    camera_fov = float(camera_fov)
    if (FOV_THRESH_TELE < camera_fov < FOV_THRESH_WFOV and
                numpy.isclose(chart_distance, CHART_DISTANCE_WFOV, rtol=0.1)):
        chart_scaling = SCALE_RFOV_IN_WFOV_BOX
    elif (camera_fov <= FOV_THRESH_TELE and
          numpy.isclose(chart_distance, CHART_DISTANCE_WFOV, rtol=0.1)):
        chart_scaling = SCALE_TELE_IN_WFOV_BOX
    elif (camera_fov <= FOV_THRESH_TELE and
          numpy.isclose(chart_distance, CHART_DISTANCE_RFOV, rtol=0.1)):
        chart_scaling = SCALE_TELE_IN_RFOV_BOX
    return chart_scaling


def scale_img(img, scale=1.0):
    """Scale an image based on a real number scale factor."""
    dim = (int(img.shape[1]*scale), int(img.shape[0]*scale))
    return cv2.resize(img.copy(), dim, interpolation=cv2.INTER_AREA)


def gray_scale_img(img):
    """Return gray scale version of image."""
    if len(img.shape) == 2:
        img_gray = img.copy()
    elif len(img.shape) == 3:
        if img.shape[2] == 1:
            img_gray = img[:, :, 0].copy()
        else:
            img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    else:
        raise its.error.Error('Unsupported image shape: %s' % str(img.shape))
    return img_gray


class Chart(object):
    """Definition for chart object.

    Defines PNG reference file, chart size and distance, and scaling range.
    """

    def __init__(self, chart_file=None, height=None, distance=None,
                 scale_start=None, scale_stop=None, scale_step=None,
                 camera_id=None):
        """Initial constructor for class.

        Args:
            chart_file:     str; absolute path to png file of chart
            height:         float; height in cm of displayed chart
            distance:       float; distance in cm from camera of displayed chart
            scale_start:    float; start value for scaling for chart search
            scale_stop:     float; stop value for scaling for chart search
            scale_step:     float; step value for scaling for chart search
            camera_id:      int; id of the camera used to locate the chart
        """
        self._file = chart_file or CHART_FILE
        self._height = height or CHART_HEIGHT
        self._distance = distance or CHART_DISTANCE_RFOV
        self._scale_start = scale_start or CHART_SCALE_START
        self._scale_stop = scale_stop or CHART_SCALE_STOP
        self._scale_step = scale_step or CHART_SCALE_STEP
        self.xnorm, self.ynorm, self.wnorm, self.hnorm, self.scale = (
                its.image.chart_located_per_argv())
        if not self.xnorm:
            with its.device.ItsSession(camera_id) as cam:
                props = cam.get_camera_properties()
                if its.caps.read_3a(props):
                    self.locate(cam, props)
                else:
                    print 'Chart locator skipped.'
                    self._set_scale_factors_to_one()

    def _set_scale_factors_to_one(self):
        """Set scale factors to 1.0 for skipped tests."""
        self.wnorm = 1.0
        self.hnorm = 1.0
        self.xnorm = 0.0
        self.ynorm = 0.0
        self.scale = 1.0

    def _calc_scale_factors(self, cam, props, fmt, s, e, fd):
        """Take an image with s, e, & fd to find the chart location.

        Args:
            cam:            An open device session.
            props:          Properties of cam.
            fmt:            Image format for the capture.
            s:              Sensitivity for the AF request as defined in
                            android.sensor.sensitivity
            e:              Exposure time for the AF request as defined in
                            android.sensor.exposureTime
            fd:             float; autofocus lens position
        Returns:
            template:       numpy array; chart template for locator
            img_3a:         numpy array; RGB image for chart location
            scale_factor:   float; scaling factor for chart search
        """
        req = its.objects.manual_capture_request(s, e)
        req['android.lens.focusDistance'] = fd
        cap_chart = its.image.stationary_lens_cap(cam, req, fmt)
        img_3a = its.image.convert_capture_to_rgb_image(cap_chart, props)
        img_3a = its.image.rotate_img_per_argv(img_3a)
        its.image.write_image(img_3a, 'af_scene.jpg')
        template = cv2.imread(self._file, cv2.IMREAD_ANYDEPTH)
        focal_l = cap_chart['metadata']['android.lens.focalLength']
        pixel_pitch = (props['android.sensor.info.physicalSize']['height'] /
                       img_3a.shape[0])
        print ' Chart distance: %.2fcm' % self._distance
        print ' Chart height: %.2fcm' % self._height
        print ' Focal length: %.2fmm' % focal_l
        print ' Pixel pitch: %.2fum' % (pixel_pitch*1E3)
        print ' Template height: %dpixels' % template.shape[0]
        chart_pixel_h = self._height * focal_l / (self._distance * pixel_pitch)
        scale_factor = template.shape[0] / chart_pixel_h
        print 'Chart/image scale factor = %.2f' % scale_factor
        return template, img_3a, scale_factor

    def locate(self, cam, props):
        """Find the chart in the image and store its location in the object.

        The attributes set are:
            xnorm:          float; [0, 1] left loc of chart in scene
            ynorm:          float; [0, 1] top loc of chart in scene
            wnorm:          float; [0, 1] width of chart in scene
            hnorm:          float; [0, 1] height of chart in scene
            scale:          float; scale factor to extract chart

        Args:
            cam:            An open device session
            props:          Camera properties
        """
        if its.caps.read_3a(props):
            s, e, _, _, fd = cam.do_3a(get_results=True)
            fmt = {'format': 'yuv', 'width': VGA_WIDTH, 'height': VGA_HEIGHT}
            chart, scene, s_factor = self._calc_scale_factors(cam, props, fmt,
                                                              s, e, fd)
        else:
            print 'Chart locator skipped.'
            self._set_scale_factors_to_one()
            return
        scale_start = self._scale_start * s_factor
        scale_stop = self._scale_stop * s_factor
        scale_step = self._scale_step * s_factor
        self.scale = s_factor
        max_match = []
        # check for normalized image
        if numpy.amax(scene) <= 1.0:
            scene = (scene * 255.0).astype(numpy.uint8)
        scene_gray = gray_scale_img(scene)
        print 'Finding chart in scene...'
        for scale in numpy.arange(scale_start, scale_stop, scale_step):
            scene_scaled = scale_img(scene_gray, scale)
            if (scene_scaled.shape[0] < chart.shape[0] or
                        scene_scaled.shape[1] < chart.shape[1]):
                continue
            result = cv2.matchTemplate(scene_scaled, chart, cv2.TM_CCOEFF)
            _, opt_val, _, top_left_scaled = cv2.minMaxLoc(result)
            # print out scale and match
            print ' scale factor: %.3f, opt val: %.f' % (scale, opt_val)
            # Store the scale with each result so skipped scales cannot
            # shift the index-to-scale mapping.
            max_match.append((opt_val, scale, top_left_scaled))

        # determine if optimization results are valid
        opt_values = [x[0] for x in max_match]
        if 2.0*min(opt_values) > max(opt_values):
            estring = ('Warning: unable to find chart in scene!\n'
                       'Check camera distance and self-reported '
                       'pixel pitch, focal length and hyperfocal distance.')
            print estring
            self._set_scale_factors_to_one()
        else:
            if (max(opt_values) == opt_values[0] or
                        max(opt_values) == opt_values[-1]):
                estring = ('Warning: chart is at extreme range of locator '
                           'check.\n')
                print estring
            # Find the best match and compute the chart bounding box.
            match_index = max_match.index(max(max_match, key=lambda x: x[0]))
            self.scale = max_match[match_index][1]
            print 'Optimum scale factor: %.3f' % self.scale
            top_left_scaled = max_match[match_index][2]
            h, w = chart.shape
            bottom_right_scaled = (top_left_scaled[0] + w,
                                   top_left_scaled[1] + h)
            top_left = (int(top_left_scaled[0]/self.scale),
                        int(top_left_scaled[1]/self.scale))
            bottom_right = (int(bottom_right_scaled[0]/self.scale),
                            int(bottom_right_scaled[1]/self.scale))
            self.wnorm = float(bottom_right[0] - top_left[0]) / scene.shape[1]
            self.hnorm = float(bottom_right[1] - top_left[1]) / scene.shape[0]
            self.xnorm = float(top_left[0]) / scene.shape[1]
            self.ynorm = float(top_left[1]) / scene.shape[0]
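

# Example usage of Chart (a sketch only, not exercised by the unit tests:
# it requires a connected device with the chart in the scene, and assumes
# the device under test is camera id 0):
#
#     chart = Chart(camera_id=0)
#     print chart.xnorm, chart.ynorm, chart.wnorm, chart.hnorm, chart.scale
#
# The normalized values describe where the chart sits in the captured scene
# and can be used to crop the chart region from subsequent captures.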


def get_angle(input_img):
    """Computes angular inclination of chessboard in input_img.

    Angle estimation algorithm description:
        Input: 2D grayscale image of chessboard.
        Output: Angle of rotation of chessboard about the axis
            perpendicular to it. Assumes chessboard and camera are
            parallel to each other.

        1) Use adaptive threshold to make image binary
        2) Find contours
        3) Filter out small contours
        4) Filter out all non-square contours
        5) Compute most common square shape.
            The assumption here is that the most common square instances
            are the chessboard squares. We've shown that with our current
            tuning, we can robustly identify the squares on the sensor fusion
            chessboard.
        6) Return median angle of most common square shape.

    USAGE NOTE: This function has been tuned to work for the chessboard used in
    the sensor_fusion tests. See images in test_images/rotated_chessboards/ for
    sample captures. If this function is used with other chessboards, it may not
    work as expected.

    TODO: Make algorithm more robust so it works on any type of
    chessboard.

    Args:
        input_img (2D numpy.ndarray): Grayscale image stored as a 2D
            numpy array.

    Returns:
        Median angle, in degrees, of the identified chessboard squares, or
        None if fewer than 10 squares are found.
    """
    # Tuning parameters
    min_square_area = float(input_img.shape[1] * 0.05)

    # Creates copy of image to avoid modifying original.
    img = numpy.array(input_img, copy=True)

    # Scale pixel values from 0-1 to 0-255
    img *= 255
    img = img.astype(numpy.uint8)

    thresh = cv2.adaptiveThreshold(
            img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 201, 2)

    # Find all contours
    contours = []
    cv2_version = cv2.__version__
    if cv2_version.startswith('2.4.'):
        contours, _ = cv2.findContours(
                thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    elif cv2_version.startswith('3.2.'):
        _, contours, _ = cv2.findContours(
                thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Filter contours to squares only.
    square_contours = []

    for contour in contours:
        rect = cv2.minAreaRect(contour)
        _, (width, height), angle = rect

        # Skip non-squares (with 0.1 tolerance)
        tolerance = 0.1
        if width < height * (1 - tolerance) or width > height * (1 + tolerance):
            continue

        # Remove very small contours.
        # These are usually just tiny dots due to noise.
        area = cv2.contourArea(contour)
        if area < min_square_area:
            continue

        # Box corners are not used in the angle estimate below.
        if cv2_version.startswith('2.4.'):
            box = numpy.int0(cv2.cv.BoxPoints(rect))
        elif cv2_version.startswith('3.2.'):
            box = numpy.int0(cv2.boxPoints(rect))
        square_contours.append(contour)

    areas = []
    for contour in square_contours:
        area = cv2.contourArea(contour)
        areas.append(area)

    median_area = numpy.median(areas)

    filtered_squares = []
    filtered_angles = []
    for square in square_contours:
        area = cv2.contourArea(square)
        if area < median_area * 0.90 or area > median_area * 1.10:
            continue

        filtered_squares.append(square)
        _, (width, height), angle = cv2.minAreaRect(square)
        filtered_angles.append(angle)

    if len(filtered_angles) < 10:
        return None

    return numpy.median(filtered_angles)


class __UnitTest(unittest.TestCase):
    """Run a suite of unit tests on this module."""
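
    def test_gray_scale_img_and_scale_img(self):
        """Unit test for gray_scale_img and scale_img.

        A minimal sketch using a synthetic VGA-sized array rather than a
        capture; it only checks output dimensions, not pixel values.
        """
        rgb = numpy.zeros((VGA_HEIGHT, VGA_WIDTH, 3), dtype=numpy.uint8)
        gray = gray_scale_img(rgb)
        self.assertEqual(gray.shape, (VGA_HEIGHT, VGA_WIDTH))
        # A single-channel image should pass through with the same size.
        self.assertEqual(gray_scale_img(gray).shape, (VGA_HEIGHT, VGA_WIDTH))
        # Scaling by 0.5 should halve both dimensions.
        half = scale_img(gray, 0.5)
        self.assertEqual(half.shape, (VGA_HEIGHT//2, VGA_WIDTH//2))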

    def test_compute_image_sharpness(self):
        """Unit test for compute_image_sharpness.

        Test by using PNG of ISO12233 chart and blurring intentionally.
        'sharpness' should drop off by sqrt(2) for a 2x blur of the image.

        We do one level of blur as PNG image is not perfect.
        """
        yuv_full_scale = 1023.0
        chart_file = os.path.join(os.environ['CAMERA_ITS_TOP'], 'pymodules',
                                  'its', 'test_images', 'ISO12233.png')
        chart = cv2.imread(chart_file, cv2.IMREAD_ANYDEPTH)
        white_level = numpy.amax(chart).astype(float)
        sharpness = {}
        for j in [2, 4, 8]:
            blur = cv2.blur(chart, (j, j))
            blur = blur[:, :, numpy.newaxis]
            sharpness[j] = (yuv_full_scale *
                            its.image.compute_image_sharpness(blur /
                                                              white_level))
        self.assertTrue(numpy.isclose(sharpness[2]/sharpness[4],
                                      numpy.sqrt(2), atol=0.1))
        self.assertTrue(numpy.isclose(sharpness[4]/sharpness[8],
                                      numpy.sqrt(2), atol=0.1))
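
    def test_calc_chart_scaling(self):
        """Sanity check calc_chart_scaling against the module constants.

        A minimal sketch: the FoV values below are arbitrary examples chosen
        to fall on either side of FOV_THRESH_TELE and FOV_THRESH_WFOV; the
        expected outputs are the SCALE_* constants defined above.
        """
        # RFOV camera (60 < FoV < 90 degrees) tested in the WFOV rig box.
        self.assertEqual(calc_chart_scaling(CHART_DISTANCE_WFOV, 75),
                         SCALE_RFOV_IN_WFOV_BOX)
        # TELE camera (FoV <= 60 degrees) tested in the WFOV rig box.
        self.assertEqual(calc_chart_scaling(CHART_DISTANCE_WFOV, 50),
                         SCALE_TELE_IN_WFOV_BOX)
        # TELE camera tested in the RFOV rig box.
        self.assertEqual(calc_chart_scaling(CHART_DISTANCE_RFOV, 50),
                         SCALE_TELE_IN_RFOV_BOX)
        # WFOV camera (FoV >= 90 degrees): no scaling.
        self.assertEqual(calc_chart_scaling(CHART_DISTANCE_WFOV, 95), 1.0)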

    def test_get_angle_identify_unrotated_chessboard_angle(self):
        """Check get_angle reports 0 degrees for unrotated chessboards."""
        basedir = os.path.join(
                os.path.dirname(__file__), 'test_images/rotated_chessboards/')

        normal_img_path = os.path.join(basedir, 'normal.jpg')
        wide_img_path = os.path.join(basedir, 'wide.jpg')

        normal_img = cv2.cvtColor(
                cv2.imread(normal_img_path), cv2.COLOR_BGR2GRAY)
        wide_img = cv2.cvtColor(
                cv2.imread(wide_img_path), cv2.COLOR_BGR2GRAY)

        self.assertEqual(get_angle(normal_img), 0)
        self.assertEqual(get_angle(wide_img), 0)

    def test_get_angle_identify_rotated_chessboard_angle(self):
        """Check get_angle on chessboards rotated by known angles."""
        basedir = os.path.join(
                os.path.dirname(__file__), 'test_images/rotated_chessboards/')

        # Each tuple is an image file suffix and the chessboard rotation
        # angle in degrees.
        test_cases = [
                ('_15_ccw', 15),
                ('_30_ccw', 30),
                ('_45_ccw', 45),
                ('_60_ccw', 60),
                ('_75_ccw', 75),
                ('_90_ccw', 90)
        ]

        # For each rotated image pair (normal, wide), check that the angle
        # is identified as expected.
        for suffix, angle in test_cases:
            # Define image paths
            normal_img_path = os.path.join(
                    basedir, 'normal{}.jpg'.format(suffix))
            wide_img_path = os.path.join(
                    basedir, 'wide{}.jpg'.format(suffix))

            # Load and color convert images
            normal_img = cv2.cvtColor(
                    cv2.imread(normal_img_path), cv2.COLOR_BGR2GRAY)
            wide_img = cv2.cvtColor(
                    cv2.imread(wide_img_path), cv2.COLOR_BGR2GRAY)

            # Assert angle is as expected to within 2.0 degrees.
            self.assertTrue(numpy.isclose(
                    abs(get_angle(normal_img)), angle, atol=2.0))
            self.assertTrue(numpy.isclose(
                    abs(get_angle(wide_img)), angle, atol=2.0))


if __name__ == '__main__':
    unittest.main()