xref: /aosp_15_r20/cts/apps/CameraITS/utils/low_light_utils.py (revision b7c941bb3fa97aba169d73cee0bed2de8ac964bf)
# Copyright 2024 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for low light camera tests."""

import logging
import os.path

import camera_properties_utils
import capture_request_utils
import cv2
import image_processing_utils
import matplotlib.pyplot as plt
import numpy as np
import opencv_processing_utils

_LOW_LIGHT_BOOST_AVG_DELTA_LUMINANCE_THRESH = 18
_LOW_LIGHT_BOOST_AVG_LUMINANCE_THRESH = 90
_BOUNDING_BOX_COLOR = (0, 255, 0)
_BOX_MIN_SIZE_RATIO = 0.08  # 8% of the cropped image width
_BOX_MAX_SIZE_RATIO = 0.5  # 50% of the cropped image width
_BOX_PADDING_RATIO = 0.2
_CROP_PADDING = 10
_EXPECTED_NUM_OF_BOXES = 20  # The captured image must result in 20 detected
                             # boxes since the test scene has 20 boxes
_KEY_BOTTOM_LEFT = 'bottom_left'
_KEY_BOTTOM_RIGHT = 'bottom_right'
_KEY_TOP_LEFT = 'top_left'
_KEY_TOP_RIGHT = 'top_right'
_MAX_ASPECT_RATIO = 1.2
_MIN_ASPECT_RATIO = 0.8
_RED_BGR_COLOR = (0, 0, 255)
_NUM_CLUSTERS = 8
_K_MEANS_ITERATIONS = 10
_K_MEANS_EPSILON = 0.5
_TEXT_COLOR = (255, 255, 255)
_FIG_SIZE = (10, 6)

# Allowed tablets for low light scenes
# List entries must be entered in lowercase
TABLET_LOW_LIGHT_SCENES_ALLOWLIST = (
    'hwcmr09',  # Huawei MediaPad M5
    'gta8wifi',  # Samsung Galaxy Tab A8
    'gta8',  # Samsung Galaxy Tab A8 LTE
    'gta9pwifi',  # Samsung Galaxy Tab A9+
    'gta9p',  # Samsung Galaxy Tab A9+ 5G
    'nabu',  # Xiaomi Pad 5
    'nabu_tw',  # Xiaomi Pad 5
    'xun',  # Xiaomi Redmi Pad SE
)

# Tablet brightness mapping strings for (rear, front) facing camera tests
# List entries must be entered in lowercase
TABLET_BRIGHTNESS = {
    'hwcmr09': ('4', '8'),  # Huawei MediaPad M5
    'gta8wifi': ('6', '12'),  # Samsung Galaxy Tab A8
    'gta8': ('6', '12'),  # Samsung Galaxy Tab A8 LTE
    'gta9pwifi': ('6', '12'),  # Samsung Galaxy Tab A9+
    'gta9p': ('6', '12'),  # Samsung Galaxy Tab A9+ 5G
    'nabu': ('8', '14'),  # Xiaomi Pad 5
    'nabu_tw': ('8', '14'),  # Xiaomi Pad 5
    'xun': ('6', '12'),  # Xiaomi Redmi Pad SE
}
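
# Example (illustrative): tests select the display brightness for the camera
# under test from the (rear, front) tuple above, e.g.:
#   TABLET_BRIGHTNESS['gta8'][0]  # '6' for rear camera tests
#   TABLET_BRIGHTNESS['gta8'][1]  # '12' for front camera tests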


def get_metering_region(cam, file_stem):
  """Get the metering region for the given image.

  Detects the chart in the preview image and returns the coordinates of the
  chart in the active array.

  Args:
    cam: ItsSession object to send commands.
    file_stem: File prefix for captured images.
  Returns:
    The metering region sensor coordinates in the active array or None if the
    test chart was not detected.
  """
  req = capture_request_utils.auto_capture_request()
  cap = cam.do_capture(req, cam.CAP_YUV)
  img = image_processing_utils.convert_capture_to_rgb_image(cap)
  region_detection_file = f'{file_stem}_region_detection.jpg'
  image_processing_utils.write_image(img, region_detection_file)
  img = cv2.imread(region_detection_file)

  coords = _find_chart_bounding_region(img)
  if coords is None:
    return None

  # Convert image coordinates to sensor coordinates for metering rectangle
  img_w = img.shape[1]
  img_h = img.shape[0]
  props = cam.get_camera_properties()
  aa = props['android.sensor.info.activeArraySize']
  aa_width, aa_height = aa['right'] - aa['left'], aa['bottom'] - aa['top']
  logging.debug('Active array size: %s', aa)
  coords_tl = (coords[0], coords[1])
  coords_br = (coords[0] + coords[2], coords[1] + coords[3])
  s_coords_tl = image_processing_utils.convert_image_coords_to_sensor_coords(
      aa_width, aa_height, coords_tl, img_w, img_h)
  s_coords_br = image_processing_utils.convert_image_coords_to_sensor_coords(
      aa_width, aa_height, coords_br, img_w, img_h)
  sensor_coords = (s_coords_tl[0], s_coords_tl[1], s_coords_br[0],
                   s_coords_br[1])
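  # Note (illustrative; the helper's exact behavior is assumed here):
  # convert_image_coords_to_sensor_coords is expected to scale linearly from
  # image space to the active array, roughly
  #   x_sensor ~= x_img * aa_width / img_w
  #   y_sensor ~= y_img * aa_height / img_h
  # e.g. a point at (100, 50) in a 1000x750 image maps to about (400, 200)
  # on a 4000x3000 active array.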

  # If testing front camera, mirror coordinates either left/right or up/down
  # Previews are mirrored relative to the device's natural orientation
  # For sensor orientation 90 or 270, the flip is up/down
  # For sensor orientation 0 or 180, the flip is left/right
  if (props['android.lens.facing'] ==
      camera_properties_utils.LENS_FACING['FRONT']):
    if props['android.sensor.orientation'] in (90, 270):
      tl_coordinates = (sensor_coords[0], aa_height - sensor_coords[3])
      br_coordinates = (sensor_coords[2], aa_height - sensor_coords[1])
      logging.debug('Found sensor orientation %d, flipping up down',
                    props['android.sensor.orientation'])
    else:
      tl_coordinates = (aa_width - sensor_coords[2], sensor_coords[1])
      br_coordinates = (aa_width - sensor_coords[0], sensor_coords[3])
      logging.debug('Found sensor orientation %d, flipping left right',
                    props['android.sensor.orientation'])
    logging.debug('Mirrored top-left coordinates: %s', tl_coordinates)
    logging.debug('Mirrored bottom-right coordinates: %s', br_coordinates)
  else:
    tl_coordinates = (sensor_coords[0], sensor_coords[1])
    br_coordinates = (sensor_coords[2], sensor_coords[3])

  tl_x = int(tl_coordinates[0])
  tl_y = int(tl_coordinates[1])
  br_x = int(br_coordinates[0])
  br_y = int(br_coordinates[1])
  rect_w = br_x - tl_x
  rect_h = br_y - tl_y
  return {'x': tl_x,
          'y': tl_y,
          'width': rect_w,
          'height': rect_h,
          'weight': opencv_processing_utils.AE_AWB_METER_WEIGHT}
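
# Example usage (illustrative; request-key names assumed from other ITS
# tests): the returned dict can be applied as a 3A metering rectangle on a
# capture request:
#   region = get_metering_region(cam, file_stem)
#   if region is not None:
#     req['android.control.aeRegions'] = [region]
#     req['android.control.afRegions'] = [region]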


def _find_chart_bounding_region(img):
  """Finds the bounding region of the chart.

  Args:
    img: numpy array; captured image from scene_low_light.
  Returns:
    The coordinates of the bounding region relative to the input image. This
    is returned as (left, top, width, height) or None if the test chart was
    not detected.
  """
  # To apply k-means clustering, convert the image into an array where each
  # row represents a pixel in the image and each column is a feature. In this
  # case, the features are the BGR channels of the pixel.
  data = img.reshape((-1, 3))
  data = np.float32(data)

  k_means_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                      _K_MEANS_ITERATIONS, _K_MEANS_EPSILON)
  _, labels, centers = cv2.kmeans(data, _NUM_CLUSTERS, None, k_means_criteria,
                                  _K_MEANS_ITERATIONS,
                                  cv2.KMEANS_RANDOM_CENTERS)
  # Find the cluster closest to red
  min_dist = float('inf')
  closest_cluster_index = -1
  for index, center in enumerate(centers):
    dist = np.linalg.norm(center - np.array(_RED_BGR_COLOR))
    if dist < min_dist:
      min_dist = dist
      closest_cluster_index = index

  target_label = closest_cluster_index

  # Create a mask using the data associated with the cluster closest to red
  mask = labels.flatten() == target_label
  mask = mask.reshape((img.shape[0], img.shape[1]))
  mask = mask.astype(np.uint8)

  contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

  max_area = 20  # Minimum area (in px^2) a candidate box must exceed
  max_box = None

  # Find the largest box whose aspect ratio is closest to square
  for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    aspect_ratio = w / h
    if _MIN_ASPECT_RATIO < aspect_ratio < _MAX_ASPECT_RATIO:
      area = w * h
      if area > max_area:
        max_area = area
        max_box = (x, y, w, h)

  return max_box
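
# Worked example (illustrative numbers): with cluster centers such as
# (30, 30, 30), (25, 20, 210), and (180, 180, 180), the center (25, 20, 210)
# is nearest to _RED_BGR_COLOR (0, 0, 255) in Euclidean distance, so the
# pixels assigned to that cluster form the mask used to locate the red chart
# outline.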


def _crop(img):
  """Crops the captured image according to the red square outline.

  Args:
    img: numpy array; captured image from scene_low_light.
  Returns:
    numpy array of the cropped image or the original image if the crop region
    isn't found.
  """
  max_box = _find_chart_bounding_region(img)

  # If the box is found then return the cropped image
  # otherwise the original image is returned
  if max_box:
    x, y, w, h = max_box
    cropped_img = img[
        y+_CROP_PADDING:y+h-_CROP_PADDING,
        x+_CROP_PADDING:x+w-_CROP_PADDING
    ]
    return cropped_img

  return img


def _find_boxes(image):
  """Finds boxes in the captured image for computing luminance.

  The captured image should be of scene_low_light.png. The boxes are detected
  by finding the contours after applying a threshold followed by erosion.

  Args:
    image: numpy array; the captured image.
  Returns:
    array; an array of boxes, where each box is (x, y, w, h).
  """
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  blur = cv2.GaussianBlur(gray, (3, 3), 0)

  thresh = cv2.adaptiveThreshold(
      blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 31, -5)

  kernel = np.ones((3, 3), np.uint8)
  eroded = cv2.erode(thresh, kernel, iterations=1)

  contours, _ = cv2.findContours(eroded, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
  boxes = []

  # Filter out boxes that are too small or too large
  # and boxes that are not square
  img_hw_size_max = max(image.shape[0], image.shape[1])
  box_min_size = int(round(img_hw_size_max * _BOX_MIN_SIZE_RATIO, 0))
  if box_min_size == 0:
    raise AssertionError('Minimum box size calculated was 0. Check cropped '
                         'image size.')
  box_max_size = int(img_hw_size_max * _BOX_MAX_SIZE_RATIO)
  for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    aspect_ratio = w / h
    if (w > box_min_size and h > box_min_size and
        w < box_max_size and h < box_max_size and
        _MIN_ASPECT_RATIO < aspect_ratio < _MAX_ASPECT_RATIO):
      boxes.append((x, y, w, h))
  return boxes
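
# Worked example: for a cropped image whose longer dimension is 1000 px,
# _BOX_MIN_SIZE_RATIO (0.08) and _BOX_MAX_SIZE_RATIO (0.5) keep only boxes
# between 80 px and 500 px on each side, and the aspect-ratio bounds
# (0.8, 1.2) reject contours that are not roughly square.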


def _correct_image_rotation(img, regions):
  """Corrects the captured image orientation.

  The captured image should be of scene_low_light.png. The darkest square
  must appear in the bottom right and the brightest square must appear in
  the bottom left. This is necessary in order to traverse the
  Hilbert-ordered squares to return a darkest-to-brightest ordering.

  Args:
    img: numpy array; the original image captured.
    regions: the tuple of (box, luminance) computed for each square
      in the image.
  Returns:
    numpy array; image in the corrected orientation.
  """
  corner_brightness = {
      _KEY_TOP_LEFT: regions[2][1],
      _KEY_BOTTOM_LEFT: regions[5][1],
      _KEY_TOP_RIGHT: regions[14][1],
      _KEY_BOTTOM_RIGHT: regions[17][1],
  }

  darkest_corner = ('', float('inf'))
  brightest_corner = ('', float('-inf'))

  for corner, luminance in corner_brightness.items():
    if luminance < darkest_corner[1]:
      darkest_corner = (corner, luminance)
    if luminance > brightest_corner[1]:
      brightest_corner = (corner, luminance)

  if darkest_corner == brightest_corner:
    raise AssertionError('The captured image failed to detect the location '
                         'of the darkest and brightest squares.')

  if darkest_corner[0] == _KEY_TOP_LEFT:
    if brightest_corner[0] == _KEY_BOTTOM_LEFT:
      # rotate 90 CW and then flip vertically
      img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
      img = cv2.flip(img, 0)
    elif brightest_corner[0] == _KEY_TOP_RIGHT:
      # flip both vertically and horizontally
      img = cv2.flip(img, -1)
    else:
      raise AssertionError('The captured image failed to detect the location '
                           'of the brightest square.')
  elif darkest_corner[0] == _KEY_BOTTOM_LEFT:
    if brightest_corner[0] == _KEY_TOP_LEFT:
      # rotate 90 CCW
      img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
    elif brightest_corner[0] == _KEY_BOTTOM_RIGHT:
      # flip horizontally
      img = cv2.flip(img, 1)
    else:
      raise AssertionError('The captured image failed to detect the location '
                           'of the brightest square.')
  elif darkest_corner[0] == _KEY_TOP_RIGHT:
    if brightest_corner[0] == _KEY_TOP_LEFT:
      # flip vertically
      img = cv2.flip(img, 0)
    elif brightest_corner[0] == _KEY_BOTTOM_RIGHT:
      # rotate 90 CW
      img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
    else:
      raise AssertionError('The captured image failed to detect the location '
                           'of the brightest square.')
  elif darkest_corner[0] == _KEY_BOTTOM_RIGHT:
    if brightest_corner[0] == _KEY_BOTTOM_LEFT:
      # correct orientation
      pass
    elif brightest_corner[0] == _KEY_TOP_RIGHT:
      # rotate 90 CW and flip horizontally
      img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
      img = cv2.flip(img, 1)
    else:
      raise AssertionError('The captured image failed to detect the location '
                           'of the brightest square.')
  return img
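
# Orientation summary (restating the branches above): the image is
# transformed so the darkest corner square lands at the bottom right and the
# brightest at the bottom left; e.g. darkest at top-left with brightest at
# top-right means the image is upside down, so it is flipped both vertically
# and horizontally via cv2.flip(img, -1).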


def _compute_luminance_regions(image, boxes):
  """Compute the luminance for each box in scene_low_light.

  Args:
    image: numpy array; captured image.
    boxes: array; array of boxes where each box is (x, y, w, h).
  Returns:
    Array of tuples where each tuple is (box, luminance).
  """
  intensities = []
  for b in boxes:
    x, y, w, h = b
    padding = min(w, h) * _BOX_PADDING_RATIO
    left = int(x + padding)
    top = int(y + padding)
    right = int(x + w - padding)
    bottom = int(y + h - padding)
    box = image[top:bottom, left:right]
    box_xyz = cv2.cvtColor(box, cv2.COLOR_BGR2XYZ)
    # Y (channel index 1 of XYZ) is the luminance component
    intensity = int(np.mean(box_xyz[:, :, 1]))
    intensities.append((b, intensity))
  return intensities
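
# Worked example: for a detected 100x100 px box, _BOX_PADDING_RATIO (0.2)
# insets each edge by 20 px, so the mean is taken over the central 60x60 px
# patch, avoiding the box borders.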


def _draw_luminance(image, intensities):
  """Draws the luma and noise for each box in scene_low_light for debugging.

  Args:
    image: numpy array; captured image.
    intensities: array; array of tuples (box, luminance intensity).
  """
  for b, intensity in intensities:
    x, y, w, h = b
    padding = min(w, h) * _BOX_PADDING_RATIO
    left = int(x + padding)
    top = int(y + padding)
    right = int(x + w - padding)
    bottom = int(y + h - padding)
    noise_stats = image_processing_utils.compute_patch_noise(
        image, (left, top, (right - left), (bottom - top)))
    cv2.rectangle(image, (left, top), (right, bottom), _BOUNDING_BOX_COLOR, 2)
    # place the luma value above the box offset by 10 pixels
    cv2.putText(img=image, text=f'{intensity}', org=(x, y - 10),
                fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=1,
                color=_TEXT_COLOR)
    luma = str(round(noise_stats['luma'], 1))
    cu = str(round(noise_stats['chroma_u'], 1))
    cv = str(round(noise_stats['chroma_v'], 1))
    # place the noise (luma, chroma u, chroma v) values above the luma value
    # offset by 30 pixels
    cv2.putText(img=image, text=f'{luma}, {cu}, {cv}', org=(x, y - 30),
                fontFace=cv2.FONT_HERSHEY_PLAIN, fontScale=1,
                color=_TEXT_COLOR)


def _compute_avg(results):
  """Computes the average luminance of the first 6 boxes.

  The boxes are part of scene_low_light.

  Args:
    results: A list of tuples where each tuple is (box, luminance).
  Returns:
    float; The average luminance of the first 6 boxes.
  """
  luminance_values = [luminance for _, luminance in results[:6]]
  avg = sum(luminance_values) / len(luminance_values)
  return avg


def _compute_avg_delta_of_successive_boxes(results):
  """Computes the delta of successive boxes & takes the average of the first 5.

  The boxes are part of scene_low_light.

  Args:
    results: A list of tuples where each tuple is (box, luminance).
  Returns:
    float; The average of the first 5 deltas of successive boxes.
  """
  luminance_values = [luminance for _, luminance in results[:6]]
  delta = [luminance_values[i] - luminance_values[i - 1]
           for i in range(1, len(luminance_values))]
  avg = sum(delta) / len(delta)
  return avg
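
# Worked example: if the first six boxes have luminance values
# [10, 30, 45, 70, 90, 120], _compute_avg returns 365 / 6 ~= 60.8 and
# _compute_avg_delta_of_successive_boxes returns
# (20 + 15 + 25 + 20 + 30) / 5 = 22.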


def _plot_results(results, file_stem):
  """Plots the computed luminance for each box in scene_low_light.

  Args:
    results: A list of tuples where each tuple is (box, luminance).
    file_stem: The output file prefix where the plot is saved.
  """
  luminance_values = [luminance for _, luminance in results]
  box_labels = [f'Box {i + 1}' for i in range(len(results))]

  plt.figure(figsize=_FIG_SIZE)
  plt.plot(box_labels, luminance_values, marker='o', linestyle='-', color='b')
  plt.scatter(box_labels, luminance_values, color='r')

  plt.title('Luminance for each Box')
  plt.xlabel('Boxes')
  plt.ylabel('Luminance (pixel intensity)')
  plt.grid(True)
  plt.xticks(rotation=45)
  plt.savefig(f'{file_stem}_luminance_plot.png', dpi=300)
  plt.close()


def _plot_successive_difference(results, file_stem):
  """Plots the successive difference in luminance between each box.

  The boxes are part of scene_low_light.

  Args:
    results: A list of tuples where each tuple is (box, luminance).
    file_stem: The output file prefix where the plot is saved.
  """
  luminance_values = [luminance for _, luminance in results]
  delta = [luminance_values[i] - luminance_values[i - 1]
           for i in range(1, len(luminance_values))]
  box_labels = [f'Box {i} to Box {i + 1}' for i in range(1, len(results))]

  plt.figure(figsize=_FIG_SIZE)
  plt.plot(box_labels, delta, marker='o', linestyle='-', color='b')
  plt.scatter(box_labels, delta, color='r')

  plt.title('Difference in Luminance Between Successive Boxes')
  plt.xlabel('Box Transition')
  plt.ylabel('Luminance Difference')
  plt.grid(True)
  plt.xticks(rotation=45)
  plt.savefig(
      f'{file_stem}_luminance_difference_between_successive_boxes_plot.png',
      dpi=300)
  plt.close()


def _plot_noise(results, file_stem, img, test_name):
  """Plots the noise in the image.

  The boxes are part of scene_low_light.

  Args:
    results: A list of tuples where each tuple is (box, luminance).
    file_stem: The output file prefix where the plot is saved.
    img: The captured image used to measure patch noise.
    test_name: Name of the test being plotted.
  """
  luma_noise_values = []
  chroma_u_noise_values = []
  chroma_v_noise_values = []
  for region, _ in results:
    x, y, w, h = region
    padding = min(w, h) * _BOX_PADDING_RATIO
    left = int(x + padding)
    top = int(y + padding)
    right = int(x + w - padding)
    bottom = int(y + h - padding)
    noise_stats = image_processing_utils.compute_patch_noise(
        img, (left, top, (right - left), (bottom - top)))
    luma_noise_values.append(noise_stats['luma'])
    chroma_u_noise_values.append(noise_stats['chroma_u'])
    chroma_v_noise_values.append(noise_stats['chroma_v'])

  box_labels = [f'Box {i + 1}' for i in range(len(results))]

  plt.figure(figsize=_FIG_SIZE)
  plt.plot(box_labels, luma_noise_values, marker='o', linestyle='-',
           color='b', label='luma')
  plt.plot(box_labels, chroma_u_noise_values, marker='o', linestyle='-',
           color='r', label='chroma u')
  plt.plot(box_labels, chroma_v_noise_values, marker='o', linestyle='-',
           color='g', label='chroma v')
  plt.legend()

  plt.title('Luma, Chroma U, and Chroma V Noise per Box')
  plt.xlabel('Box')
  plt.ylabel('Noise (std dev)')
  plt.grid(True)
  plt.xticks(rotation=45)
  plt.savefig(f'{file_stem}_noise_per_box_plot.png', dpi=300)
  plt.close()
  # Print the per-box noise values for telemetry purposes
  # do not convert to logging.debug
  print(f'{test_name}_noise_luma: {luma_noise_values}')
  print(f'{test_name}_noise_chroma_u: {chroma_u_noise_values}')
  print(f'{test_name}_noise_chroma_v: {chroma_v_noise_values}')


def _sort_by_columns(regions):
  """Sort the regions by columns and then by row within each column.

  These regions are part of scene_low_light.

  Args:
    regions: The tuple of (box, luminance) of each square.
  Returns:
    array; an array of tuples of (box, luminance) sorted by columns then by row
      within each column.
  """
  # The input is 20 elements. The first two and last two elements represent
  # the 4 boxes on the outside used for diagnostics. Boxes in indices 2
  # through 17 represent the elements in the 4x4 grid.

  # Sort all elements by column
  col_sorted = sorted(regions, key=lambda r: r[0][0])

  # Sort elements within each column by row
  result = []
  result.extend(sorted(col_sorted[:2], key=lambda r: r[0][1]))

  for i in range(4):
    # take 4 rows per column and then sort the rows
    # skip the first two elements
    offset = i*4+2
    col = col_sorted[offset:(offset+4)]
    result.extend(sorted(col, key=lambda r: r[0][1]))

  result.extend(sorted(col_sorted[-2:], key=lambda r: r[0][1]))
  return result
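
# Worked example: of the 20 sorted regions, indices 0-1 are the left-edge
# diagnostic boxes, indices 2-17 are the 4x4 grid (four columns of four,
# each column sorted top to bottom), and indices 18-19 are the right-edge
# diagnostic boxes.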


def analyze_low_light_scene_capture(
    file_stem,
    img,
    avg_luminance_threshold=_LOW_LIGHT_BOOST_AVG_LUMINANCE_THRESH,
    avg_delta_luminance_threshold=_LOW_LIGHT_BOOST_AVG_DELTA_LUMINANCE_THRESH):
  """Analyze a captured frame to check if it meets low light scene criteria.

  The capture is cropped first, then boxes are detected, and then the
  luminance of each box is computed.

  Args:
    file_stem: The file prefix for results saved.
    img: numpy array; the captured image loaded by cv2 and available for
      analysis.
    avg_luminance_threshold: minimum average luminance of the first 6 boxes.
    avg_delta_luminance_threshold: minimum average difference in luminance
      of the first 5 successive boxes.
  """
  cv2.imwrite(f'{file_stem}_original.jpg', img)
  img = _crop(img)
  cv2.imwrite(f'{file_stem}_cropped.jpg', img)
  boxes = _find_boxes(img)
  if len(boxes) != _EXPECTED_NUM_OF_BOXES:
    raise AssertionError('The captured image failed to detect the expected '
                         'number of boxes. '
                         'Check the captured image to see if the image was '
                         'correctly captured and try again. '
                         f'Actual: {len(boxes)}, '
                         f'Expected: {_EXPECTED_NUM_OF_BOXES}')

  regions = _compute_luminance_regions(img, boxes)

  # Sort by column (left to right), then by row within each column
  sorted_regions = _sort_by_columns(regions)
  img = _correct_image_rotation(img, sorted_regions)
  cv2.imwrite(f'{file_stem}_rotated.jpg', img)

  # The orientation of the image may have changed, which affects the
  # coordinates of the squares. Therefore, locate the squares, recompute the
  # regions, and sort again
  boxes = _find_boxes(img)
  regions = _compute_luminance_regions(img, boxes)
  sorted_regions = _sort_by_columns(regions)

  # Reorder so the regions are increasing in luminance according to the
  # Hilbert curve arrangement pattern of the grid
  # See scene_low_light_reference.png which indicates the order of each
  # box
  hilbert_ordered = [
      sorted_regions[17],
      sorted_regions[13],
      sorted_regions[12],
      sorted_regions[16],
      sorted_regions[15],
      sorted_regions[14],
      sorted_regions[10],
      sorted_regions[11],
      sorted_regions[7],
      sorted_regions[6],
      sorted_regions[2],
      sorted_regions[3],
      sorted_regions[4],
      sorted_regions[8],
      sorted_regions[9],
      sorted_regions[5],
  ]

  test_name = os.path.basename(file_stem)

  _plot_results(hilbert_ordered, file_stem)
  _plot_successive_difference(hilbert_ordered, file_stem)
  _plot_noise(hilbert_ordered, file_stem, img, test_name)

  _draw_luminance(img, regions)
  cv2.imwrite(f'{file_stem}_result.jpg', img)

  avg = _compute_avg(hilbert_ordered)
  delta_avg = _compute_avg_delta_of_successive_boxes(hilbert_ordered)

  # The following print statements are necessary for telemetry
  # do not convert to logging.debug
  print(f'{test_name}_avg_luma: {avg:.2f}')
  print(f'{test_name}_delta_avg_luma: {delta_avg:.2f}')
  chart_luma_values = [v[1] for v in hilbert_ordered]
  print(f'{test_name}_chart_luma: {chart_luma_values}')

  logging.debug('average luminance of the first 6 boxes: %.2f', avg)
  logging.debug('average difference in luminance of 5 successive boxes: %.2f',
                delta_avg)
  if avg < float(avg_luminance_threshold):
    raise AssertionError('Average luminance of the first 6 boxes did not '
                         'meet minimum requirements for low light scene '
                         'criteria. '
                         f'Actual: {avg:.2f}, '
                         f'Expected: {avg_luminance_threshold}')
  if delta_avg < float(avg_delta_luminance_threshold):
    raise AssertionError('The average difference in luminance of the first 5 '
                         'successive boxes did not meet minimum requirements '
                         'for low light scene criteria. '
                         f'Actual: {delta_avg:.2f}, '
                         f'Expected: {avg_delta_luminance_threshold}')
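# Example usage (illustrative; paths assumed):
#   img = cv2.imread('/tmp/its/low_light_capture.jpg')
#   analyze_low_light_scene_capture('/tmp/its/low_light_capture', img)
# This writes *_original.jpg, *_cropped.jpg, *_rotated.jpg, and *_result.jpg
# debug images plus the luminance and noise plots alongside the file stem,
# and raises AssertionError if the scene fails the low light criteria.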