xref: /aosp_15_r20/cts/apps/CameraITS/utils/its_session_utils.py (revision b7c941bb3fa97aba169d73cee0bed2de8ac964bf)
1# Copyright 2013 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14"""Utility functions to form an ItsSession and perform various camera actions.
15"""
16
17
18import collections
19import fnmatch
20import glob
21import json
22import logging
23import math
24import os
25import socket
26import subprocess
27import sys
28import time
29import types
30import unicodedata
31
32from mobly.controllers.android_device_lib import adb
33import numpy
34
35import camera_properties_utils
36import capture_request_utils
37import error_util
38import image_processing_utils
39import its_device_utils
40import opencv_processing_utils
41import ui_interaction_utils
42
43ANDROID13_API_LEVEL = 33
44ANDROID14_API_LEVEL = 34
45ANDROID15_API_LEVEL = 35
46ANDROID16_API_LEVEL = 36
47CHART_DISTANCE_NO_SCALING = 0
48IMAGE_FORMAT_JPEG = 256
49IMAGE_FORMAT_YUV_420_888 = 35
50JCA_CAPTURE_PATH_TAG = 'JCA_CAPTURE_PATH'
51JCA_CAPTURE_STATUS_TAG = 'JCA_CAPTURE_STATUS'
52LOAD_SCENE_DELAY_SEC = 3
53PREVIEW_MAX_TESTED_AREA = 1920 * 1440
54PREVIEW_MIN_TESTED_AREA = 320 * 240
55PRIVATE_FORMAT = 'priv'
56JPEG_R_FMT_STR = 'jpeg_r'
57SCALING_TO_FILE_ATOL = 0.01
58SINGLE_CAPTURE_NCAP = 1
59SUB_CAMERA_SEPARATOR = '.'
60# pylint: disable=line-too-long
61# Allowed tablets as listed on https://source.android.com/docs/compatibility/cts/camera-its-box#tablet-requirements
62# List entries must be entered in lowercase
63TABLET_ALLOWLIST = (
64    'dragon',  # Google Pixel C
65    'hnhey-q',  # Honor Pad 8
66    'hwcmr09',  # Huawei MediaPad M5
67    'x306f',  # Lenovo Tab M10 HD (Gen 2)
68    'x606f',  # Lenovo Tab M10 Plus
69    'j606f',  # Lenovo Tab P11
70    'tb350fu',  # Lenovo Tab P11 (Gen 2)
71    'agta',  # Nokia T21
72    'gta4lwifi',  # Samsung Galaxy Tab A7
73    'gta8wifi',  # Samsung Galaxy Tab A8
74    'gta8',  # Samsung Galaxy Tab A8 LTE
75    'gta9pwifi',  # Samsung Galaxy Tab A9+
76    'gta9p',  # Samsung Galaxy Tab A9+ 5G
77    'dpd2221',  # Vivo Pad2
78    'nabu',  # Xiaomi Pad 5
79    'nabu_tw',  # Xiaomi Pad 5
80    'xun',  # Xiaomi Redmi Pad SE
81    'yunluo',  # Xiaomi Redmi Pad
82)
83TABLET_DEFAULT_BRIGHTNESS = 192  # 8-bit tablet 75% brightness
84TABLET_LEGACY_BRIGHTNESS = 96
85TABLET_LEGACY_NAME = 'dragon'
86# List entries must be entered in lowercase
87TABLET_OS_VERSION = types.MappingProxyType({
88    'nabu': ANDROID13_API_LEVEL,
89    'nabu_tw': ANDROID13_API_LEVEL,
90    'yunluo': ANDROID14_API_LEVEL
91    })
92TABLET_REQUIREMENTS_URL = 'https://source.android.com/docs/compatibility/cts/camera-its-box#tablet-allowlist'
93TABLET_BRIGHTNESS_ERROR_MSG = ('Tablet brightness not set as per '
94                               f'{TABLET_REQUIREMENTS_URL} in the config file')
95TABLET_NOT_ALLOWED_ERROR_MSG = ('Tablet model or tablet Android version is '
96                                'not on our allowlist, please refer to '
97                                f'{TABLET_REQUIREMENTS_URL}')
98TAP_COORDINATES = (500, 500)  # Location to tap tablet screen via adb
99USE_CASE_CROPPED_RAW = 6
100VIDEO_SCENES = ('scene_video',)
101NOT_YET_MANDATED_MESSAGE = 'Not yet mandated test'
102RESULT_OK_STATUS = '-1'
103
104_FLASH_MODE_OFF = 0
105_VALIDATE_LIGHTING_PATCH_H = 0.05
106_VALIDATE_LIGHTING_PATCH_W = 0.05
107_VALIDATE_LIGHTING_REGIONS = {
108    'top-left': (0, 0),
109    'top-right': (0, 1-_VALIDATE_LIGHTING_PATCH_H),
110    'bottom-left': (1-_VALIDATE_LIGHTING_PATCH_W, 0),
111    'bottom-right': (1-_VALIDATE_LIGHTING_PATCH_W,
112                     1-_VALIDATE_LIGHTING_PATCH_H),
113}
114_MODULAR_MACRO_OFFSET = 0.35  # Determined empirically from modular rig testing
115_VALIDATE_LIGHTING_REGIONS_MODULAR_UW = {
116    'top-left': (_MODULAR_MACRO_OFFSET, _MODULAR_MACRO_OFFSET),
117    'bottom-left': (_MODULAR_MACRO_OFFSET,
118                    1-_MODULAR_MACRO_OFFSET-_VALIDATE_LIGHTING_PATCH_H),
119    'top-right': (1-_MODULAR_MACRO_OFFSET-_VALIDATE_LIGHTING_PATCH_W,
120                  _MODULAR_MACRO_OFFSET),
121    'bottom-right': (1-_MODULAR_MACRO_OFFSET-_VALIDATE_LIGHTING_PATCH_W,
122                     1-_MODULAR_MACRO_OFFSET-_VALIDATE_LIGHTING_PATCH_H),
123}
124_VALIDATE_LIGHTING_MACRO_FOV_THRESH = 110
125_VALIDATE_LIGHTING_THRESH = 0.05  # Determined empirically from scene[1:6] tests
126_VALIDATE_LIGHTING_THRESH_DARK = 0.3  # Determined empirically for night test
127_CMD_NAME_STR = 'cmdName'
128_OBJ_VALUE_STR = 'objValue'
129_STR_VALUE_STR = 'strValue'
130_TAG_STR = 'tag'
131_CAMERA_ID_STR = 'cameraId'
132_EXTRA_TIMEOUT_FACTOR = 10
133_COPY_SCENE_DELAY_SEC = 1
134_DST_SCENE_DIR = '/sdcard/Download/'
135_BIT_HLG10 = 0x01  # bit 1 for feature mask
136_BIT_STABILIZATION = 0x02  # bit 2 for feature mask
137
138
139def validate_tablet(tablet_name, brightness, device_id):
140  """Ensures tablet brightness is set according to documentation.
141
142  https://source.android.com/docs/compatibility/cts/camera-its-box#tablet-allowlist
143  Args:
144    tablet_name: tablet product name specified by `ro.product.device`.
145    brightness: brightness specified by config file.
146    device_id: str; ID of the device.
147  """
148  tablet_name = tablet_name.lower()
149  if tablet_name not in TABLET_ALLOWLIST:
150    raise AssertionError(
151        f'Tablet product name: {tablet_name}. {TABLET_NOT_ALLOWED_ERROR_MSG}'
152    )
153  if tablet_name in TABLET_OS_VERSION:
154    if (device_sdk := get_build_sdk_version(
155        device_id)) < TABLET_OS_VERSION[tablet_name]:
156      raise AssertionError(
157          f' Tablet product name: {tablet_name}. '
158          f'Android version: {device_sdk}. {TABLET_NOT_ALLOWED_ERROR_MSG}'
159      )
160  name_to_brightness = {
161      TABLET_LEGACY_NAME: TABLET_LEGACY_BRIGHTNESS,
162  }
163  if tablet_name in name_to_brightness:
164    if brightness != name_to_brightness[tablet_name]:
165      raise AssertionError(TABLET_BRIGHTNESS_ERROR_MSG)
166  else:
167    if brightness != TABLET_DEFAULT_BRIGHTNESS:
168      raise AssertionError(TABLET_BRIGHTNESS_ERROR_MSG)
169
170
171def check_apk_installed(device_id, package_name):
172  """Verifies that an APK is installed on a given device.
173
174  Args:
175    device_id: str; ID of the device.
176    package_name: str; name of the package that should be installed.
177  """
178  verify_cts_cmd = (
179      f'adb -s {device_id} shell pm list packages | '
180      f'grep {package_name}'
181  )
182  bytes_output = subprocess.check_output(
183      verify_cts_cmd, stderr=subprocess.STDOUT, shell=True
184  )
185  output = str(bytes_output.decode('utf-8')).strip()
186  if package_name not in output:
187    raise AssertionError(
188        f'{package_name} not installed on device {device_id}!'
189    )
190
191
192def get_array_size(buffer):
193  """Get array size based on different NumPy versions' functions.
194
195  Args:
196    buffer: A NumPy array.
197
198  Returns:
199    buffer_size: The size of the buffer.
200  """
201  np_version = numpy.__version__
202  if np_version.startswith(('1.25', '1.26', '2.')):
203    buffer_size = numpy.prod(buffer.shape)
204  else:
205    buffer_size = numpy.product(buffer.shape)
206  return buffer_size
207
208
209class ItsSession(object):
210  """Controls a device over adb to run ITS scripts.
211
212    The script importing this module (on the host machine) prepares JSON
213    objects encoding CaptureRequests, specifying sets of parameters to use
214    when capturing an image using the Camera2 APIs. This class encapsulates
215    sending the requests to the device, monitoring the device's progress, and
216    copying the resultant captures back to the host machine when done. TCP
217    forwarded over adb is the transport mechanism used.
218
219    The device must have CtsVerifier.apk installed.
220
221    Attributes:
222        sock: The open socket.
223  """
224
225  # Open a connection to localhost:<host_port>, forwarded to port 6000 on the
226  # device. <host_port> is determined at run-time to support multiple
227  # connected devices.
228  IPADDR = '127.0.0.1'
229  REMOTE_PORT = 6000
230  BUFFER_SIZE = 4096
231
232  # LOCK_PORT is used as a mutex lock to protect the list of forwarded ports
233  # among all processes. The script assumes LOCK_PORT is available and will
234  # try to use ports between CLIENT_PORT_START and
235  # CLIENT_PORT_START+MAX_NUM_PORTS-1 on host for ITS sessions.
236  CLIENT_PORT_START = 6000
237  MAX_NUM_PORTS = 100
238  LOCK_PORT = CLIENT_PORT_START + MAX_NUM_PORTS
239
240  # Seconds timeout on each socket operation.
241  SOCK_TIMEOUT = 20.0
242  # Seconds timeout on performance measurement socket operation
243  SOCK_TIMEOUT_FOR_PERF_MEASURE = 40.0
244  # Seconds timeout on preview recording socket operation.
245  SOCK_TIMEOUT_PREVIEW = 30.0  # test_imu_drift is 30s
246
247  # Additional timeout in seconds when ITS service is doing more complicated
248  # operations, for example: issuing warmup requests before actual capture.
249  EXTRA_SOCK_TIMEOUT = 5.0
250
251  PACKAGE = 'com.android.cts.verifier.camera.its'
252  INTENT_START = 'com.android.cts.verifier.camera.its.START'
253
254  # This string must be in sync with ItsService. Updated when interface
255  # between script and ItsService is changed.
256  ITS_SERVICE_VERSION = '1.0'
257
258  SEC_TO_NSEC = 1000*1000*1000.0
259  adb = 'adb -d'
260
261  # Predefine camera props. Save props extracted from the function,
262  # "get_camera_properties".
263  props = None
264
265  IMAGE_FORMAT_LIST_1 = [
266      'jpegImage', 'rawImage', 'raw10Image', 'raw12Image', 'rawStatsImage',
267      'dngImage', 'y8Image', 'jpeg_rImage',
268      'rawQuadBayerImage', 'rawQuadBayerStatsImage',
269      'raw10StatsImage', 'raw10QuadBayerStatsImage', 'raw10QuadBayerImage'
270  ]
271
272  IMAGE_FORMAT_LIST_2 = [
273      'jpegImage', 'rawImage', 'raw10Image', 'raw12Image', 'rawStatsImage',
274      'yuvImage', 'jpeg_rImage',
275      'rawQuadBayerImage', 'rawQuadBayerStatsImage',
276      'raw10StatsImage', 'raw10QuadBayerStatsImage', 'raw10QuadBayerImage'
277  ]
278
279  CAP_JPEG = {'format': 'jpeg'}
280  CAP_RAW = {'format': 'raw'}
281  CAP_CROPPED_RAW = {'format': 'raw', 'useCase': USE_CASE_CROPPED_RAW}
282  CAP_YUV = {'format': 'yuv'}
283  CAP_RAW_YUV = [{'format': 'raw'}, {'format': 'yuv'}]
284
285  def __init_socket_port(self):
286    """Initialize the socket port for the host to forward requests to the device.
287
288    This method assumes localhost's LOCK_PORT is available and will try to
289    use ports between CLIENT_PORT_START and CLIENT_PORT_START+MAX_NUM_PORTS-1
290    """
291    num_retries = 100
292    retry_wait_time_sec = 0.05
293
294    # Bind a socket to use as mutex lock
295    socket_lock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
296    for i in range(num_retries):
297      try:
298        socket_lock.bind((ItsSession.IPADDR, ItsSession.LOCK_PORT))
299        break
300      except (socket.error, socket.timeout) as socket_issue:
301        if i == num_retries - 1:
302          raise error_util.CameraItsError(
303              self._device_id, 'socket lock returns error') from socket_issue
304        else:
305          time.sleep(retry_wait_time_sec)
306
307    # Check if a port is already assigned to the device.
308    command = 'adb forward --list'
309    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
310    # pylint: disable=unused-variable
311    output, error = proc.communicate()
312    port = None
313    used_ports = []
314    for line  in output.decode('utf-8').split(os.linesep):
315      # each line should be formatted as:
316      # "<device_id> tcp:<host_port> tcp:<remote_port>"
317      forward_info = line.split()
318      if len(forward_info) >= 3 and len(
319          forward_info[1]) > 4 and forward_info[1][:4] == 'tcp:' and len(
320              forward_info[2]) > 4 and forward_info[2][:4] == 'tcp:':
321        local_p = int(forward_info[1][4:])
322        remote_p = int(forward_info[2][4:])
323        if forward_info[
324            0] == self._device_id and remote_p == ItsSession.REMOTE_PORT:
325          port = local_p
326          break
327        else:
328          used_ports.append(local_p)
329
330      # Find the first available port if no port is assigned to the device.
331    if port is None:
332      for p in range(ItsSession.CLIENT_PORT_START,
333                     ItsSession.CLIENT_PORT_START + ItsSession.MAX_NUM_PORTS):
334        if self.check_port_availability(p, used_ports):
335          port = p
336          break
337
338    if port is None:
339      raise error_util.CameraItsError(self._device_id,
340                                      ' cannot find an available ' + 'port')
341
342    # Release the socket as mutex unlock
343    socket_lock.close()
344
345    # Connect to the socket
346    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
347    self.sock.connect((self.IPADDR, port))
348    self.sock.settimeout(self.SOCK_TIMEOUT)
349
350  def check_port_availability(self, check_port, used_ports):
351    """Check if the port is available or not.
352
353    Args:
354      check_port: Port to check for availability
355      used_ports: List of used ports
356
357    Returns:
358     True if the given port is available and can be assigned to the device.
359    """
360    if check_port not in used_ports:
361      # Try to run "adb forward" with the port
362      command = ('%s forward tcp:%d tcp:%d' %
363                 (self.adb, check_port, self.REMOTE_PORT))
364      proc = subprocess.Popen(
365          command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
366      error = proc.communicate()[1]
367
368      # Check if there is no error
369      if error is None or error.find('error'.encode()) < 0:
370        return True
371      else:
372        return False
373
374  def __wait_for_service(self):
375    """Wait for ItsService to be ready and reboot the device if needed.
376
377    This also includes the optional reboot handling: if the user
378    provides a "reboot" or "reboot=N" arg, then reboot the device,
379    waiting for N seconds (default 30) before returning.
380    """
381
382    for s in sys.argv[1:]:
383      if s[:6] == 'reboot':
384        duration = 30
385        if len(s) > 7 and s[6] == '=':
386          duration = int(s[7:])
387        logging.debug('Rebooting device')
388        its_device_utils.run(f'{self.adb} reboot')
389        its_device_utils.run(f'{self.adb} wait-for-device')
390        time.sleep(duration)
391        logging.debug('Reboot complete')
392
393    # Flush logcat so following code won't be misled by previous
394    # 'ItsService ready' log.
395    its_device_utils.run(f'{self.adb} logcat -c')
396    time.sleep(1)
397
398    its_device_utils.run(
399        f'{self.adb} shell am force-stop --user cur {self.PACKAGE}')
400    its_device_utils.run(
401        f'{self.adb} shell am start-foreground-service --user cur '
402        f'-t text/plain -a {self.INTENT_START}'
403    )
404
405    # Wait until the socket is ready to accept a connection.
406    proc = subprocess.Popen(
407        self.adb.split() + ['logcat'], stdout=subprocess.PIPE)
408    logcat = proc.stdout
409    while True:
410      line = logcat.readline().strip()
411      if line.find(b'ItsService ready') >= 0:
412        break
413    proc.kill()
414    proc.communicate()
415
416  def __init__(self, device_id=None, camera_id=None, hidden_physical_id=None,
417               override_to_portrait=None):
418    self._camera_id = camera_id
419    self._device_id = device_id
420    self._hidden_physical_id = hidden_physical_id
421    self._override_to_portrait = override_to_portrait
422
423    # Initialize device id and adb command.
424    self.adb = 'adb -s ' + self._device_id
425    self.__wait_for_service()
426    self.__init_socket_port()
427
428  def __enter__(self):
429    self.close_camera()
430    self.__open_camera()
431    return self
432
433  def __exit__(self, exec_type, exec_value, exec_traceback):
434    if hasattr(self, 'sock') and self.sock:
435      self.close_camera()
436      self.sock.close()
437    return False
438
439  def override_with_hidden_physical_camera_props(self, props):
440    """Check that it is a valid sub-camera backing the logical camera.
441
442    If current session is for a hidden physical camera, check that it is a valid
443    sub-camera backing the logical camera, override self.props, and return the
444    characteristics of sub-camera. Otherwise, return "props" directly.
445
446    Args:
447     props: Camera properties object.
448
449    Returns:
450     The properties of the hidden physical camera if possible.
451    """
452    if self._hidden_physical_id:
453      if not camera_properties_utils.logical_multi_camera(props):
454        logging.debug('cam %s not a logical multi-camera: no change in props.',
455                      self._hidden_physical_id)
456        return props
457      physical_ids = camera_properties_utils.logical_multi_camera_physical_ids(
458          props)
459      if self._hidden_physical_id not in physical_ids:
460        raise AssertionError(f'{self._hidden_physical_id} is not a hidden '
461                             f'sub-camera of {self._camera_id}')
462      logging.debug('Overriding cam %s props', self._hidden_physical_id)
463      props = self.get_camera_properties_by_id(self._hidden_physical_id)
464      self.props = props
465    return props
466
467  def get_camera_properties(self):
468    """Get the camera properties object for the device.
469
470    Returns:
471     The Python dictionary object for the CameraProperties object.
472    """
473    cmd = {}
474    cmd[_CMD_NAME_STR] = 'getCameraProperties'
475    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
476    data, _ = self.__read_response_from_socket()
477    if data[_TAG_STR] != 'cameraProperties':
478      raise error_util.CameraItsError('Invalid command response')
479    self.props = data[_OBJ_VALUE_STR]['cameraProperties']
480    return data[_OBJ_VALUE_STR]['cameraProperties']
481
482  def get_session_properties(self, out_surfaces, cap_request):
483    """Get the camera properties object for a session configuration.
484
485    Args:
486      out_surfaces: output surfaces used to query session props.
487      cap_request: capture request used to query session props.
488
489    Returns:
490     The Python dictionary object for the CameraProperties object.
491    """
492    cmd = {}
493    cmd[_CMD_NAME_STR] = 'getCameraSessionProperties'
494    if out_surfaces:
495      if isinstance(out_surfaces, list):
496        cmd['outputSurfaces'] = out_surfaces
497      else:
498        cmd['outputSurfaces'] = [out_surfaces]
499      formats = [
500          c['format'] if 'format' in c else 'yuv' for c in cmd['outputSurfaces']
501      ]
502      formats = [s if s != 'jpg' else 'jpeg' for s in formats]
503    else:
504      max_yuv_size = capture_request_utils.get_available_output_sizes(
505          'yuv', self.props)[0]
506      formats = ['yuv']
507      cmd['outputSurfaces'] = [{
508          'format': 'yuv',
509          'width': max_yuv_size[0],
510          'height': max_yuv_size[1]
511      }]
512    cmd['captureRequest'] = cap_request
513
514    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
515    data, _ = self.__read_response_from_socket()
516    if data[_TAG_STR] != 'cameraProperties':
517      raise error_util.CameraItsError('Invalid command response')
518    self.props = data[_OBJ_VALUE_STR]['cameraProperties']
519    return data[_OBJ_VALUE_STR]['cameraProperties']
520
521  def get_camera_properties_by_id(self, camera_id, override_to_portrait=None):
522    """Get the camera properties object for device with camera_id.
523
524    Args:
525     camera_id: The ID string of the camera
526     override_to_portrait: Optional value for overrideToPortrait
527
528    Returns:
529     The Python dictionary object for the CameraProperties object. Empty
530     if no such device exists.
531    """
532    cmd = {}
533    cmd[_CMD_NAME_STR] = 'getCameraPropertiesById'
534    cmd[_CAMERA_ID_STR] = camera_id
535    if override_to_portrait is not None:
536      cmd['overrideToPortrait'] = override_to_portrait
537    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
538    data, _ = self.__read_response_from_socket()
539    if data[_TAG_STR] != 'cameraProperties':
540      raise error_util.CameraItsError('Invalid command response')
541    return data[_OBJ_VALUE_STR]['cameraProperties']
542
543  def __read_response_from_socket(self):
544    """Reads a line (newline-terminated) string serialization of JSON object.
545
546    Returns:
547     Deserialized json obj.
548    """
549    chars = []
550    while not chars or chars[-1] != '\n':
551      ch = self.sock.recv(1).decode('utf-8')
552      if not ch:
553        # Socket was probably closed; otherwise don't get empty strings
554        raise error_util.CameraItsError('Problem with socket on device side')
555      chars.append(ch)
556    line = ''.join(chars)
557    jobj = json.loads(line)
558    # Optionally read a binary buffer of a fixed size.
559    buf = None
560    if 'bufValueSize' in jobj:
561      n = jobj['bufValueSize']
562      buf = bytearray(n)
563      view = memoryview(buf)
564      while n > 0:
565        nbytes = self.sock.recv_into(view, n)
566        view = view[nbytes:]
567        n -= nbytes
568      buf = numpy.frombuffer(buf, dtype=numpy.uint8)
569    return jobj, buf
570
571  def __open_camera(self):
572    """Get the camera ID to open if it is an argument as a single camera.
573
574    This allows passing camera=# to individual tests at command line
575    and camera=#,#,# or an no camera argv with tools/run_all_tests.py.
576    In case the camera is a logical multi-camera, to run ITS on the
577    hidden physical sub-camera, pass camera=[logical ID]:[physical ID]
578    to an individual test at the command line, and same applies to multiple
579    camera IDs for tools/run_all_tests.py: camera=#,#:#,#:#,#
580    """
581    if not self._camera_id:
582      self._camera_id = 0
583      for s in sys.argv[1:]:
584        if s[:7] == 'camera=' and len(s) > 7:
585          camera_ids = s[7:].split(',')
586          camera_id_combos = parse_camera_ids(camera_ids)
587          if len(camera_id_combos) == 1:
588            self._camera_id = camera_id_combos[0].id
589            self._hidden_physical_id = camera_id_combos[0].sub_id
590
591    logging.debug('Opening camera: %s', self._camera_id)
592    cmd = {_CMD_NAME_STR: 'open', _CAMERA_ID_STR: self._camera_id}
593    if self._override_to_portrait is not None:
594      cmd['overrideToPortrait'] = self._override_to_portrait
595    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
596    data, _ = self.__read_response_from_socket()
597    if data[_TAG_STR] != 'cameraOpened':
598      raise error_util.CameraItsError('Invalid command response')
599
600  def close_camera(self):
601    cmd = {_CMD_NAME_STR: 'close'}
602    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
603    data, _ = self.__read_response_from_socket()
604    if data[_TAG_STR] != 'cameraClosed':
605      raise error_util.CameraItsError('Invalid command response')
606
607  def zoom_ratio_within_range(self, zoom_ratio):
608    """Determine if a given zoom ratio is within device zoom range.
609
610    Args:
611      zoom_ratio: float; zoom ratio requested
612    Returns:
613      Boolean: True, if zoom_ratio inside device range. False otherwise.
614    """
615    zoom_range = self.props['android.control.zoomRatioRange']
616    return zoom_ratio >= zoom_range[0] and zoom_ratio <= zoom_range[1]
617
618  def get_sensors(self):
619    """Get all sensors on the device.
620
621    Returns:
622       A Python dictionary that returns keys and booleans for each sensor.
623    """
624    cmd = {}
625    cmd[_CMD_NAME_STR] = 'checkSensorExistence'
626    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
627    data, _ = self.__read_response_from_socket()
628    if data[_TAG_STR] != 'sensorExistence':
629      raise error_util.CameraItsError('Invalid response for command: %s' %
630                                      cmd[_CMD_NAME_STR])
631    return data[_OBJ_VALUE_STR]
632
633  def get_default_camera_pkg(self):
634    """Get default camera app package name.
635
636    Returns:
637       Default camera app pkg name.
638    """
639    cmd = {}
640    cmd[_CMD_NAME_STR] = 'doGetDefaultCameraPkgName'
641    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
642    data, _ = self.__read_response_from_socket()
643    if data[_TAG_STR] != 'defaultCameraPkg':
644      raise error_util.CameraItsError('Invalid response for command: %s' %
645                                      cmd[_CMD_NAME_STR])
646    return data['strValue']
647
648  def check_gain_map_present(self, file_path):
649    """Check if the image has gainmap present or not.
650
651    The image stored at file_path is decoded and analyzed
652    to check whether the gainmap is present or not. If the image
653    captured is UltraHDR, it should have gainmap present.
654
655    Args:
656      file_path: path of the image to be analyzed on DUT.
657    Returns:
658      Boolean: True if the image has gainmap present.
659    """
660    cmd = {}
661    cmd[_CMD_NAME_STR] = 'doGainMapCheck'
662    cmd['filePath'] = file_path
663    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
664    data, _ = self.__read_response_from_socket()
665    if data[_TAG_STR] != 'gainmapPresent':
666      raise error_util.CameraItsError(
667          'Invalid response for command: %s' % cmd[_CMD_NAME_STR])
668    return data['strValue']
669
670  def start_sensor_events(self):
671    """Start collecting sensor events on the device.
672
673    See get_sensor_events for more info.
674
675    Returns:
676       Nothing.
677    """
678    cmd = {}
679    cmd[_CMD_NAME_STR] = 'startSensorEvents'
680    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
681    data, _ = self.__read_response_from_socket()
682    if data[_TAG_STR] != 'sensorEventsStarted':
683      raise error_util.CameraItsError('Invalid response for command: %s' %
684                                      cmd[_CMD_NAME_STR])
685
686  def get_sensor_events(self):
687    """Get a trace of all sensor events on the device.
688
689        The trace starts when the start_sensor_events function is called. If
690        the test runs for a long time after this call, then the device's
691        internal memory can fill up. Calling get_sensor_events gets all events
692        from the device, and then stops the device from collecting events and
693        clears the internal buffer; to start again, the start_sensor_events
694        call must be used again.
695
696        Events from the accelerometer, compass, and gyro are returned; each
697        has a timestamp and x,y,z values.
698
699        Note that sensor events are only produced if the device isn't in its
700        standby mode (i.e.) if the screen is on.
701
702    Returns:
703            A Python dictionary with three keys ("accel", "mag", "gyro") each
704            of which maps to a list of objects containing "time","x","y","z"
705            keys.
706    """
707    cmd = {}
708    cmd[_CMD_NAME_STR] = 'getSensorEvents'
709    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
710    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
711    self.sock.settimeout(timeout)
712    data, _ = self.__read_response_from_socket()
713    if data[_TAG_STR] != 'sensorEvents':
714      raise error_util.CameraItsError('Invalid response for command: %s ' %
715                                      cmd[_CMD_NAME_STR])
716    self.sock.settimeout(self.SOCK_TIMEOUT)
717    return data[_OBJ_VALUE_STR]
718
719  def get_camera_ids(self):
720    """Returns the list of all camera_ids.
721
722    Returns:
723      List of camera ids on the device.
724    """
725    cmd = {'cmdName': 'getCameraIds'}
726    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
727    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
728    self.sock.settimeout(timeout)
729    data, _ = self.__read_response_from_socket()
730    if data['tag'] != 'cameraIds':
731      raise error_util.CameraItsError('Invalid command response')
732    return data['objValue']
733
734  def get_camera_name(self):
735    """Gets the camera name.
736
737    Returns:
738      The camera name with camera id and/or hidden physical id.
739    """
740    if self._hidden_physical_id:
741      return f'{self._camera_id}.{self._hidden_physical_id}'
742    else:
743      return self._camera_id
744
745  def get_unavailable_physical_cameras(self, camera_id):
746    """Get the unavailable physical cameras ids.
747
748    Args:
749      camera_id: int; device id
750    Returns:
751      List of all physical camera ids which are unavailable.
752    """
753    cmd = {_CMD_NAME_STR: 'doGetUnavailablePhysicalCameras',
754           _CAMERA_ID_STR: camera_id}
755    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
756    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
757    self.sock.settimeout(timeout)
758    data, _ = self.__read_response_from_socket()
759    if data[_TAG_STR] != 'unavailablePhysicalCameras':
760      raise error_util.CameraItsError('Invalid command response')
761    return data[_OBJ_VALUE_STR]
762
763  def is_hlg10_recording_supported_for_profile(self, profile_id):
764    """Query whether the camera device supports HLG10 video recording.
765
766    Args:
767      profile_id: int; profile id corresponding to the quality level.
768    Returns:
769      Boolean: True if device supports HLG10 video recording, False in
770      all other cases.
771    """
772    cmd = {}
773    cmd[_CMD_NAME_STR] = 'isHLG10SupportedForProfile'
774    cmd[_CAMERA_ID_STR] = self._camera_id
775    cmd['profileId'] = profile_id
776    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
777
778    data, _ = self.__read_response_from_socket()
779    if data[_TAG_STR] != 'hlg10Response':
780      raise error_util.CameraItsError('Failed to query HLG10 support')
781    return data[_STR_VALUE_STR] == 'true'
782
783  def is_hlg10_recording_supported_for_size_and_fps(
784      self, video_size, max_fps):
785    """Query whether the camera device supports HLG10 video recording.
786
787    Args:
788      video_size: String; the hlg10 video recording size.
789      max_fps: int; the maximum frame rate of the camera.
790    Returns:
791      Boolean: True if device supports HLG10 video recording, False in
792      all other cases.
793    """
794    cmd = {}
795    cmd[_CMD_NAME_STR] = 'isHLG10SupportedForSizeAndFps'
796    cmd[_CAMERA_ID_STR] = self._camera_id
797    cmd['videoSize'] = video_size
798    cmd['maxFps'] = max_fps
799    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
800
801    data, _ = self.__read_response_from_socket()
802    if data[_TAG_STR] != 'hlg10Response':
803      raise error_util.CameraItsError('Failed to query HLG10 support')
804    return data[_STR_VALUE_STR] == 'true'
805
806  def is_p3_capture_supported(self):
807    """Query whether the camera device supports P3 image capture.
808
809    Returns:
810      Boolean: True, if device supports P3 image capture, False in
811      all other cases.
812    """
813    cmd = {}
814    cmd[_CMD_NAME_STR] = 'isP3Supported'
815    cmd[_CAMERA_ID_STR] = self._camera_id
816    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
817
818    data, _ = self.__read_response_from_socket()
819    if data[_TAG_STR] != 'p3Response':
820      raise error_util.CameraItsError('Failed to query P3 support')
821    return data[_STR_VALUE_STR] == 'true'
822
823  def is_landscape_to_portrait_enabled(self):
824    """Query whether the device has enabled the landscape to portrait property.
825
826    Returns:
827      Boolean: True, if the device has the system property enabled. False
828      otherwise.
829    """
830    cmd = {}
831    cmd[_CMD_NAME_STR] = 'isLandscapeToPortraitEnabled'
832    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
833
834    data, _ = self.__read_response_from_socket()
835    if data[_TAG_STR] != 'landscapeToPortraitEnabledResponse':
836      raise error_util.CameraItsError(
837          'Failed to query landscape to portrait system property')
838    return data[_STR_VALUE_STR] == 'true'
839
840  def get_supported_video_sizes_capped(self, camera_id):
841    """Get the supported video sizes for camera id.
842
843    Args:
844      camera_id: int; device id
845    Returns:
846      Sorted list of supported video sizes.
847    """
848
849    cmd = {
850        _CMD_NAME_STR: 'doGetSupportedVideoSizesCapped',
851        _CAMERA_ID_STR: camera_id,
852    }
853    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
854    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
855    self.sock.settimeout(timeout)
856    data, _ = self.__read_response_from_socket()
857    if data[_TAG_STR] != 'supportedVideoSizes':
858      raise error_util.CameraItsError('Invalid command response')
859    if not data[_STR_VALUE_STR]:
860      raise error_util.CameraItsError('No supported video sizes')
861    return data[_STR_VALUE_STR].split(';')
862
863  def do_basic_recording(self, profile_id, quality, duration,
864                         video_stabilization_mode=0, hlg10_enabled=False,
865                         zoom_ratio=None, ae_target_fps_min=None,
866                         ae_target_fps_max=None, antibanding_mode=None,
867                         face_detect_mode=None):
868    """Issue a recording request and read back the video recording object.
869
870    The recording will be done with the format specified in quality. These
871    quality levels correspond to the profiles listed in CamcorderProfile.
872    The duration is the time in seconds for which the video will be recorded.
873    The recorded object consists of a path on the device at which the
874    recorded video is saved.
875
876    Args:
877      profile_id: int; profile id corresponding to the quality level.
878      quality: Video recording quality such as High, Low, VGA.
879      duration: The time in seconds for which the video will be recorded.
880      video_stabilization_mode: Video stabilization mode ON/OFF. Value can be
881      0: 'OFF', 1: 'ON', 2: 'PREVIEW'
882      hlg10_enabled: boolean: True Enable 10-bit HLG video recording, False
883      record using the regular SDR profile
884      zoom_ratio: float; zoom ratio. None if default zoom
885      ae_target_fps_min: int; CONTROL_AE_TARGET_FPS_RANGE min. Set if not None
886      ae_target_fps_max: int; CONTROL_AE_TARGET_FPS_RANGE max. Set if not None
887      antibanding_mode: int; CONTROL_AE_ANTIBANDING_MODE. Set if not None
888      face_detect_mode: int; STATISTICS_FACE_DETECT_MODE. Set if not None
889    Returns:
890      video_recorded_object: The recorded object returned from ItsService which
891      contains path at which the recording is saved on the device, quality of
892      the recorded video, video size of the recorded video, video frame rate
893      and 'hlg10' if 'hlg10_enabled' is set to True.
894      Ex:
895      VideoRecordingObject: {
896        'tag': 'recordingResponse',
897        'objValue': {
898          'recordedOutputPath':
899            '/storage/emulated/0/Android/data/com.android.cts.verifier'
900            '/files/VideoITS/VID_20220324_080414_0_CIF_352x288.mp4',
901          'quality': 'CIF',
902          'videoFrameRate': 30,
903          'videoSize': '352x288'
904        }
905      }
906    """
907    cmd = {_CMD_NAME_STR: 'doBasicRecording', _CAMERA_ID_STR: self._camera_id,
908           'profileId': profile_id, 'quality': quality,
909           'recordingDuration': duration,
910           'videoStabilizationMode': video_stabilization_mode,
911           'hlg10Enabled': hlg10_enabled}
912    if zoom_ratio:
913      if self.zoom_ratio_within_range(zoom_ratio):
914        cmd['zoomRatio'] = zoom_ratio
915      else:
916        raise AssertionError(f'Zoom ratio {zoom_ratio} out of range')
917    if ae_target_fps_min and ae_target_fps_max:
918      cmd['aeTargetFpsMin'] = ae_target_fps_min
919      cmd['aeTargetFpsMax'] = ae_target_fps_max
920    if antibanding_mode:
921      cmd['aeAntibandingMode'] = antibanding_mode
922    else:
923      cmd['aeAntibandingMode'] = 0
924    if face_detect_mode:
925      cmd['faceDetectMode'] = face_detect_mode
926    else:
927      cmd['faceDetectMode'] = 0
928    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
929    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
930    self.sock.settimeout(timeout)
931    data, _ = self.__read_response_from_socket()
932    if data[_TAG_STR] != 'recordingResponse':
933      raise error_util.CameraItsError(
934          f'Invalid response for command: {cmd[_CMD_NAME_STR]}')
935    return data[_OBJ_VALUE_STR]
936
937  def _execute_preview_recording(self, cmd):
938    """Send preview recording command over socket and retrieve output object.
939
940    Args:
941      cmd: dict; Mapping from command key to corresponding value
942    Returns:
943      video_recorded_object: The recorded object returned from ItsService which
944      contains path at which the recording is saved on the device, quality of
945      the recorded video which is always set to "preview", video size of the
946      recorded video, video frame rate.
947      Ex:
948      VideoRecordingObject: {
949        'tag': 'recordingResponse',
950        'objValue': {
951          'recordedOutputPath': '/storage/emulated/0/Android/data/'
952                                'com.android.cts.verifier/files/VideoITS/'
953                                'VID_20220324_080414_0_CIF_352x288.mp4',
954          'quality': 'preview',
955          'videoSize': '352x288'
956        }
957      }
958    """
959    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
960    timeout = (self.SOCK_TIMEOUT_PREVIEW +
961               self.EXTRA_SOCK_TIMEOUT * _EXTRA_TIMEOUT_FACTOR)
962    self.sock.settimeout(timeout)
963
964    data, _ = self.__read_response_from_socket()
965    logging.debug('VideoRecordingObject: %s', str(data))
966    if data[_TAG_STR] != 'recordingResponse':
967      raise error_util.CameraItsError(
968          f'Invalid response from command{cmd[_CMD_NAME_STR]}')
969    return data[_OBJ_VALUE_STR]
970
971  def do_preview_recording_multiple_surfaces(
972      self, output_surfaces, duration, stabilize, ois=False,
973      zoom_ratio=None, ae_target_fps_min=None, ae_target_fps_max=None,
974      antibanding_mode=None, face_detect_mode=None):
975    """Issue a preview request and read back the preview recording object.
976
977    The resolution of the preview and its recording will be determined by
978    video_size. The duration is the time in seconds for which the preview will
979    be recorded. The recorded object consists of a path on the device at
980    which the recorded video is saved.
981
982    Args:
983      output_surfaces: list; The list of output surfaces used for creating
984                             preview recording session. The first surface
985                             is used for recording.
986      duration: int; The time in seconds for which the video will be recorded.
987      stabilize: boolean; Whether the preview should be stabilized or not
988      ois: boolean; Whether the preview should be optically stabilized or not
989      zoom_ratio: float; static zoom ratio. None if default zoom
990      ae_target_fps_min: int; CONTROL_AE_TARGET_FPS_RANGE min. Set if not None
991      ae_target_fps_max: int; CONTROL_AE_TARGET_FPS_RANGE max. Set if not None
992      antibanding_mode: int; CONTROL_AE_ANTIBANDING_MODE. Set if not None
993      face_detect_mode: int; STATISTICS_FACE_DETECT_MODE. Set if not None
994    Returns:
995      video_recorded_object: The recorded object returned from ItsService
996    """
997    cam_id = self._camera_id
998    if 'physicalCamera' in output_surfaces[0]:
999      cam_id = output_surfaces[0]['physicalCamera']
1000    cmd = {
1001        _CMD_NAME_STR: 'doStaticPreviewRecording',
1002        _CAMERA_ID_STR: cam_id,
1003        'outputSurfaces': output_surfaces,
1004        'recordingDuration': duration,
1005        'stabilize': stabilize,
1006        'ois': ois,
1007    }
1008    if zoom_ratio:
1009      if self.zoom_ratio_within_range(zoom_ratio):
1010        cmd['zoomRatio'] = zoom_ratio
1011      else:
1012        raise AssertionError(f'Zoom ratio {zoom_ratio} out of range')
1013    if ae_target_fps_min and ae_target_fps_max:
1014      cmd['aeTargetFpsMin'] = ae_target_fps_min
1015      cmd['aeTargetFpsMax'] = ae_target_fps_max
1016    if antibanding_mode is not None:
1017      cmd['aeAntibandingMode'] = antibanding_mode
1018    if face_detect_mode is not None:
1019      cmd['faceDetectMode'] = face_detect_mode
1020    return self._execute_preview_recording(cmd)
1021
1022  def do_preview_recording(
1023      self, video_size, duration, stabilize, ois=False, zoom_ratio=None,
1024      ae_target_fps_min=None, ae_target_fps_max=None, hlg10_enabled=False,
1025      antibanding_mode=None, face_detect_mode=None):
1026    """Issue a preview request and read back the preview recording object.
1027
1028    The resolution of the preview and its recording will be determined by
1029    video_size. The duration is the time in seconds for which the preview will
1030    be recorded. The recorded object consists of a path on the device at
1031    which the recorded video is saved.
1032
1033    Args:
1034      video_size: str; Preview resolution at which to record. ex. "1920x1080"
1035      duration: int; The time in seconds for which the video will be recorded.
1036      stabilize: boolean; Whether the preview should be stabilized or not
1037      ois: boolean; Whether the preview should be optically stabilized or not
1038      zoom_ratio: float; static zoom ratio. None if default zoom
1039      ae_target_fps_min: int; CONTROL_AE_TARGET_FPS_RANGE min. Set if not None
1040      ae_target_fps_max: int; CONTROL_AE_TARGET_FPS_RANGE max. Set if not None
1041      hlg10_enabled: boolean; True Eanable 10-bit HLG video recording, False
1042                              record using the regular SDK profile.
1043      antibanding_mode: int; CONTROL_AE_ANTIBANDING_MODE. Set if not None
1044      face_detect_mode: int; STATISTICS_FACE_DETECT_MODE. Set if not None
1045    Returns:
1046      video_recorded_object: The recorded object returned from ItsService
1047    """
1048    output_surfaces = self.preview_surface(video_size, hlg10_enabled)
1049    return self.do_preview_recording_multiple_surfaces(
1050        output_surfaces, duration, stabilize, ois, zoom_ratio,
1051        ae_target_fps_min, ae_target_fps_max, antibanding_mode,
1052        face_detect_mode)
1053
1054  def do_preview_recording_with_dynamic_zoom(self, video_size, stabilize,
1055                                             sweep_zoom,
1056                                             ae_target_fps_min=None,
1057                                             ae_target_fps_max=None,
1058                                             padded_frames=False):
1059    """Issue a preview request with dynamic zoom and read back output object.
1060
1061    The resolution of the preview and its recording will be determined by
1062    video_size. The duration will be determined by the duration at each zoom
1063    ratio and the total number of zoom ratios. The recorded object consists
1064    of a path on the device at which the recorded video is saved.
1065
1066    Args:
1067      video_size: str; Preview resolution at which to record. ex. "1920x1080"
1068      stabilize: boolean; Whether the preview should be stabilized or not
1069      sweep_zoom: tuple of (zoom_start, zoom_end, step_size, step_duration).
1070        Used to control zoom ratio during recording.
1071        zoom_start (float) is the starting zoom ratio during recording
1072        zoom_end (float) is the ending zoom ratio during recording
1073        step_size (float) is the step for zoom ratio during recording
1074        step_duration (float) sleep in ms between zoom ratios
1075      ae_target_fps_min: int; CONTROL_AE_TARGET_FPS_RANGE min. Set if not None
1076      ae_target_fps_max: int; CONTROL_AE_TARGET_FPS_RANGE max. Set if not None
1077      padded_frames: boolean; Whether to add additional frames at the beginning
1078        and end of recording to workaround issue with MediaRecorder.
1079    Returns:
1080      video_recorded_object: The recorded object returned from ItsService
1081    """
1082    output_surface = self.preview_surface(video_size)
1083    cmd = {
1084        _CMD_NAME_STR: 'doDynamicZoomPreviewRecording',
1085        _CAMERA_ID_STR: self._camera_id,
1086        'outputSurfaces': output_surface,
1087        'stabilize': stabilize,
1088        'ois': False
1089    }
1090    zoom_start, zoom_end, step_size, step_duration = sweep_zoom
1091    if (not self.zoom_ratio_within_range(zoom_start) or
1092        not self.zoom_ratio_within_range(zoom_end)):
1093      raise AssertionError(
1094          f'Starting zoom ratio {zoom_start} or '
1095          f'ending zoom ratio {zoom_end} out of range'
1096      )
1097    if zoom_start > zoom_end or step_size < 0:
1098      raise NotImplementedError('Only increasing zoom ratios are supported')
1099    cmd['zoomStart'] = zoom_start
1100    cmd['zoomEnd'] = zoom_end
1101    cmd['stepSize'] = step_size
1102    cmd['stepDuration'] = step_duration
1103    cmd['hlg10Enabled'] = False
1104    cmd['paddedFrames'] = padded_frames
1105    if ae_target_fps_min and ae_target_fps_max:
1106      cmd['aeTargetFpsMin'] = ae_target_fps_min
1107      cmd['aeTargetFpsMax'] = ae_target_fps_max
1108    return self._execute_preview_recording(cmd)
1109
1110  def do_preview_recording_with_dynamic_ae_awb_region(
1111      self, video_size, ae_awb_regions, ae_awb_region_duration, stabilize=False,
1112      ae_target_fps_min=None, ae_target_fps_max=None):
1113    """Issue a preview request with dynamic 3A region and read back output object.
1114
1115    The resolution of the preview and its recording will be determined by
1116    video_size. The recorded object consists of a path on the device at which
1117    the recorded video is saved.
1118
1119    Args:
1120      video_size: str; Preview resolution at which to record. ex. "1920x1080"
1121      ae_awb_regions: dictionary of (aeAwbRegionOne/Two/Three/Four).
1122        Used to control 3A region during recording.
1123        aeAwbRegionOne (metering rectangle) first ae/awb region of recording.
1124        aeAwbRegionTwo (metering rectangle) second ae/awb region of recording.
1125        aeAwbRegionThree (metering rectangle) third ae/awb region of recording.
1126        aeAwbRegionFour (metering rectangle) fourth ae/awb region of recording.
1127      ae_awb_region_duration: float; sleep in ms between 3A regions.
1128      stabilize: boolean; Whether the preview should be stabilized.
1129      ae_target_fps_min: int; If not none, set CONTROL_AE_TARGET_FPS_RANGE min.
1130      ae_target_fps_max: int; If not none, set CONTROL_AE_TARGET_FPS_RANGE max.
1131    Returns:
1132      video_recorded_object: The recorded object returned from ItsService.
1133    """
1134    output_surface = self.preview_surface(video_size)
1135    cmd = {
1136        _CMD_NAME_STR: 'doDynamicMeteringRegionPreviewRecording',
1137        _CAMERA_ID_STR: self._camera_id,
1138        'outputSurfaces': output_surface,
1139        'stabilize': stabilize,
1140        'ois': False,
1141        'aeAwbRegionDuration': ae_awb_region_duration
1142    }
1143
1144    cmd['aeAwbRegionOne'] = ae_awb_regions['aeAwbRegionOne']
1145    cmd['aeAwbRegionTwo'] = ae_awb_regions['aeAwbRegionTwo']
1146    cmd['aeAwbRegionThree'] = ae_awb_regions['aeAwbRegionThree']
1147    cmd['aeAwbRegionFour'] = ae_awb_regions['aeAwbRegionFour']
1148    cmd['hlg10Enabled'] = False
1149    if ae_target_fps_min and ae_target_fps_max:
1150      cmd['aeTargetFpsMin'] = ae_target_fps_min
1151      cmd['aeTargetFpsMax'] = ae_target_fps_max
1152    return self._execute_preview_recording(cmd)
1153
1154  def get_supported_video_qualities(self, camera_id):
1155    """Get all supported video qualities for this camera device.
1156
1157    ie. ['480:4', '1080:6', '2160:8', '720:5', 'CIF:3', 'HIGH:1', 'LOW:0',
1158         'QCIF:2', 'QVGA:7']
1159
1160    Args:
1161      camera_id: device id
1162    Returns:
1163      List of all supported video qualities and corresponding profileIds.
1164    """
1165    cmd = {}
1166    cmd[_CMD_NAME_STR] = 'getSupportedVideoQualities'
1167    cmd[_CAMERA_ID_STR] = camera_id
1168    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1169    data, _ = self.__read_response_from_socket()
1170    if data[_TAG_STR] != 'supportedVideoQualities':
1171      raise error_util.CameraItsError('Invalid command response')
1172    return data[_STR_VALUE_STR].split(';')[:-1]  # remove the last appended ';'
1173
1174  def get_all_supported_preview_sizes(self, camera_id, filter_recordable=False):
1175    """Get all supported preview resolutions for this camera device.
1176
1177    ie. ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
1178
1179    Note: resolutions are sorted by width x height in ascending order
1180
1181    Args:
1182      camera_id: int; device id
1183      filter_recordable: filter preview sizes if supported for video recording
1184                       using MediaRecorder
1185
1186    Returns:
1187      List of all supported preview resolutions in ascending order.
1188    """
1189    cmd = {
1190        _CMD_NAME_STR: 'getSupportedPreviewSizes',
1191        _CAMERA_ID_STR: camera_id,
1192        'filter_recordable': filter_recordable,
1193    }
1194    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1195    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1196    self.sock.settimeout(timeout)
1197    data, _ = self.__read_response_from_socket()
1198    if data[_TAG_STR] != 'supportedPreviewSizes':
1199      raise error_util.CameraItsError('Invalid command response')
1200    if not data[_STR_VALUE_STR]:
1201      raise error_util.CameraItsError('No supported preview sizes')
1202    supported_preview_sizes = data[_STR_VALUE_STR].split(';')
1203    logging.debug('Supported preview sizes: %s', supported_preview_sizes)
1204    return supported_preview_sizes
1205
1206  def get_supported_preview_sizes(self, camera_id):
1207    """Get supported preview resolutions for this camera device.
1208
1209    ie. ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
1210
1211    Note: resolutions are sorted by width x height in ascending order
1212    Note: max resolution is capped at 1440x1920.
1213    Note: min resolution is capped at 320x240.
1214
1215    Args:
1216      camera_id: int; device id
1217
1218    Returns:
1219      List of all supported preview resolutions with floor & ceiling set
1220      by _CONSTANTS in ascending order.
1221    """
1222    supported_preview_sizes = self.get_all_supported_preview_sizes(camera_id)
1223    resolution_to_area = lambda s: int(s.split('x')[0])*int(s.split('x')[1])
1224    supported_preview_sizes = [size for size in supported_preview_sizes
1225                               if (resolution_to_area(size)
1226                                   <= PREVIEW_MAX_TESTED_AREA
1227                                   and resolution_to_area(size)
1228                                   >= PREVIEW_MIN_TESTED_AREA)]
1229    logging.debug(
1230        'Supported preview sizes (MIN: %d, MAX: %d area in pixels): %s',
1231        PREVIEW_MIN_TESTED_AREA, PREVIEW_MAX_TESTED_AREA,
1232        supported_preview_sizes
1233    )
1234    return supported_preview_sizes
1235
1236  def get_supported_extension_preview_sizes(self, camera_id, extension):
1237    """Get all supported preview resolutions for the extension mode.
1238
1239    ie. ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
1240
1241    Note: resolutions are sorted by width x height in ascending order
1242
1243    Args:
1244      camera_id: int; device id
1245      extension: int; camera extension mode
1246
1247    Returns:
1248      List of all supported camera extension preview resolutions in
1249      ascending order.
1250    """
1251    cmd = {
1252        _CMD_NAME_STR: 'getSupportedExtensionPreviewSizes',
1253        _CAMERA_ID_STR: camera_id,
1254        "extension": extension  # pylint: disable=g-inconsistent-quotes
1255    }
1256    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1257    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1258    self.sock.settimeout(timeout)
1259    data, _ = self.__read_response_from_socket()
1260    if data[_TAG_STR] != 'supportedExtensionPreviewSizes':
1261      raise error_util.CameraItsError('Invalid command response')
1262    if not data[_STR_VALUE_STR]:
1263      raise error_util.CameraItsError('No supported extension preview sizes')
1264    supported_preview_sizes = data[_STR_VALUE_STR].split(';')
1265    logging.debug('Supported extension preview sizes: %s', supported_preview_sizes)
1266    return supported_preview_sizes
1267
1268  def get_queryable_stream_combinations(self):
1269    """Get all queryable stream combinations for this camera device.
1270
1271    This function parses the queryable stream combinations string
1272    returned from ItsService. The return value includes both the
1273    string and the parsed result.
1274
1275    One example of the queryable stream combination string is:
1276
1277    'priv:1920x1080+jpeg:4032x2268;priv:1280x720+priv:1280x720'
1278
1279    which can be parsed to:
1280
1281    [
1282      {
1283       "name": "priv:1920x1080+jpeg:4032x2268",
1284       "combination": [
1285                        {
1286                         "format": "priv",
1287                         "size": "1920x1080"
1288                        }
1289                        {
1290                         "format": "jpeg",
1291                         "size": "4032x2268"
1292                        }
1293                      ]
1294      }
1295      {
1296       "name": "priv:1280x720+priv:1280x720",
1297       "combination": [
1298                        {
1299                         "format": "priv",
1300                         "size": "1280x720"
1301                        },
1302                        {
1303                         "format": "priv",
1304                         "size": "1280x720"
1305                        }
1306                      ]
1307      }
1308    ]
1309
1310    Returns:
1311      Tuple of:
1312      - queryable stream combination string, and
1313      - parsed stream combinations
1314    """
1315    cmd = {
1316        _CMD_NAME_STR: 'getQueryableStreamCombinations',
1317    }
1318    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1319    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1320    self.sock.settimeout(timeout)
1321    data, _ = self.__read_response_from_socket()
1322    if data[_TAG_STR] != 'queryableStreamCombinations':
1323      raise error_util.CameraItsError('Invalid command response')
1324    if not data[_STR_VALUE_STR]:
1325      raise error_util.CameraItsError('No queryable stream combinations')
1326
1327    # Parse the stream combination string
1328    combinations = [{
1329        'name': c, 'combination': [
1330            {'format': s.split(':')[0],
1331             'size': s.split(':')[1]} for s in c.split('+')]}
1332                    for c in data[_STR_VALUE_STR].split(';')]
1333
1334    return data[_STR_VALUE_STR], combinations
1335
1336  def get_supported_extensions(self, camera_id):
1337    """Get all supported camera extensions for this camera device.
1338
1339    ie. [EXTENSION_AUTOMATIC, EXTENSION_BOKEH,
1340         EXTENSION_FACE_RETOUCH, EXTENSION_HDR, EXTENSION_NIGHT]
1341    where EXTENSION_AUTOMATIC is 0, EXTENSION_BOKEH is 1, etc.
1342
1343    Args:
1344      camera_id: int; device ID
1345    Returns:
1346      List of all supported extensions (as int) in ascending order.
1347    """
1348    cmd = {
1349        'cmdName': 'getSupportedExtensions',
1350        'cameraId': camera_id
1351    }
1352    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1353    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1354    self.sock.settimeout(timeout)
1355    data, _ = self.__read_response_from_socket()
1356    if data['tag'] != 'supportedExtensions':
1357      raise error_util.CameraItsError('Invalid command response')
1358    if not data['strValue']:
1359      raise error_util.CameraItsError('No supported extensions')
1360    return [int(x) for x in str(data['strValue'][1:-1]).split(', ') if x]
1361
1362  def get_supported_extension_sizes(self, camera_id, extension, image_format):
1363    """Get all supported camera sizes for this camera, extension, and format.
1364
1365    Sorts in ascending order according to area, i.e.
1366    ['640x480', '800x600', '1280x720', '1440x1080', '1920x1080']
1367
1368    Args:
1369      camera_id: int; device ID
1370      extension: int; the integer value of the extension.
1371      image_format: int; the integer value of the format.
1372    Returns:
1373      List of sizes supported for this camera, extension, and format.
1374    """
1375    cmd = {
1376        'cmdName': 'getSupportedExtensionSizes',
1377        'cameraId': camera_id,
1378        'extension': extension,
1379        'format': image_format
1380    }
1381    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1382    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1383    self.sock.settimeout(timeout)
1384    data, _ = self.__read_response_from_socket()
1385    if data[_TAG_STR] != 'supportedExtensionSizes':
1386      raise error_util.CameraItsError('Invalid command response')
1387    if not data[_STR_VALUE_STR]:
1388      logging.debug('No supported extension sizes')
1389      return ''
1390    return data[_STR_VALUE_STR].split(';')
1391
1392  def get_display_size(self):
1393    """Get the display size of the screen.
1394
1395    Returns:
1396      The size of the display resolution in pixels.
1397    """
1398    cmd = {
1399        'cmdName': 'getDisplaySize'
1400    }
1401    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1402    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1403    self.sock.settimeout(timeout)
1404    data, _ = self.__read_response_from_socket()
1405    if data['tag'] != 'displaySize':
1406      raise error_util.CameraItsError('Invalid command response')
1407    if not data['strValue']:
1408      raise error_util.CameraItsError('No display size')
1409    return data['strValue'].split('x')
1410
1411  def get_max_camcorder_profile_size(self, camera_id):
1412    """Get the maximum camcorder profile size for this camera device.
1413
1414    Args:
1415      camera_id: int; device id
1416    Returns:
1417      The maximum size among all camcorder profiles supported by this camera.
1418    """
1419    cmd = {
1420        'cmdName': 'getMaxCamcorderProfileSize',
1421        'cameraId': camera_id
1422    }
1423    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1424    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
1425    self.sock.settimeout(timeout)
1426    data, _ = self.__read_response_from_socket()
1427    if data['tag'] != 'maxCamcorderProfileSize':
1428      raise error_util.CameraItsError('Invalid command response')
1429    if not data['strValue']:
1430      raise error_util.CameraItsError('No max camcorder profile size')
1431    return data['strValue'].split('x')
1432
1433  def do_simple_capture(self, cmd, out_surface):
1434    """Issue single capture request via command and read back image/metadata.
1435
1436    Args:
1437      cmd: Dictionary specifying command name, requests, and output surface.
1438      out_surface: Dictionary describing output surface.
1439    Returns:
1440      An object which contains following fields:
1441      * data: the image data as a numpy array of bytes.
1442      * width: the width of the captured image.
1443      * height: the height of the captured image.
1444      * format: image format
1445      * metadata: the capture result object
1446    """
1447    fmt = out_surface['format'] if 'format' in out_surface else 'yuv'
1448    if fmt == 'jpg': fmt = 'jpeg'
1449
1450    # we only have 1 capture request and 1 surface by definition.
1451    ncap = SINGLE_CAPTURE_NCAP
1452
1453    cam_id = None
1454    bufs = {}
1455    yuv_bufs = {}
1456    if self._hidden_physical_id:
1457      out_surface['physicalCamera'] = self._hidden_physical_id
1458
1459    if 'physicalCamera' in out_surface:
1460      cam_id = out_surface['physicalCamera']
1461    else:
1462      cam_id = self._camera_id
1463
1464    bufs[cam_id] = {
1465        'raw': [],
1466        'raw10': [],
1467        'raw12': [],
1468        'rawStats': [],
1469        'dng': [],
1470        'jpeg': [],
1471        'y8': [],
1472        'rawQuadBayer': [],
1473        'rawQuadBayerStats': [],
1474        'raw10Stats': [],
1475        'raw10QuadBayerStats': [],
1476        'raw10QuadBayer': [],
1477    }
1478
1479    # Only allow yuv output to multiple targets
1480    yuv_surface = None
1481    if cam_id == self._camera_id:
1482      if 'physicalCamera' not in out_surface:
1483        if out_surface['format'] == 'yuv':
1484          yuv_surface = out_surface
1485    else:
1486      if ('physicalCamera' in out_surface and
1487          out_surface['physicalCamera'] == cam_id):
1488        if out_surface['format'] == 'yuv':
1489          yuv_surface = out_surface
1490
1491    # Compute the buffer size of YUV targets
1492    yuv_maxsize_1d = 0
1493    if yuv_surface is not None:
1494      if ('width' not in yuv_surface and 'height' not in yuv_surface):
1495        if self.props is None:
1496          raise error_util.CameraItsError('Camera props are unavailable')
1497        yuv_maxsize_2d = capture_request_utils.get_available_output_sizes(
1498            'yuv', self.props)[0]
1499        # YUV420 size = 1.5 bytes per pixel
1500        yuv_maxsize_1d = (yuv_maxsize_2d[0] * yuv_maxsize_2d[1] * 3) // 2
1501      if 'width' in yuv_surface and 'height' in yuv_surface:
1502        yuv_size = (yuv_surface['width'] * yuv_surface['height'] * 3) // 2
1503      else:
1504        yuv_size = yuv_maxsize_1d
1505
1506      yuv_bufs[cam_id] = {yuv_size: []}
1507
1508    cam_ids = self._camera_id
1509    self.sock.settimeout(self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT)
1510    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
1511
1512    nbufs = 0
1513    md = None
1514    physical_md = None
1515    width = None
1516    height = None
1517    capture_results_returned = False
1518    while (nbufs < ncap) or (not capture_results_returned):
1519      json_obj, buf = self.__read_response_from_socket()
1520      if (json_obj[_TAG_STR] in ItsSession.IMAGE_FORMAT_LIST_1 and
1521          buf is not None):
1522        fmt = json_obj[_TAG_STR][:-5]
1523        bufs[self._camera_id][fmt].append(buf)
1524        nbufs += 1
1525      elif json_obj[_TAG_STR] == 'yuvImage':
1526        buf_size = get_array_size(buf)
1527        yuv_bufs[self._camera_id][buf_size].append(buf)
1528        nbufs += 1
1529      elif json_obj[_TAG_STR] == 'captureResults':
1530        capture_results_returned = True
1531        md = json_obj[_OBJ_VALUE_STR]['captureResult']
1532        physical_md = json_obj[_OBJ_VALUE_STR]['physicalResults']
1533        outputs = json_obj[_OBJ_VALUE_STR]['outputs']
1534        returned_fmt = outputs[0]['format']
1535        if fmt != returned_fmt:
1536          raise AssertionError(
1537              f'Incorrect format. Requested: {fmt}, '
1538              f'Received: {returned_fmt}')
1539        width = outputs[0]['width']
1540        height = outputs[0]['height']
1541        requested_width = out_surface['width']
1542        requested_height = out_surface['height']
1543        if requested_width != width or requested_height != height:
1544          raise AssertionError(
1545              'Incorrect size. '
1546              f'Requested: {requested_width}x{requested_height}, '
1547              f'Received: {width}x{height}')
1548      else:
1549        tag_string = unicodedata.normalize('NFKD', json_obj[_TAG_STR]).encode(
1550            'ascii', 'ignore')
1551        for x in ItsSession.IMAGE_FORMAT_LIST_2:
1552          x = bytes(x, encoding='utf-8')
1553          if tag_string.startswith(x):
1554            if x == b'yuvImage':
1555              physical_id = json_obj[_TAG_STR][len(x):]
1556              if physical_id in cam_ids:
1557                buf_size = get_array_size(buf)
1558                yuv_bufs[physical_id][buf_size].append(buf)
1559                nbufs += 1
1560            else:
1561              physical_id = json_obj[_TAG_STR][len(x):]
1562              if physical_id in cam_ids:
1563                fmt = x[:-5].decode('UTF-8')
1564                bufs[physical_id][fmt].append(buf)
1565                nbufs += 1
1566
1567    if 'physicalCamera' in out_surface:
1568      cam_id = out_surface['physicalCamera']
1569    else:
1570      cam_id = self._camera_id
1571    ret = {'width': width, 'height': height, 'format': fmt}
1572    if cam_id == self._camera_id:
1573      ret['metadata'] = md
1574    else:
1575      if cam_id in physical_md:
1576        ret['metadata'] = physical_md[cam_id]
1577
1578    if fmt == 'yuv':
1579      buf_size = (width * height * 3) // 2
1580      ret['data'] = yuv_bufs[cam_id][buf_size][0]
1581    else:
1582      ret['data'] = bufs[cam_id][fmt][0]
1583
1584    return ret
1585
1586  def do_jca_capture(self, dut, log_path, flash, facing):
1587    """Take a capture using JCA, modifying capture settings using the UI.
1588
1589    Selects UI elements to modify settings, and presses the capture button.
1590    Reads response from socket containing the capture path, and
1591    pulls the image from the DUT.
1592
1593    This method is included here because an ITS session is needed to retrieve
1594    the capture path from the device.
1595
1596    Args:
1597      dut: An Android controller device object.
1598      log_path: str; log path to save screenshots.
1599      flash: str; constant describing the desired flash mode.
1600        Acceptable values: 'OFF' and 'AUTO'.
1601      facing: str; constant describing the direction the camera lens faces.
1602        Acceptable values: camera_properties_utils.LENS_FACING[BACK, FRONT]
1603    Returns:
1604      The host-side path of the capture.
1605    """
1606    ui_interaction_utils.open_jca_viewfinder(dut, log_path)
1607    ui_interaction_utils.switch_jca_camera(dut, log_path, facing)
1608    # Bring up settings, switch flash mode, and close settings
1609    dut.ui(res=ui_interaction_utils.QUICK_SETTINGS_RESOURCE_ID).click()
1610    if flash not in ui_interaction_utils.FLASH_MODE_TO_CLICKS:
1611      raise ValueError(f'Flash mode {flash} not supported')
1612    for _ in range(ui_interaction_utils.FLASH_MODE_TO_CLICKS[flash]):
1613      dut.ui(res=ui_interaction_utils.QUICK_SET_FLASH_RESOURCE_ID).click()
1614    dut.take_screenshot(log_path, prefix='flash_mode_set')
1615    dut.ui(res=ui_interaction_utils.QUICK_SETTINGS_RESOURCE_ID).click()
1616    # Take capture
1617    dut.ui(res=ui_interaction_utils.CAPTURE_BUTTON_RESOURCE_ID).click()
1618    return self.get_and_pull_jca_capture(dut, log_path)
1619
1620  def do_jca_video_capture(self, dut, log_path, duration):
1621    """Take a capture using JCA using the UI.
1622
1623    Captures JCA video by holding the capture button with requested duration.
1624    Reads response from socket containing the capture path, and
1625    pulls the image from the DUT.
1626
1627    This method is included here because an ITS session is needed to retrieve
1628    the capture path from the device.
1629
1630    Args:
1631      dut: An Android controller device object.
1632      log_path: str; log path to save screenshots.
1633      duration: int; requested video duration, in ms.
1634    Returns:
1635      The host-side path of the capture.
1636    """
1637    # Make sure JCA is started
1638    jca_capture_button_visible = dut.ui(
1639        res=ui_interaction_utils.CAPTURE_BUTTON_RESOURCE_ID).wait.exists(
1640            ui_interaction_utils.UI_OBJECT_WAIT_TIME_SECONDS)
1641    if not jca_capture_button_visible:
1642      raise AssertionError('JCA was not started! Please use'
1643                           'open_jca_viewfinder() or do_jca_video_setup()'
1644                           'in ui_interaction_utils.py to start JCA.')
1645    dut.ui(res=ui_interaction_utils.CAPTURE_BUTTON_RESOURCE_ID).click(duration)
1646    return self.get_and_pull_jca_capture(dut, log_path)
1647
1648  def get_and_pull_jca_capture(self, dut, log_path):
1649    """Retrieve a capture path from the socket and pulls capture to host.
1650
1651    Args:
1652      dut: An Android controller device object.
1653      log_path: str; log path to save screenshots.
1654    Returns:
1655      The host-side path of the capture.
1656    Raises:
1657      CameraItsError: If unexpected data is retrieved from the socket.
1658    """
1659    capture_path, capture_status = None, None
1660    while not capture_path or not capture_status:
1661      data, _ = self.__read_response_from_socket()
1662      if data[_TAG_STR] == JCA_CAPTURE_PATH_TAG:
1663        capture_path = data[_STR_VALUE_STR]
1664      elif data[_TAG_STR] == JCA_CAPTURE_STATUS_TAG:
1665        capture_status = data[_STR_VALUE_STR]
1666      else:
1667        raise error_util.CameraItsError(
1668            f'Invalid response {data[_TAG_STR]} for JCA capture')
1669    if capture_status != RESULT_OK_STATUS:
1670      logging.error('Capture failed! Expected status %d, received %d',
1671                    RESULT_OK_STATUS, capture_status)
1672    logging.debug('capture path: %s', capture_path)
1673    _, capture_name = os.path.split(capture_path)
1674    its_device_utils.run(f'adb -s {dut.serial} pull {capture_path} {log_path}')
1675    return os.path.join(log_path, capture_name)
1676
1677  def do_capture_with_flash(self,
1678                            preview_request_start,
1679                            preview_request_idle,
1680                            still_capture_req,
1681                            out_surface):
1682    """Issue capture request with flash and read back the image and metadata.
1683
1684    Captures a single image with still_capture_req as capture request
1685    with flash. It triggers the precapture sequence with preview request
1686    preview_request_start with capture intent preview by setting aePrecapture
1687    trigger to Start. This is followed by repeated preview requests
1688    preview_request_idle with aePrecaptureTrigger set to IDLE.
1689    Once the AE is converged, a single image is captured still_capture_req
1690    during which the flash must be fired.
1691    Note: The part where we read output data from socket is cloned from
1692    do_capture and will be consolidated in U.
1693
1694    Args:
1695      preview_request_start: Preview request with aePrecaptureTrigger set to
1696        Start
1697      preview_request_idle: Preview request with aePrecaptureTrigger set to Idle
1698      still_capture_req: Single still capture request.
1699      out_surface: Specifications of the output image formats and
1700        sizes to use for capture. Supports yuv and jpeg.
1701    Returns:
1702      An object which contains following fields:
1703      * data: the image data as a numpy array of bytes.
1704      * width: the width of the captured image.
1705      * height: the height of the captured image.
1706      * format: image format
1707      * metadata: the capture result object
1708    """
1709    cmd = {}
1710    cmd[_CMD_NAME_STR] = 'doCaptureWithFlash'
1711    cmd['previewRequestStart'] = [preview_request_start]
1712    cmd['previewRequestIdle'] = [preview_request_idle]
1713    cmd['stillCaptureRequest'] = [still_capture_req]
1714    cmd['outputSurfaces'] = [out_surface]
1715    if 'android.control.aeMode' in still_capture_req:
1716      logging.debug('Capturing image with aeMode: %d',
1717                    still_capture_req['android.control.aeMode'])
1718    return self.do_simple_capture(cmd, out_surface)
1719
1720  def do_capture_with_extensions(self,
1721                                 cap_request,
1722                                 extension,
1723                                 out_surface):
1724    """Issue extension capture request(s), and read back image(s) and metadata.
1725
1726    Args:
1727      cap_request: The Python dict/list specifying the capture(s), which will be
1728        converted to JSON and sent to the device.
1729      extension: The extension to be requested.
1730      out_surface: specifications of the output image format and
1731        size to use for the capture.
1732
1733    Returns:
1734      An object, list of objects, or list of lists of objects, where each
1735      object contains the following fields:
1736      * data: the image data as a numpy array of bytes.
1737      * width: the width of the captured image.
1738      * height: the height of the captured image.
1739      * format: image the format, in [
1740                        "yuv","jpeg","raw","raw10","raw12","rawStats","dng"].
1741      * metadata: the capture result object (Python dictionary).
1742    """
1743    cmd = {}
1744    cmd[_CMD_NAME_STR] = 'doCaptureWithExtensions'
1745    cmd['repeatRequests'] = []
1746    cmd['captureRequests'] = [cap_request]
1747    cmd['extension'] = extension
1748    cmd['outputSurfaces'] = [out_surface]
1749
1750    logging.debug('Capturing image with EXTENSIONS.')
1751    return self.do_simple_capture(cmd, out_surface)
1752
1753  def do_capture(self,
1754                 cap_request,
1755                 out_surfaces=None,
1756                 reprocess_format=None,
1757                 repeat_request=None,
1758                 reuse_session=False,
1759                 first_surface_for_3a=False):
1760    """Issue capture request(s), and read back the image(s) and metadata.
1761
1762    The main top-level function for capturing one or more images using the
1763    device. Captures a single image if cap_request is a single object, and
1764    captures a burst if it is a list of objects.
1765
1766    The optional repeat_request field can be used to assign a repeating
1767    request list ran in background for 3 seconds to warm up the capturing
1768    pipeline before start capturing. The repeat_requests will be ran on a
1769    640x480 YUV surface without sending any data back. The caller needs to
1770    make sure the stream configuration defined by out_surfaces and
1771    repeat_request are valid or do_capture may fail because device does not
1772    support such stream configuration.
1773
1774    The out_surfaces field can specify the width(s), height(s), and
1775    format(s) of the captured image. The formats may be "yuv", "jpeg",
1776    "dng", "raw", "raw10", "raw12", "rawStats" or "y8". The default is a
1777    YUV420 frame ("yuv") corresponding to a full sensor frame.
1778
1779    1. Optionally the out_surfaces field can specify physical camera id(s) if
1780    the current camera device is a logical multi-camera. The physical camera
1781    id must refer to a physical camera backing this logical camera device.
1782    2. Optionally The output_surfaces field can also specify the use case(s) if
1783    the current camera device has STREAM_USE_CASE capability.
1784
1785    Note that one or more surfaces can be specified, allowing a capture to
1786    request images back in multiple formats (e.g.) raw+yuv, raw+jpeg,
1787    yuv+jpeg, raw+yuv+jpeg. If the size is omitted for a surface, the
1788    default is the largest resolution available for the format of that
1789    surface. At most one output surface can be specified for a given format,
1790    and raw+dng, raw10+dng, and raw+raw10 are not supported as combinations.
1791
1792    If reprocess_format is not None, for each request, an intermediate
1793    buffer of the given reprocess_format will be captured from camera and
1794    the intermediate buffer will be reprocessed to the output surfaces. The
1795    following settings will be turned off when capturing the intermediate
1796    buffer and will be applied when reprocessing the intermediate buffer.
1797    1. android.noiseReduction.mode
1798    2. android.edge.mode
1799    3. android.reprocess.effectiveExposureFactor
1800
1801    Supported reprocess format are "yuv" and "private". Supported output
1802    surface formats when reprocessing is enabled are "yuv" and "jpeg".
1803
1804    Example of a single capture request:
1805
1806    {
1807     "android.sensor.exposureTime": 100*1000*1000,
1808     "android.sensor.sensitivity": 100
1809    }
1810
1811    Example of a list of capture requests:
1812    [
1813     {
1814       "android.sensor.exposureTime": 100*1000*1000,
1815       "android.sensor.sensitivity": 100
1816     },
1817    {
1818      "android.sensor.exposureTime": 100*1000*1000,
1819       "android.sensor.sensitivity": 200
1820     }
1821    ]
1822
1823    Example of output surface specifications:
1824    {
1825     "width": 640,
1826     "height": 480,
1827     "format": "yuv"
1828    }
1829    [
1830     {
1831       "format": "jpeg"
1832     },
1833     {
1834       "format": "raw"
1835     }
1836    ]
1837
1838    The following variables defined in this class are shortcuts for
1839    specifying one or more formats where each output is the full size for
1840    that format; they can be used as values for the out_surfaces arguments:
1841
1842    CAP_RAW
1843    CAP_DNG
1844    CAP_YUV
1845    CAP_JPEG
1846    CAP_RAW_YUV
1847    CAP_DNG_YUV
1848    CAP_RAW_JPEG
1849    CAP_DNG_JPEG
1850    CAP_YUV_JPEG
1851    CAP_RAW_YUV_JPEG
1852    CAP_DNG_YUV_JPEG
1853
1854    If multiple formats are specified, then this function returns multiple
1855    capture objects, one for each requested format. If multiple formats and
1856    multiple captures (i.e. a burst) are specified, then this function
1857    returns multiple lists of capture objects. In both cases, the order of
1858    the returned objects matches the order of the requested formats in the
1859    out_surfaces parameter. For example:
1860
1861    yuv_cap = do_capture(req1)
1862    yuv_cap = do_capture(req1,yuv_fmt)
1863    yuv_cap, raw_cap = do_capture(req1, [yuv_fmt,raw_fmt])
1864    yuv_caps = do_capture([req1,req2], yuv_fmt)
1865    yuv_caps, raw_caps = do_capture([req1,req2], [yuv_fmt,raw_fmt])
1866
1867    The "rawStats" format processes the raw image and returns a new image
1868    of statistics from the raw image. The format takes additional keys,
1869    "gridWidth" and "gridHeight" which are size of grid cells in a 2D grid
1870    of the raw image. For each grid cell, the mean and variance of each raw
1871    channel is computed, and the do_capture call returns two 4-element float
1872    images of dimensions (rawWidth / gridWidth, rawHeight / gridHeight),
1873    concatenated back-to-back, where the first image contains the 4-channel
1874    means and the second contains the 4-channel variances. Note that only
1875    pixels in the active array crop region are used; pixels outside this
1876    region (for example optical black rows) are cropped out before the
1877    gridding and statistics computation is performed.
1878
1879    For the rawStats format, if the gridWidth is not provided then the raw
1880    image width is used as the default, and similarly for gridHeight. With
1881    this, the following is an example of a output description that computes
1882    the mean and variance across each image row:
1883    {
1884      "gridHeight": 1,
1885      "format": "rawStats"
1886    }
1887
1888    Args:
1889      cap_request: The Python dict/list specifying the capture(s), which will be
1890        converted to JSON and sent to the device.
1891      out_surfaces: (Optional) specifications of the output image formats and
1892        sizes to use for each capture.
1893      reprocess_format: (Optional) The reprocessing format. If not
1894        None,reprocessing will be enabled.
1895      repeat_request: Repeating request list.
1896      reuse_session: True if ItsService.java should try to use
1897        the existing CameraCaptureSession.
1898      first_surface_for_3a: Use first surface in out_surfaces for 3A, not capture
1899        Only applicable if out_surfaces contains at least 1 surface.
1900
1901    Returns:
1902      An object, list of objects, or list of lists of objects, where each
1903      object contains the following fields:
1904      * data: the image data as a numpy array of bytes.
1905      * width: the width of the captured image.
1906      * height: the height of the captured image.
1907      * format: image the format, in [
1908                        "yuv","jpeg","raw","raw10","raw12","rawStats","dng"].
1909      * metadata: the capture result object (Python dictionary).
1910    """
1911    cmd = {}
1912    if reprocess_format is not None:
1913      if repeat_request is not None:
1914        raise error_util.CameraItsError(
1915            'repeating request + reprocessing is not supported')
1916      cmd[_CMD_NAME_STR] = 'doReprocessCapture'
1917      cmd['reprocessFormat'] = reprocess_format
1918    else:
1919      cmd[_CMD_NAME_STR] = 'doCapture'
1920
1921    if repeat_request is None:
1922      cmd['repeatRequests'] = []
1923    elif not isinstance(repeat_request, list):
1924      cmd['repeatRequests'] = [repeat_request]
1925    else:
1926      cmd['repeatRequests'] = repeat_request
1927
1928    if not isinstance(cap_request, list):
1929      cmd['captureRequests'] = [cap_request]
1930    else:
1931      cmd['captureRequests'] = cap_request
1932
1933    if out_surfaces:
1934      if isinstance(out_surfaces, list):
1935        cmd['outputSurfaces'] = out_surfaces
1936      else:
1937        cmd['outputSurfaces'] = [out_surfaces]
1938      formats = [
1939          c['format'] if 'format' in c else 'yuv' for c in cmd['outputSurfaces']
1940      ]
1941      formats = [s if s != 'jpg' else 'jpeg' for s in formats]
1942    else:
1943      max_yuv_size = capture_request_utils.get_available_output_sizes(
1944          'yuv', self.props)[0]
1945      formats = ['yuv']
1946      cmd['outputSurfaces'] = [{
1947          'format': 'yuv',
1948          'width': max_yuv_size[0],
1949          'height': max_yuv_size[1]
1950      }]
1951
1952    cmd['reuseSession'] = reuse_session
1953    cmd['firstSurfaceFor3A'] = first_surface_for_3a
1954
1955    requested_surfaces = cmd['outputSurfaces'][:]
1956    if first_surface_for_3a:
1957      formats.pop(0)
1958      requested_surfaces.pop(0)
1959
1960    ncap = len(cmd['captureRequests'])
1961    nsurf = len(formats)
1962
1963    cam_ids = []
1964    bufs = {}
1965    yuv_bufs = {}
1966    for i, s in enumerate(cmd['outputSurfaces']):
1967      if self._hidden_physical_id:
1968        s['physicalCamera'] = self._hidden_physical_id
1969
1970      if 'physicalCamera' in s:
1971        cam_id = s['physicalCamera']
1972      else:
1973        cam_id = self._camera_id
1974
1975      if cam_id not in cam_ids:
1976        cam_ids.append(cam_id)
1977        bufs[cam_id] = {
1978            'raw': [],
1979            'raw10': [],
1980            'raw12': [],
1981            'rawStats': [],
1982            'dng': [],
1983            'jpeg': [],
1984            'jpeg_r': [],
1985            'y8': [],
1986            'rawQuadBayer': [],
1987            'rawQuadBayerStats': [],
1988            'raw10Stats': [],
1989            'raw10QuadBayerStats': [],
1990            'raw10QuadBayer': [],
1991        }
1992
1993    for cam_id in cam_ids:
1994       # Only allow yuv output to multiple targets
1995      if cam_id == self._camera_id:
1996        yuv_surfaces = [
1997            s for s in requested_surfaces
1998            if s['format'] == 'yuv' and 'physicalCamera' not in s
1999        ]
2000        formats_for_id = [
2001            s['format']
2002            for s in requested_surfaces
2003            if 'physicalCamera' not in s
2004        ]
2005      else:
2006        yuv_surfaces = [
2007            s for s in requested_surfaces if s['format'] == 'yuv' and
2008            'physicalCamera' in s and s['physicalCamera'] == cam_id
2009        ]
2010        formats_for_id = [
2011            s['format']
2012            for s in requested_surfaces
2013            if 'physicalCamera' in s and s['physicalCamera'] == cam_id
2014        ]
2015
2016      n_yuv = len(yuv_surfaces)
2017      # Compute the buffer size of YUV targets
2018      yuv_maxsize_1d = 0
2019      for s in yuv_surfaces:
2020        if ('width' not in s and 'height' not in s):
2021          if self.props is None:
2022            raise error_util.CameraItsError('Camera props are unavailable')
2023          yuv_maxsize_2d = capture_request_utils.get_available_output_sizes(
2024              'yuv', self.props)[0]
2025          # YUV420 size = 1.5 bytes per pixel
2026          yuv_maxsize_1d = (yuv_maxsize_2d[0] * yuv_maxsize_2d[1] * 3) // 2
2027          break
2028      yuv_sizes = [
2029          (c['width'] * c['height'] * 3) // 2
2030          if 'width' in c and 'height' in c else yuv_maxsize_1d
2031          for c in yuv_surfaces
2032      ]
2033      # Currently we don't pass enough metadata from ItsService to distinguish
2034      # different yuv stream of same buffer size
2035      if len(yuv_sizes) != len(set(yuv_sizes)):
2036        raise error_util.CameraItsError(
2037            'ITS does not support yuv outputs of same buffer size')
2038      if len(formats_for_id) > len(set(formats_for_id)):
2039        if n_yuv != len(formats_for_id) - len(set(formats_for_id)) + 1:
2040          raise error_util.CameraItsError('Duplicate format requested')
2041
2042      yuv_bufs[cam_id] = {size: [] for size in yuv_sizes}
2043
2044    logging.debug('yuv bufs: %s', yuv_bufs)
2045    raw_formats = 0
2046    raw_formats += 1 if 'dng' in formats else 0
2047    raw_formats += 1 if 'raw' in formats else 0
2048    raw_formats += 1 if 'raw10' in formats else 0
2049    raw_formats += 1 if 'raw12' in formats else 0
2050    raw_formats += 1 if 'rawStats' in formats else 0
2051    raw_formats += 1 if 'rawQuadBayer' in formats else 0
2052    raw_formats += 1 if 'rawQuadBayerStats' in formats else 0
2053    raw_formats += 1 if 'raw10Stats' in formats else 0
2054    raw_formats += 1 if 'raw10QuadBayer' in formats else 0
2055    raw_formats += 1 if 'raw10QuadBayerStats' in formats else 0
2056
2057    if raw_formats > 1:
2058      raise error_util.CameraItsError('Different raw formats not supported')
2059
2060    # Detect long exposure time and set timeout accordingly
2061    longest_exp_time = 0
2062    for req in cmd['captureRequests']:
2063      if 'android.sensor.exposureTime' in req and req[
2064          'android.sensor.exposureTime'] > longest_exp_time:
2065        longest_exp_time = req['android.sensor.exposureTime']
2066
2067    extended_timeout = longest_exp_time // self.SEC_TO_NSEC + self.SOCK_TIMEOUT
2068    if repeat_request:
2069      extended_timeout += self.EXTRA_SOCK_TIMEOUT
2070    self.sock.settimeout(extended_timeout)
2071
2072    logging.debug('Capturing %d frame%s with %d format%s [%s]', ncap,
2073                  's' if ncap > 1 else '', nsurf, 's' if nsurf > 1 else '',
2074                  ','.join(formats))
2075    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2076
2077    # Wait for ncap*nsurf images and ncap metadata responses.
2078    # Assume that captures come out in the same order as requested in
2079    # the burst, however individual images of different formats can come
2080    # out in any order for that capture.
2081    nbufs = 0
2082    mds = []
2083    physical_mds = []
2084    widths = None
2085    heights = None
2086    camera_id = (
2087        self._camera_id
2088        if not self._hidden_physical_id
2089        else self._hidden_physical_id
2090    )
2091    logging.debug('Using camera_id %s to store buffers', camera_id)
2092    while nbufs < ncap * nsurf or len(mds) < ncap:
2093      json_obj, buf = self.__read_response_from_socket()
2094      if (json_obj[_TAG_STR] in ItsSession.IMAGE_FORMAT_LIST_1 and
2095          buf is not None):
2096        fmt = json_obj[_TAG_STR][:-5]
2097        bufs[camera_id][fmt].append(buf)
2098        nbufs += 1
2099      # Physical camera is appended to the tag string of a private capture
2100      elif json_obj[_TAG_STR].startswith('privImage'):
2101        # The private image format buffers are opaque to camera clients
2102        # and cannot be accessed.
2103        nbufs += 1
2104      elif json_obj[_TAG_STR] == 'yuvImage':
2105        buf_size = get_array_size(buf)
2106        yuv_bufs[camera_id][buf_size].append(buf)
2107        nbufs += 1
2108      elif json_obj[_TAG_STR] == 'captureResults':
2109        mds.append(json_obj[_OBJ_VALUE_STR]['captureResult'])
2110        physical_mds.append(json_obj[_OBJ_VALUE_STR]['physicalResults'])
2111        outputs = json_obj[_OBJ_VALUE_STR]['outputs']
2112        widths = [out['width'] for out in outputs]
2113        heights = [out['height'] for out in outputs]
2114      else:
2115        tag_string = unicodedata.normalize('NFKD', json_obj[_TAG_STR]).encode(
2116            'ascii', 'ignore')
2117        for x in ItsSession.IMAGE_FORMAT_LIST_2:
2118          x = bytes(x, encoding='utf-8')
2119          if tag_string.startswith(x):
2120            if x == b'yuvImage':
2121              physical_id = json_obj[_TAG_STR][len(x):]
2122              if physical_id in cam_ids:
2123                buf_size = get_array_size(buf)
2124                yuv_bufs[physical_id][buf_size].append(buf)
2125                nbufs += 1
2126            else:
2127              physical_id = json_obj[_TAG_STR][len(x):]
2128              if physical_id in cam_ids:
2129                fmt = x[:-5].decode('UTF-8')
2130                bufs[physical_id][fmt].append(buf)
2131                nbufs += 1
2132    rets = []
2133    for j, fmt in enumerate(formats):
2134      objs = []
2135      if 'physicalCamera' in requested_surfaces[j]:
2136        cam_id = requested_surfaces[j]['physicalCamera']
2137      else:
2138        cam_id = self._camera_id
2139
2140      for i in range(ncap):
2141        obj = {}
2142        obj['width'] = widths[j]
2143        obj['height'] = heights[j]
2144        obj['format'] = fmt
2145        if cam_id == self._camera_id:
2146          obj['metadata'] = mds[i]
2147        else:
2148          for physical_md in physical_mds[i]:
2149            if cam_id in physical_md:
2150              obj['metadata'] = physical_md[cam_id]
2151              break
2152
2153        if fmt == 'yuv':
2154          buf_size = (widths[j] * heights[j] * 3) // 2
2155          obj['data'] = yuv_bufs[cam_id][buf_size][i]
2156        elif fmt != 'priv':
2157          obj['data'] = bufs[cam_id][fmt][i]
2158        objs.append(obj)
2159      rets.append(objs if ncap > 1 else objs[0])
2160    self.sock.settimeout(self.SOCK_TIMEOUT)
2161    if len(rets) > 1 or (isinstance(rets[0], dict) and
2162                         isinstance(cap_request, list)):
2163      return rets
2164    else:
2165      return rets[0]
2166
2167  def do_vibrate(self, pattern):
2168    """Cause the device to vibrate to a specific pattern.
2169
2170    Args:
2171      pattern: Durations (ms) for which to turn on or off the vibrator.
2172      The first value indicates the number of milliseconds to wait
2173      before turning the vibrator on. The next value indicates the
2174      number of milliseconds for which to keep the vibrator on
2175      before turning it off. Subsequent values alternate between
2176      durations in milliseconds to turn the vibrator off or to turn
2177      the vibrator on.
2178
2179    Returns:
2180      Nothing.
2181    """
2182    cmd = {}
2183    cmd[_CMD_NAME_STR] = 'doVibrate'
2184    cmd['pattern'] = pattern
2185    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2186    data, _ = self.__read_response_from_socket()
2187    if data[_TAG_STR] != 'vibrationStarted':
2188      raise error_util.CameraItsError('Invalid response for command: %s' %
2189                                      cmd[_CMD_NAME_STR])
2190
2191  def set_audio_restriction(self, mode):
2192    """Set the audio restriction mode for this camera device.
2193
2194    Args:
2195     mode: int; the audio restriction mode. See CameraDevice.java for valid
2196     value.
2197    Returns:
2198     Nothing.
2199    """
2200    cmd = {}
2201    cmd[_CMD_NAME_STR] = 'setAudioRestriction'
2202    cmd['mode'] = mode
2203    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2204    data, _ = self.__read_response_from_socket()
2205    if data[_TAG_STR] != 'audioRestrictionSet':
2206      raise error_util.CameraItsError('Invalid response for command: %s' %
2207                                      cmd[_CMD_NAME_STR])
2208
2209  # pylint: disable=dangerous-default-value
2210  def do_3a(self,
2211            regions_ae=[[0, 0, 1, 1, 1]],
2212            regions_awb=[[0, 0, 1, 1, 1]],
2213            regions_af=[[0, 0, 1, 1, 1]],
2214            do_awb=True,
2215            do_af=True,
2216            lock_ae=False,
2217            lock_awb=False,
2218            get_results=False,
2219            ev_comp=0,
2220            auto_flash=False,
2221            mono_camera=False,
2222            zoom_ratio=None,
2223            out_surfaces=None,
2224            repeat_request=None,
2225            first_surface_for_3a=False,
2226            flash_mode=_FLASH_MODE_OFF):
2227    """Perform a 3A operation on the device.
2228
2229    Triggers some or all of AE, AWB, and AF, and returns once they have
2230    converged. Uses the vendor 3A that is implemented inside the HAL.
2231    Note: do_awb is always enabled regardless of do_awb flag
2232
2233    Throws an assertion if 3A fails to converge.
2234
2235    Args:
2236      regions_ae: List of weighted AE regions.
2237      regions_awb: List of weighted AWB regions.
2238      regions_af: List of weighted AF regions.
2239      do_awb: Wait for AWB to converge.
2240      do_af: Trigger AF and wait for it to converge.
2241      lock_ae: Request AE lock after convergence, and wait for it.
2242      lock_awb: Request AWB lock after convergence, and wait for it.
2243      get_results: Return the 3A results from this function.
2244      ev_comp: An EV compensation value to use when running AE.
2245      auto_flash: AE control boolean to enable auto flash.
2246      mono_camera: Boolean for monochrome camera.
2247      zoom_ratio: Zoom ratio. None if default zoom
2248      out_surfaces: dict; see do_capture() for specifications on out_surfaces.
2249        CameraCaptureSession will only be reused if out_surfaces is specified.
2250      repeat_request: repeating request list.
2251        See do_capture() for specifications on repeat_request.
2252      first_surface_for_3a: Use first surface in output_surfaces for 3A.
2253        Only applicable if out_surfaces contains at least 1 surface.
2254      flash_mode: FLASH_MODE to be used during 3A
2255        0: OFF
2256        1: SINGLE
2257        2: TORCH
2258
2259      Region format in args:
2260         Arguments are lists of weighted regions; each weighted region is a
2261         list of 5 values, [x, y, w, h, wgt], and each argument is a list of
2262         these 5-value lists. The coordinates are given as normalized
2263         rectangles (x, y, w, h) specifying the region. For example:
2264         [[0.0, 0.0, 1.0, 0.5, 5], [0.0, 0.5, 1.0, 0.5, 10]].
2265         Weights are non-negative integers.
2266
2267    Returns:
2268      Five values are returned if get_results is true:
2269      * AE sensitivity;
2270      * AE exposure time;
2271      * AWB gains (list);
2272      * AWB transform (list);
2273      * AF focus position; None if do_af is false
2274      Otherwise, it returns five None values.
2275    """
2276    logging.debug('Running vendor 3A on device')
2277    cmd = {}
2278    cmd[_CMD_NAME_STR] = 'do3A'
2279    reuse_session = False
2280    if out_surfaces:
2281      reuse_session = True
2282      if isinstance(out_surfaces, list):
2283        cmd['outputSurfaces'] = out_surfaces
2284      else:
2285        cmd['outputSurfaces'] = [out_surfaces]
2286    if repeat_request is None:
2287      cmd['repeatRequests'] = []
2288    elif not isinstance(repeat_request, list):
2289      cmd['repeatRequests'] = [repeat_request]
2290    else:
2291      cmd['repeatRequests'] = repeat_request
2292
2293    cmd['regions'] = {
2294        'ae': sum(regions_ae, []),
2295        'awb': sum(regions_awb, []),
2296        'af': sum(regions_af, [])
2297    }
2298    do_ae = True  # Always run AE
2299    cmd['triggers'] = {'ae': do_ae, 'af': do_af}
2300    if lock_ae:
2301      cmd['aeLock'] = True
2302    if lock_awb:
2303      cmd['awbLock'] = True
2304    if ev_comp != 0:
2305      cmd['evComp'] = ev_comp
2306    if flash_mode != 0:
2307      cmd['flashMode'] = flash_mode
2308    if auto_flash:
2309      cmd['autoFlash'] = True
2310    if self._hidden_physical_id:
2311      cmd['physicalId'] = self._hidden_physical_id
2312    if zoom_ratio:
2313      if self.zoom_ratio_within_range(zoom_ratio):
2314        cmd['zoomRatio'] = zoom_ratio
2315      else:
2316        raise AssertionError(f'Zoom ratio {zoom_ratio} out of range')
2317    cmd['reuseSession'] = reuse_session
2318    cmd['firstSurfaceFor3A'] = first_surface_for_3a
2319    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2320
2321    # Wait for each specified 3A to converge.
2322    ae_sens = None
2323    ae_exp = None
2324    awb_gains = None
2325    awb_transform = None
2326    af_dist = None
2327    converged = False
2328    while True:
2329      data, _ = self.__read_response_from_socket()
2330      vals = data[_STR_VALUE_STR].split()
2331      if data[_TAG_STR] == 'aeResult':
2332        if do_ae:
2333          ae_sens, ae_exp = [int(i) for i in vals]
2334      elif data[_TAG_STR] == 'afResult':
2335        if do_af:
2336          af_dist = float(vals[0])
2337      elif data[_TAG_STR] == 'awbResult':
2338        awb_gains = [float(f) for f in vals[:4]]
2339        awb_transform = [float(f) for f in vals[4:]]
2340      elif data[_TAG_STR] == '3aConverged':
2341        converged = True
2342      elif data[_TAG_STR] == '3aDone':
2343        break
2344      else:
2345        raise error_util.CameraItsError('Invalid command response')
2346    if converged and not get_results:
2347      return None, None, None, None, None
2348    if (do_ae and ae_sens is None or
2349        (not mono_camera and do_awb and awb_gains is None) or
2350        do_af and af_dist is None or not converged):
2351      raise error_util.CameraItsError('3A failed to converge')
2352    return ae_sens, ae_exp, awb_gains, awb_transform, af_dist
2353
2354  def calc_camera_fov(self, props):
2355    """Determine the camera field of view from internal params.
2356
2357    Args:
2358      props: Camera properties object.
2359
2360    Returns:
2361      camera_fov: string; field of view for camera.
2362    """
2363
2364    focal_ls = props['android.lens.info.availableFocalLengths']
2365    if len(focal_ls) > 1:
2366      logging.debug('Doing capture to determine logical camera focal length')
2367      cap = self.do_capture(capture_request_utils.auto_capture_request())
2368      focal_l = cap['metadata']['android.lens.focalLength']
2369    else:
2370      focal_l = focal_ls[0]
2371
2372    sensor_size = props['android.sensor.info.physicalSize']
2373    diag = math.sqrt(sensor_size['height']**2 + sensor_size['width']**2)
2374    try:
2375      fov = str(round(2 * math.degrees(math.atan(diag / (2 * focal_l))), 2))
2376    except ValueError:
2377      fov = str(0)
2378    logging.debug('Calculated FoV: %s', fov)
2379    return fov
2380
2381  def get_file_name_to_load(self, chart_distance, camera_fov, scene):
2382    """Get the image to load on the tablet depending on fov and chart_distance.
2383
2384    Args:
2385     chart_distance: float; distance in cm from camera of displayed chart
2386     camera_fov: float; camera field of view.
2387     scene: String; Scene to be used in the test.
2388
2389    Returns:
2390     file_name: file name to display on the tablet.
2391
2392    """
2393    chart_scaling = opencv_processing_utils.calc_chart_scaling(
2394        chart_distance, camera_fov)
2395    if math.isclose(
2396        chart_scaling,
2397        opencv_processing_utils.SCALE_WIDE_IN_22CM_RIG,
2398        abs_tol=SCALING_TO_FILE_ATOL):
2399      file_name = f'{scene}_{opencv_processing_utils.SCALE_WIDE_IN_22CM_RIG}x_scaled.png'
2400    elif math.isclose(
2401        chart_scaling,
2402        opencv_processing_utils.SCALE_TELE_IN_22CM_RIG,
2403        abs_tol=SCALING_TO_FILE_ATOL):
2404      file_name = f'{scene}_{opencv_processing_utils.SCALE_TELE_IN_22CM_RIG}x_scaled.png'
2405    elif math.isclose(
2406        chart_scaling,
2407        opencv_processing_utils.SCALE_TELE25_IN_31CM_RIG,
2408        abs_tol=SCALING_TO_FILE_ATOL):
2409      file_name = f'{scene}_{opencv_processing_utils.SCALE_TELE25_IN_31CM_RIG}x_scaled.png'
2410    elif math.isclose(
2411        chart_scaling,
2412        opencv_processing_utils.SCALE_TELE40_IN_31CM_RIG,
2413        abs_tol=SCALING_TO_FILE_ATOL):
2414      file_name = f'{scene}_{opencv_processing_utils.SCALE_TELE40_IN_31CM_RIG}x_scaled.png'
2415    elif math.isclose(
2416        chart_scaling,
2417        opencv_processing_utils.SCALE_TELE_IN_31CM_RIG,
2418        abs_tol=SCALING_TO_FILE_ATOL):
2419      file_name = f'{scene}_{opencv_processing_utils.SCALE_TELE_IN_31CM_RIG}x_scaled.png'
2420    else:
2421      file_name = f'{scene}.png'
2422    logging.debug('Scene to load: %s', file_name)
2423    return file_name
2424
2425  def is_stream_combination_supported(self, out_surfaces, settings=None):
2426    """Query whether out_surfaces combination and settings are supported by the camera device.
2427
2428    This function hooks up to the isSessionConfigurationSupported()/
2429    isSessionConfigurationWithSettingsSupported() camera API
2430    to query whether a particular stream combination and settings are supported.
2431
2432    Args:
2433      out_surfaces: dict; see do_capture() for specifications on out_surfaces.
2434      settings: dict; optional capture request settings metadata.
2435
2436    Returns:
2437      Boolean
2438    """
2439    cmd = {}
2440    cmd[_CMD_NAME_STR] = 'isStreamCombinationSupported'
2441    cmd[_CAMERA_ID_STR] = self._camera_id
2442
2443    if isinstance(out_surfaces, list):
2444      cmd['outputSurfaces'] = out_surfaces
2445      for out_surface in out_surfaces:
2446        if self._hidden_physical_id:
2447          out_surface['physicalCamera'] = self._hidden_physical_id
2448    else:
2449      cmd['outputSurfaces'] = [out_surfaces]
2450      if self._hidden_physical_id:
2451        out_surfaces['physicalCamera'] = self._hidden_physical_id
2452
2453    if settings:
2454      cmd['settings'] = settings
2455
2456    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2457
2458    data, _ = self.__read_response_from_socket()
2459    if data[_TAG_STR] != 'streamCombinationSupport':
2460      raise error_util.CameraItsError('Failed to query stream combination')
2461
2462    return data[_STR_VALUE_STR] == 'supportedCombination'
2463
2464  def is_camera_privacy_mode_supported(self):
2465    """Query whether the mobile device supports camera privacy mode.
2466
2467    This function checks whether the mobile device has FEATURE_CAMERA_TOGGLE
2468    feature support, which indicates the camera device can run in privacy mode.
2469
2470    Returns:
2471      Boolean
2472    """
2473    cmd = {}
2474    cmd[_CMD_NAME_STR] = 'isCameraPrivacyModeSupported'
2475    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2476
2477    data, _ = self.__read_response_from_socket()
2478    if data[_TAG_STR] != 'cameraPrivacyModeSupport':
2479      raise error_util.CameraItsError('Failed to query camera privacy mode'
2480                                      ' support')
2481    return data[_STR_VALUE_STR] == 'true'
2482
2483  def is_primary_camera(self):
2484    """Query whether the camera device is a primary rear/front camera.
2485
2486    A primary rear/front facing camera is a camera device with the lowest
2487    camera Id for that facing.
2488
2489    Returns:
2490      Boolean
2491    """
2492    cmd = {}
2493    cmd[_CMD_NAME_STR] = 'isPrimaryCamera'
2494    cmd[_CAMERA_ID_STR] = self._camera_id
2495    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2496
2497    data, _ = self.__read_response_from_socket()
2498    if data[_TAG_STR] != 'primaryCamera':
2499      raise error_util.CameraItsError('Failed to query primary camera')
2500    return data[_STR_VALUE_STR] == 'true'
2501
2502  def is_performance_class(self):
2503    """Query whether the mobile device is an R or S performance class device.
2504
2505    Returns:
2506      Boolean
2507    """
2508    cmd = {}
2509    cmd[_CMD_NAME_STR] = 'isPerformanceClass'
2510    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2511
2512    data, _ = self.__read_response_from_socket()
2513    if data[_TAG_STR] != 'performanceClass':
2514      raise error_util.CameraItsError('Failed to query performance class')
2515    return data[_STR_VALUE_STR] == 'true'
2516
2517  def is_vic_performance_class(self):
2518    """Return whether the mobile device is VIC performance class device.
2519    """
2520    cmd = {}
2521    cmd[_CMD_NAME_STR] = 'isVicPerformanceClass'
2522    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2523
2524    data, _ = self.__read_response_from_socket()
2525    if data[_TAG_STR] != 'vicPerformanceClass':
2526      raise error_util.CameraItsError('Failed to query performance class')
2527    return data[_STR_VALUE_STR] == 'true'
2528
2529  def measure_camera_launch_ms(self):
2530    """Measure camera launch latency in millisecond, from open to first frame.
2531
2532    Returns:
2533      Camera launch latency from camera open to receipt of first frame
2534    """
2535    cmd = {}
2536    cmd[_CMD_NAME_STR] = 'measureCameraLaunchMs'
2537    cmd[_CAMERA_ID_STR] = self._camera_id
2538    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2539
2540    timeout = self.SOCK_TIMEOUT_FOR_PERF_MEASURE
2541    self.sock.settimeout(timeout)
2542    data, _ = self.__read_response_from_socket()
2543    self.sock.settimeout(self.SOCK_TIMEOUT)
2544
2545    if data[_TAG_STR] != 'cameraLaunchMs':
2546      raise error_util.CameraItsError('Failed to measure camera launch latency')
2547    return float(data[_STR_VALUE_STR])
2548
2549  def measure_camera_1080p_jpeg_capture_ms(self):
2550    """Measure camera 1080P jpeg capture latency in milliseconds.
2551
2552    Returns:
2553      Camera jpeg capture latency in milliseconds
2554    """
2555    cmd = {}
2556    cmd[_CMD_NAME_STR] = 'measureCamera1080pJpegCaptureMs'
2557    cmd[_CAMERA_ID_STR] = self._camera_id
2558    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2559
2560    timeout = self.SOCK_TIMEOUT_FOR_PERF_MEASURE
2561    self.sock.settimeout(timeout)
2562    data, _ = self.__read_response_from_socket()
2563    self.sock.settimeout(self.SOCK_TIMEOUT)
2564
2565    if data[_TAG_STR] != 'camera1080pJpegCaptureMs':
2566      raise error_util.CameraItsError(
2567          'Failed to measure camera 1080p jpeg capture latency')
2568    return float(data[_STR_VALUE_STR])
2569
2570  def _camera_id_to_props(self):
2571    """Return the properties of each camera ID."""
2572    unparsed_ids = self.get_camera_ids().get('cameraIdArray', [])
2573    parsed_ids = parse_camera_ids(unparsed_ids)
2574    id_to_props = {}
2575    for unparsed_id, id_combo in zip(unparsed_ids, parsed_ids):
2576      if id_combo.sub_id is None:
2577        props = self.get_camera_properties_by_id(id_combo.id)
2578      else:
2579        props = self.get_camera_properties_by_id(id_combo.sub_id)
2580      id_to_props[unparsed_id] = props
2581    if not id_to_props:
2582      raise AssertionError('No camera IDs were found.')
2583    return id_to_props
2584
2585  def has_ultrawide_camera(self, facing):
2586    """Return if device has an ultrawide camera facing the same direction.
2587
2588    Args:
2589      facing: constant describing the direction the camera device lens faces.
2590
2591    Returns:
2592      True if the device has an ultrawide camera facing in that direction.
2593    """
2594    camera_ids = self.get_camera_ids()
2595    primary_rear_camera_id = camera_ids.get('primaryRearCameraId', '')
2596    primary_front_camera_id = camera_ids.get('primaryFrontCameraId', '')
2597    if facing == camera_properties_utils.LENS_FACING['BACK']:
2598      primary_camera_id = primary_rear_camera_id
2599    elif facing == camera_properties_utils.LENS_FACING['FRONT']:
2600      primary_camera_id = primary_front_camera_id
2601    else:
2602      raise NotImplementedError('Cameras not facing either front or back '
2603                                'are currently unsupported.')
2604    id_to_props = self._camera_id_to_props()
2605    fov_and_facing = collections.namedtuple('FovAndFacing', ['fov', 'facing'])
2606    id_to_fov_facing = {
2607        unparsed_id: fov_and_facing(
2608            self.calc_camera_fov(props), props['android.lens.facing']
2609        )
2610        for unparsed_id, props in id_to_props.items()
2611    }
2612    logging.debug('IDs to (FOVs, facing): %s', id_to_fov_facing)
2613    primary_camera_fov, primary_camera_facing = id_to_fov_facing[
2614        primary_camera_id]
2615    for unparsed_id, fov_facing_combo in id_to_fov_facing.items():
2616      if (float(fov_facing_combo.fov) > float(primary_camera_fov) and
2617          fov_facing_combo.facing == primary_camera_facing and
2618          unparsed_id != primary_camera_id):
2619        logging.debug('Ultrawide camera found with ID %s and FoV %.3f. '
2620                      'Primary camera has ID %s and FoV: %.3f.',
2621                      unparsed_id, float(fov_facing_combo.fov),
2622                      primary_camera_id, float(primary_camera_fov))
2623        return True
2624    return False
2625
2626  def get_facing_to_ids(self):
2627    """Returns mapping from lens facing to list of corresponding camera IDs."""
2628    id_to_props = self._camera_id_to_props()
2629    facing_to_ids = collections.defaultdict(list)
2630    for unparsed_id, props in id_to_props.items():
2631      facing_to_ids[props['android.lens.facing']].append(unparsed_id)
2632    for ids in facing_to_ids.values():
2633      ids.sort()
2634    logging.debug('Facing to camera IDs: %s', facing_to_ids)
2635    return facing_to_ids
2636
2637  def is_low_light_boost_available(self, camera_id, extension=-1):
2638    """Checks if low light boost is available for camera id and extension.
2639
2640    If the extension is not provided (or -1) then low light boost support is
2641    checked for a camera2 session.
2642
2643    Args:
2644      camera_id: int; device ID
2645      extension: int; extension type
2646    Returns:
2647      True if low light boost is available and false otherwise.
2648    """
2649    cmd = {
2650        'cmdName': 'isLowLightBoostAvailable',
2651        'cameraId': camera_id,
2652        'extension': extension
2653    }
2654    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2655    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
2656    self.sock.settimeout(timeout)
2657    data, _ = self.__read_response_from_socket()
2658    if data['tag'] != 'isLowLightBoostAvailable':
2659      raise error_util.CameraItsError('Invalid command response')
2660    return data[_STR_VALUE_STR] == 'true'
2661
2662  def do_capture_preview_frame(self,
2663                               camera_id,
2664                               preview_size,
2665                               frame_num=0,
2666                               extension=-1,
2667                               cap_request={}):
2668    """Captures the nth preview frame from the preview stream.
2669
2670    By default the 0th frame is the first frame. The extension type can also be
2671    provided or -1 to use Camera2 which is the default.
2672
2673    Args:
2674      camera_id: int; device ID
2675      preview_size: int; preview size
2676      frame_num: int; frame number to capture
2677      extension: int; extension type
2678      cap_request: dict; python dict specifying the key/value pair of capture
2679        request keys, which will be converted to JSON and sent to the device.
2680    Returns:
2681      Single JPEG frame capture as numpy array of bytes
2682    """
2683    cmd = {
2684        'cmdName': 'doCapturePreviewFrame',
2685        'cameraId': camera_id,
2686        'previewSize': preview_size,
2687        'frameNum': frame_num,
2688        'extension': extension,
2689        'captureRequest': cap_request,
2690    }
2691    self.sock.send(json.dumps(cmd).encode() + '\n'.encode())
2692    timeout = self.SOCK_TIMEOUT + self.EXTRA_SOCK_TIMEOUT
2693    self.sock.settimeout(timeout)
2694    data, buf = self.__read_response_from_socket()
2695    if data[_TAG_STR] != 'jpegImage':
2696      raise error_util.CameraItsError('Invalid command response')
2697    return buf
2698
2699  def preview_surface(self, size, hlg10_enabled=False):
2700    """Create a surface dictionary based on size and hdr-ness.
2701
2702    Args:
2703      size: str, Resolution of an output surface. ex. "1920x1080"
2704      hlg10_enabled: boolean; Whether the output is hlg10 or not.
2705
2706    Returns:
2707      a dictionary object containing format, size, and hdr-ness.
2708    """
2709    surface = {
2710        'format': 'priv',
2711        'width': int(size.split('x')[0]),
2712        'height': int(size.split('x')[1]),
2713        'hlg10': hlg10_enabled
2714    }
2715    if self._hidden_physical_id:
2716      surface['physicalCamera'] = self._hidden_physical_id
2717    return [surface]
2718
2719
2720def parse_camera_ids(ids):
2721  """Parse the string of camera IDs into array of CameraIdCombo tuples.
2722
2723  Args:
2724   ids: List of camera ids.
2725
2726  Returns:
2727   Array of CameraIdCombo
2728  """
2729  camera_id_combo = collections.namedtuple('CameraIdCombo', ['id', 'sub_id'])
2730  id_combos = []
2731  for one_id in ids:
2732    one_combo = one_id.split(SUB_CAMERA_SEPARATOR)
2733    if len(one_combo) == 1:
2734      id_combos.append(camera_id_combo(one_combo[0], None))
2735    elif len(one_combo) == 2:
2736      id_combos.append(camera_id_combo(one_combo[0], one_combo[1]))
2737    else:
2738      raise AssertionError('Camera id parameters must be either ID or '
2739                           f'ID{SUB_CAMERA_SEPARATOR}SUB_ID')
2740  return id_combos
2741
2742
2743def do_capture_with_latency(cam, req, sync_latency, fmt=None):
2744  """Helper function to take enough frames to allow sync latency.
2745
2746  Args:
2747    cam: camera object
2748    req: request for camera
2749    sync_latency: integer number of frames
2750    fmt: format for the capture
2751  Returns:
2752    single capture with the unsettled frames discarded
2753  """
2754  caps = cam.do_capture([req]*(sync_latency+1), fmt)
2755  return caps[-1]
2756
2757
2758def load_scene(cam, props, scene, tablet, chart_distance, lighting_check=True,
2759               log_path=None):
2760  """Load the scene for the camera based on the FOV.
2761
2762  Args:
2763    cam: camera object
2764    props: camera properties
2765    scene: scene to be loaded
2766    tablet: tablet to load scene on
2767    chart_distance: distance to tablet
2768    lighting_check: Boolean for lighting check enabled
2769    log_path: [Optional] path to store artifacts
2770  """
2771  if not tablet:
2772    logging.info('Manual run: no tablet to load scene on.')
2773    return
2774  # Calculate camera_fov, which determines the image/video to load on tablet.
2775  camera_fov = cam.calc_camera_fov(props)
2776  file_name = cam.get_file_name_to_load(chart_distance, camera_fov, scene)
2777  if 'scene' not in file_name:
2778    file_name = f'scene{file_name}'
2779  if scene in VIDEO_SCENES:
2780    root_file_name, _ = os.path.splitext(file_name)
2781    file_name = root_file_name + '.mp4'
2782  logging.debug('Displaying %s on the tablet', file_name)
2783
2784  # Display the image/video on the tablet using the default media player.
2785  view_file_type = 'image/png' if scene not in VIDEO_SCENES else 'video/mp4'
2786  uri_prefix = 'file://mnt' if scene not in VIDEO_SCENES else ''
2787  tablet.adb.shell(
2788      f'am start -a android.intent.action.VIEW -t {view_file_type} '
2789      f'-d {uri_prefix}/sdcard/Download/{file_name}')
2790  time.sleep(LOAD_SCENE_DELAY_SEC)
2791  # Tap tablet to remove gallery buttons
2792  tablet.adb.shell(
2793      f'input tap {TAP_COORDINATES[0]} {TAP_COORDINATES[1]}')
2794  rfov_camera_in_rfov_box = (
2795      math.isclose(
2796          chart_distance,
2797          opencv_processing_utils.CHART_DISTANCE_31CM, rel_tol=0.1) and
2798      opencv_processing_utils.FOV_THRESH_TELE <= float(camera_fov)
2799      <= opencv_processing_utils.FOV_THRESH_UW)
2800  wfov_camera_in_wfov_box = (
2801      math.isclose(
2802          chart_distance,
2803          opencv_processing_utils.CHART_DISTANCE_22CM, rel_tol=0.1) and
2804      float(camera_fov) > opencv_processing_utils.FOV_THRESH_UW)
2805  if (rfov_camera_in_rfov_box or wfov_camera_in_wfov_box) and lighting_check:
2806    cam.do_3a()
2807    cap = cam.do_capture(
2808        capture_request_utils.auto_capture_request(), cam.CAP_YUV)
2809    y_plane, _, _ = image_processing_utils.convert_capture_to_planes(cap)
2810    validate_lighting(y_plane, scene, log_path=log_path, fov=float(camera_fov))
2811
2812
2813def copy_scenes_to_tablet(scene, tablet_id):
2814  """Copies scenes onto the tablet before running the tests.
2815
2816  Args:
2817    scene: Name of the scene to copy image files.
2818    tablet_id: device id of tablet
2819  """
2820  logging.info('Copying files to tablet: %s', tablet_id)
2821  scene_path = os.path.join(os.environ['CAMERA_ITS_TOP'], 'tests', scene)
2822  scene_dir = os.listdir(scene_path)
2823  for file_name in scene_dir:
2824    if file_name.endswith('.png') or file_name.endswith('.mp4'):
2825      src_scene_file = os.path.join(scene_path, file_name)
2826      cmd = f'adb -s {tablet_id} push {src_scene_file} {_DST_SCENE_DIR}'
2827      subprocess.Popen(cmd.split())
2828  time.sleep(_COPY_SCENE_DELAY_SEC)
2829  logging.info('Finished copying files to tablet.')
2830
2831
2832def validate_lighting(y_plane, scene, state='ON', log_path=None,
2833                      tablet_state='ON', fov=None):
2834  """Validates the lighting level in scene corners based on empirical values.
2835
2836  Args:
2837    y_plane: Y plane of YUV image
2838    scene: scene name
2839    state: string 'ON' or 'OFF'
2840    log_path: [Optional] path to store artifacts
2841    tablet_state: string 'ON' or 'OFF'
2842    fov: [Optional] float, calculated camera FoV
2843
2844  Returns:
2845    boolean True if lighting validated, else raise AssertionError
2846  """
2847  logging.debug('Validating lighting levels.')
2848  file_name = f'validate_lighting_{scene}.jpg'
2849  if log_path:
2850    file_name = os.path.join(log_path, f'validate_lighting_{scene}.jpg')
2851
2852  if tablet_state == 'OFF':
2853    validate_lighting_thresh = _VALIDATE_LIGHTING_THRESH_DARK
2854  else:
2855    validate_lighting_thresh = _VALIDATE_LIGHTING_THRESH
2856
2857  validate_lighting_regions = _VALIDATE_LIGHTING_REGIONS
2858  if fov and fov > _VALIDATE_LIGHTING_MACRO_FOV_THRESH:
2859    validate_lighting_regions = _VALIDATE_LIGHTING_REGIONS_MODULAR_UW
2860
2861  # Test patches from each corner.
2862  for location, coordinates in validate_lighting_regions.items():
2863    patch = image_processing_utils.get_image_patch(
2864        y_plane, coordinates[0], coordinates[1],
2865        _VALIDATE_LIGHTING_PATCH_W, _VALIDATE_LIGHTING_PATCH_H)
2866    y_mean = image_processing_utils.compute_image_means(patch)[0]
2867    logging.debug('%s corner Y mean: %.3f', location, y_mean)
2868    if state == 'ON':
2869      if y_mean > validate_lighting_thresh:
2870        logging.debug('Lights ON in test rig.')
2871        return True
2872      else:
2873        image_processing_utils.write_image(y_plane, file_name)
2874        raise AssertionError('Lights OFF in test rig. Turn ON and retry.')
2875    elif state == 'OFF':
2876      if y_mean < validate_lighting_thresh:
2877        logging.debug('Lights OFF in test rig.')
2878        return True
2879      else:
2880        image_processing_utils.write_image(y_plane, file_name)
2881        raise AssertionError('Lights ON in test rig. Turn OFF and retry.')
2882    else:
2883      raise AssertionError('Invalid lighting state string. '
2884                           "Valid strings: 'ON', 'OFF'.")
2885
2886
2887def get_build_fingerprint(device_id):
2888  """Return the build fingerprint of the device."""
2889  cmd = f'adb -s {device_id} shell getprop ro.build.fingerprint'
2890  try:
2891    build_fingerprint = subprocess.check_output(cmd.split()).decode('utf-8').strip()
2892    logging.debug('Build fingerprint: %s', build_fingerprint)
2893  except (subprocess.CalledProcessError, ValueError) as exp_errors:
2894    raise AssertionError('No build_fingerprint.') from exp_errors
2895  return build_fingerprint
2896
2897
2898def get_build_sdk_version(device_id):
2899  """Return the int build version of the device."""
2900  cmd = f'adb -s {device_id} shell getprop ro.build.version.sdk'
2901  try:
2902    build_sdk_version = int(subprocess.check_output(cmd.split()).rstrip())
2903    logging.debug('Build SDK version: %d', build_sdk_version)
2904  except (subprocess.CalledProcessError, ValueError) as exp_errors:
2905    raise AssertionError('No build_sdk_version.') from exp_errors
2906  return build_sdk_version
2907
2908
2909def get_first_api_level(device_id):
2910  """Return the int value for the first API level of the device."""
2911  cmd = f'adb -s {device_id} shell getprop ro.product.first_api_level'
2912  try:
2913    first_api_level = int(subprocess.check_output(cmd.split()).rstrip())
2914    logging.debug('First API level: %d', first_api_level)
2915  except (subprocess.CalledProcessError, ValueError):
2916    logging.error('No first_api_level. Setting to build version.')
2917    first_api_level = get_build_sdk_version(device_id)
2918  return first_api_level
2919
2920
2921def get_vendor_api_level(device_id):
2922  """Return the int value for the vendor API level of the device."""
2923  cmd = f'adb -s {device_id} shell getprop ro.vendor.api_level'
2924  try:
2925    vendor_api_level = int(subprocess.check_output(cmd.split()).rstrip())
2926    logging.debug('First vendor API level: %d', vendor_api_level)
2927  except (subprocess.CalledProcessError, ValueError):
2928    logging.error('No vendor_api_level. Setting to build version.')
2929    vendor_api_level = get_build_sdk_version(device_id)
2930  return vendor_api_level
2931
2932
2933def get_media_performance_class(device_id):
2934  """Return the int value for the media performance class of the device."""
2935  cmd = (f'adb -s {device_id} shell '
2936         'getprop ro.odm.build.media_performance_class')
2937  try:
2938    media_performance_class = int(
2939        subprocess.check_output(cmd.split()).rstrip())
2940    logging.debug('Media performance class: %d', media_performance_class)
2941  except (subprocess.CalledProcessError, ValueError):
2942    logging.debug('No media performance class. Setting to 0.')
2943    media_performance_class = 0
2944  return media_performance_class
2945
2946
2947def raise_mpc_assertion_error(required_mpc, test_name, found_mpc):
2948  raise AssertionError(f'With MPC >= {required_mpc}, {test_name} must be run. '
2949                       f'Found MPC: {found_mpc}')
2950
2951
2952def stop_video_playback(tablet):
2953  """Force-stop activities used for video playback on the tablet.
2954
2955  Args:
2956    tablet: a controller object for the ITS tablet.
2957  """
2958  try:
2959    activities_unencoded = tablet.adb.shell(
2960        ['dumpsys', 'activity', 'recents', '|',
2961         'grep', '"baseIntent=Intent.*act=android.intent.action"']
2962    )
2963  except adb.AdbError as e:
2964    logging.warning('ADB error when finding intent activities: %s. '
2965                    'Please close the default video player manually.', e)
2966    return
2967  activity_lines = (
2968      str(activities_unencoded.decode('utf-8')).strip().splitlines()
2969  )
2970  for activity_line in activity_lines:
2971    activity = activity_line.split('cmp=')[-1].split('/')[0]
2972    try:
2973      tablet.adb.shell(['am', 'force-stop', activity])
2974    except adb.AdbError as e:
2975      logging.warning('ADB error when killing intent activity %s: %s. '
2976                      'Please close the default video player manually.',
2977                      activity, e)
2978
2979
2980def raise_not_yet_mandated_error(message, api_level, mandated_api_level):
2981  if api_level >= mandated_api_level:
2982    raise AssertionError(
2983        f'Test is mandated for API level {mandated_api_level} or above. '
2984        f'Found API level {api_level}.\n\n{message}'
2985    )
2986  else:
2987    raise AssertionError(f'{NOT_YET_MANDATED_MESSAGE}\n\n{message}')
2988
2989
2990def pull_file_from_dut(dut, dut_path, log_folder):
2991  """Pulls and returns file from dut and return file name.
2992
2993  Args:
2994    dut: device under test
2995    dut_path: pull file from this path
2996    log_folder: store pulled file to this folder
2997
2998  Returns:
2999    filename of file pulled from dut
3000  """
3001  dut.adb.pull([dut_path, log_folder])
3002  file_name = (dut_path.split('/')[-1])
3003  logging.debug('%s pulled from dut', file_name)
3004  return file_name
3005
3006
3007def remove_tmp_files(log_path, match_pattern):
3008  """Remove temp file with given directory path.
3009
3010  Args:
3011    log_path: path-like object, path of directory
3012    match_pattern: string, pattern to be matched and removed
3013
3014  Returns:
3015    List of error messages if encountering error while removing files
3016  """
3017  temp_files = []
3018  try:
3019    temp_files = os.listdir(log_path)
3020  except FileNotFoundError:
3021    logging.debug('/tmp directory: %s not found', log_path)
3022  for file in temp_files:
3023    if fnmatch.fnmatch(file, match_pattern):
3024      file_to_remove = os.path.join(log_path, file)
3025      try:
3026        os.remove(file_to_remove)
3027      except FileNotFoundError:
3028        logging.debug('File not found: %s', str(file))
3029
3030
3031def remove_frame_files(dir_name, save_files_list=None):
3032  """Removes the generated frame files from test dir.
3033
3034  Args:
3035    dir_name: test directory name.
3036    save_files_list: list of files not to be removed. Default is empty list.
3037  """
3038  if os.path.exists(dir_name):
3039    for image in glob.glob('%s/*.png' % dir_name):
3040      if save_files_list is None or image not in save_files_list:
3041        os.remove(image)
3042
3043
3044def remove_file(file_name_with_path):
3045  """Removes file at given path.
3046
3047  Args:
3048    file_name_with_path: string, filename with path.
3049  """
3050  remove_mp4_file(file_name_with_path)
3051
3052
3053def remove_mp4_file(file_name_with_path):
3054  """Removes the mp4 file at given path.
3055
3056  Args:
3057    file_name_with_path: string, path to mp4 recording.
3058  """
3059  try:
3060    os.remove(file_name_with_path)
3061  except FileNotFoundError:
3062    logging.debug('File not found: %s', file_name_with_path)
3063
3064
3065def check_features_passed(
3066    features_passed, hlg10, is_stabilized):
3067  """Check if the [hlg10, is_stabilized] combination is already tested
3068  to be supported.
3069
3070  Args:
3071    features_passed: The list of feature combinations already supported
3072    hlg10: boolean; Whether HLG10 is enabled
3073    is_stabilized: boolean; Whether preview stabilizatoin is enabled
3074
3075  Returns:
3076    Whether the [hlg10, is_stabilized] is already tested to be supported.
3077  """
3078  feature_mask = 0
3079  if hlg10: feature_mask |= _BIT_HLG10
3080  if is_stabilized: feature_mask |= _BIT_STABILIZATION
3081  tested = False
3082  for tested_feature in features_passed:
3083    # Only test a combination if they aren't already a subset
3084    # of another tested combination.
3085    if (tested_feature | feature_mask) == tested_feature:
3086      tested = True
3087      break
3088  return tested
3089
3090
3091def mark_features_passed(
3092    features_passed, hlg10, is_stabilized):
3093  """Mark the [hlg10, is_stabilized] combination as tested to pass.
3094
3095  Args:
3096    features_passed: The list of feature combinations already tested
3097    hlg10: boolean; Whether HLG10 is enabled
3098    is_stabilized: boolean; Whether preview stabilizatoin is enabled
3099  """
3100  feature_mask = 0
3101  if hlg10: feature_mask |= _BIT_HLG10
3102  if is_stabilized: feature_mask |= _BIT_STABILIZATION
3103  features_passed.append(feature_mask)
3104