Qualcomm Camera HAL 2.0

We know that the vendor side of the HAL is loaded dynamically from a file named camera.$platform$.so, which provides the methods defined by the Android HAL. Here we take Camera HAL 2.0 on Qualcomm msm8960 as the example, building on an earlier article (http://guoh.org/lifelog/2013/07/glance-at-camera-hal-2-0/).

(Note: this article sat in draft for quite a while before being published, because I had no device that could run this version of the code, and I cannot guarantee the code is entirely correct; at the very least several places are only stub implementations. There may be mistakes in the text; corrections are welcome, and I will keep fixing errors as I find them!)

camera2.h defines a good number of methods; for msm8960 the HAL implementation lives under
/path/to/qcam-hal/QCamera/HAL2
which is what gets compiled into camera.$platform$.so. Let's look at the implementation,
starting with HAL2/wrapper/QualcommCamera.h|cpp

/**
 * The functions need to be provided by the camera HAL.
 *
 * If getNumberOfCameras() returns N, the valid cameraId for getCameraInfo()
 * and openCameraHardware() is 0 to N-1.
 */

static hw_module_methods_t camera_module_methods = {
    open: camera_device_open,
};

static hw_module_t camera_common  = {
    tag: HARDWARE_MODULE_TAG,
    module_api_version: CAMERA_MODULE_API_VERSION_2_0, // with this version Camera Service will initialize the Camera2Client stack
    hal_api_version: HARDWARE_HAL_API_VERSION,
    id: CAMERA_HARDWARE_MODULE_ID,
    name: "Qcamera",
    author:"Qcom",
    methods: &camera_module_methods,
    dso: NULL,
    reserved:  {0},
};

camera_module_t HAL_MODULE_INFO_SYM = { // the HMI symbol every HAL module must export
    common: camera_common,
    get_number_of_cameras: get_number_of_cameras,
    get_camera_info: get_camera_info,
};

camera2_device_ops_t camera_ops = { // note which functions get bound here
    set_request_queue_src_ops:           android::set_request_queue_src_ops,
    notify_request_queue_not_empty:      android::notify_request_queue_not_empty,
    set_frame_queue_dst_ops:             android::set_frame_queue_dst_ops,
    get_in_progress_count:               android::get_in_progress_count,
    flush_captures_in_progress:          android::flush_captures_in_progress,
    construct_default_request:           android::construct_default_request,

    allocate_stream:                     android::allocate_stream,
    register_stream_buffers:             android::register_stream_buffers,
    release_stream:                      android::release_stream,

    allocate_reprocess_stream:           android::allocate_reprocess_stream,
    allocate_reprocess_stream_from_stream: android::allocate_reprocess_stream_from_stream,
    release_reprocess_stream:            android::release_reprocess_stream,

    trigger_action:                      android::trigger_action,
    set_notify_callback:                 android::set_notify_callback,
    get_metadata_vendor_tag_ops:         android::get_metadata_vendor_tag_ops,
    dump:                                android::dump,
};

typedef struct { // note: a wrapper struct defined by Qualcomm itself
  camera2_device_t hw_dev; // the standard device struct
  QCameraHardwareInterface *hardware;
  int camera_released;
  int cameraId;
} camera_hardware_t;

/* HAL should return NULL if it fails to open camera hardware. */
extern "C" int  camera_device_open(
  const struct hw_module_t* module, const char* id,
          struct hw_device_t** hw_device)
{
    int rc = -1;
    int mode = 0;
    camera2_device_t *device = NULL;
    if (module && id && hw_device) {
        int cameraId = atoi(id);

        if (!strcmp(module->name, camera_common.name)) {
            camera_hardware_t *camHal =
                (camera_hardware_t *) malloc(sizeof (camera_hardware_t));
            if (!camHal) {
                *hw_device = NULL;
                ALOGE("%s:  end in no mem", __func__);
                return rc;
            }
            /* we have the camera_hardware obj malloced */
            memset(camHal, 0, sizeof (camera_hardware_t));
            camHal->hardware = new QCameraHardwareInterface(cameraId, mode);
            if (camHal->hardware && camHal->hardware->isCameraReady()) {
                camHal->cameraId = cameraId;
                device = &camHal->hw_dev; // the camera2_device_t
                device->common.close = close_camera_device; // initialize camera2_device_t
                device->common.version = CAMERA_DEVICE_API_VERSION_2_0;
                device->ops = &camera_ops;
                device->priv = (void *)camHal;
                rc =  0;
            } else {
                if (camHal->hardware) {
                    delete camHal->hardware;
                    camHal->hardware = NULL;
                }
                free(camHal);
                device = NULL;
            }
        }
    }
    /* pass the actual hw_device ptr to the framework; since hw_dev is the first
     * member of camera_hardware_t, a container-of style macro can recover it */
    *hw_device = device ? (hw_device_t *)&device->common : NULL; // a trick commonly used in the kernel and the Android native framework
    return rc;
}

Let's look at allocate stream:

int allocate_stream(const struct camera2_device *device,
        uint32_t width,
        uint32_t height,
        int      format,
        const camera2_stream_ops_t *stream_ops,
        uint32_t *stream_id,
        uint32_t *format_actual,
        uint32_t *usage,
        uint32_t *max_buffers)
{
    QCameraHardwareInterface *hardware = util_get_Hal_obj(device);
    int rc = hardware->allocate_stream(width, height, format, stream_ops,
            stream_id, format_actual, usage, max_buffers);
    return rc;
}

Note that QCameraHardwareInterface lives in QCameraHWI.h|cpp:

int QCameraHardwareInterface::allocate_stream(
    uint32_t width,
    uint32_t height, int format,
    const camera2_stream_ops_t *stream_ops,
    uint32_t *stream_id,
    uint32_t *format_actual,
    uint32_t *usage,
    uint32_t *max_buffers)
{
    int ret = OK;
    QCameraStream *stream = NULL;
    camera_mode_t myMode = (camera_mode_t)(CAMERA_MODE_2D|CAMERA_NONZSL_MODE);

    stream = QCameraStream_preview::createInstance(
                        mCameraHandle->camera_handle,
                        mChannelId,
                        width,
                        height,
                        format,
                        mCameraHandle,
                        myMode);

    stream->setPreviewWindow(stream_ops); // note: any stream created through this method gets a corresponding ANativeWindow attached
    *stream_id = stream->getStreamId();
    *max_buffers= stream->getMaxBuffers(); // obtained from the HAL
    *usage = GRALLOC_USAGE_HW_CAMERA_WRITE | CAMERA_GRALLOC_HEAP_ID
        | CAMERA_GRALLOC_FALLBACK_HEAP_ID;
    /* Set to an arbitrary format SUPPORTED by gralloc */
    *format_actual = HAL_PIXEL_FORMAT_YCrCb_420_SP;

    return ret;
}

QCameraStream_preview::createInstance simply invokes its own constructor, shown below
(the related classes are in QCameraStream.h|cpp and QCameraStream_Preview.cpp)

QCameraStream_preview::QCameraStream_preview(uint32_t CameraHandle,
                        uint32_t ChannelId,
                        uint32_t Width,
                        uint32_t Height,
                        int requestedFormat,
                        mm_camera_vtbl_t *mm_ops,
                        camera_mode_t mode) :
                 QCameraStream(CameraHandle,
                        ChannelId,
                        Width,
                        Height,
                        mm_ops,
                        mode),
                 mLastQueuedFrame(NULL),
                 mDisplayBuf(NULL),
                 mNumFDRcvd(0)
{
    mStreamId = allocateStreamId(); // allocate a stream id (based on mStreamTable)

    switch (requestedFormat) { // max buffer number
    case CAMERA2_HAL_PIXEL_FORMAT_OPAQUE:
        mMaxBuffers = 5;
        break;
    case HAL_PIXEL_FORMAT_BLOB:
        mMaxBuffers = 1;
        break;
    default:
        ALOGE("Unsupported requested format %d", requestedFormat);
        mMaxBuffers = 1;
        break;
    }
    /*TODO: There has to be a better way to do this*/
}

Next let's look under
/path/to/qcam-hal/QCamera/stack/mm-camera-interface/
In mm_camera_interface.h we have:

typedef struct {
    uint32_t camera_handle;        /* camera object handle */
    mm_camera_info_t *camera_info; /* reference pointer of camera info */
    mm_camera_ops_t *ops;          /* API call table */
} mm_camera_vtbl_t;

And in mm_camera_interface.c:

/* camera ops v-table */
static mm_camera_ops_t mm_camera_ops = {
    .sync = mm_camera_intf_sync,
    .is_event_supported = mm_camera_intf_is_event_supported,
    .register_event_notify = mm_camera_intf_register_event_notify,
    .qbuf = mm_camera_intf_qbuf,
    .camera_close = mm_camera_intf_close,
    .query_2nd_sensor_info = mm_camera_intf_query_2nd_sensor_info,
    .is_parm_supported = mm_camera_intf_is_parm_supported,
    .set_parm = mm_camera_intf_set_parm,
    .get_parm = mm_camera_intf_get_parm,
    .ch_acquire = mm_camera_intf_add_channel,
    .ch_release = mm_camera_intf_del_channel,
    .add_stream = mm_camera_intf_add_stream,
    .del_stream = mm_camera_intf_del_stream,
    .config_stream = mm_camera_intf_config_stream,
    .init_stream_bundle = mm_camera_intf_bundle_streams,
    .destroy_stream_bundle = mm_camera_intf_destroy_bundle,
    .start_streams = mm_camera_intf_start_streams,
    .stop_streams = mm_camera_intf_stop_streams,
    .async_teardown_streams = mm_camera_intf_async_teardown_streams,
    .request_super_buf = mm_camera_intf_request_super_buf,
    .cancel_super_buf_request = mm_camera_intf_cancel_super_buf_request,
    .start_focus = mm_camera_intf_start_focus,
    .abort_focus = mm_camera_intf_abort_focus,
    .prepare_snapshot = mm_camera_intf_prepare_snapshot,
    .set_stream_parm = mm_camera_intf_set_stream_parm,
    .get_stream_parm = mm_camera_intf_get_stream_parm
};

Take start stream as an example:

mm_camera_intf_start_streams(mm_camera_interface
    mm_camera_start_streams(mm_camera
    	mm_channel_fsm_fn(mm_camera_channel
    		mm_channel_fsm_fn_active(mm_camera_channel
    			mm_channel_start_streams(mm_camera_channel
    				mm_stream_fsm_fn(mm_camera_stream
    					mm_stream_fsm_reg(mm_camera_stream
    						mm_camera_cmd_thread_launch(mm_camera_data
    						mm_stream_streamon(mm_camera_stream

Note: throughout this article, indentation like the above denotes a call relationship; entries at the same depth are called from within the same enclosing method.

int32_t mm_stream_streamon(mm_stream_t *my_obj)
{
    int32_t rc;
    enum v4l2_buf_type buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;

    /* Add fd to data poll thread */
    rc = mm_camera_poll_thread_add_poll_fd(&my_obj->ch_obj->poll_thread[0],
                                           my_obj->my_hdl,
                                           my_obj->fd,
                                           mm_stream_data_notify,
                                           (void*)my_obj);
    if (rc < 0) {
        return rc;
    }
    rc = ioctl(my_obj->fd, VIDIOC_STREAMON, &buf_type);
    if (rc < 0) {
        CDBG_ERROR("%s: ioctl VIDIOC_STREAMON failed: rc=%d\n",
                   __func__, rc);
        /* remove fd from data poll thread in case of failure */
        mm_camera_poll_thread_del_poll_fd(&my_obj->ch_obj->poll_thread[0], my_obj->my_hdl);
    }
    return rc;
}

Seeing ioctl with VIDIOC_STREAMON is good news: this is how user space and kernel space communicate in the V4L2 framework. V4L2 (Video for Linux Two), the successor of V4L, is a classic and mature video API; if you are not familiar with it, download the spec, and the series The Video4Linux2 API (http://lwn.net/Articles/203924/) is also excellent material.
A quick overview:

open(VIDEO_DEVICE_NAME, …) // open the video device, typically during program initialization

ioctl(…) // control operations that only move small amounts of data
Many request codes are available, and a typical usage sequence looks like this:
VIDIOC_QUERYCAP // query what the device is capable of
VIDIOC_CROPCAP // query the device's cropping capabilities
VIDIOC_S_* // set/get methods for parameters
VIDIOC_G_*
VIDIOC_REQBUFS // allocate buffers (several I/O methods are possible)
VIDIOC_QUERYBUF // query information about the allocated buffers
VIDIOC_QBUF // QUEUE BUFFER: push a buffer into the driver's queue (the buffer is empty at this point)
VIDIOC_STREAMON // start video streaming
VIDIOC_DQBUF // DEQUEUE BUFFER: take a buffer back out of the driver's queue (now holding data)

[0…n]
QBUF -> DQBUF // this cycle can repeat indefinitely

VIDIOC_STREAMOFF // stop video streaming

close(VIDEO_DEVICE_FD) // close the device
Those are the main calls and their rough ordering. A few more functions are involved:

select() // wait for an event; mainly used to wait for the driver after we hand it frame buffers
mmap/munmap // manage the buffers we requested; needed when buffers live in device memory
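
To make this concrete, here is a minimal mmap-based capture loop (a sketch only, with error handling omitted; the device path and buffer count are arbitrary):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
    int fd = open("/dev/video0", O_RDWR); // open the video device

    struct v4l2_requestbuffers req;
    memset(&req, 0, sizeof(req));
    req.count  = 4;
    req.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    ioctl(fd, VIDIOC_REQBUFS, &req);          // ask the driver for 4 buffers

    void *start[4];
    for (unsigned i = 0; i < req.count; i++) {
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof(buf));
        buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index  = i;
        ioctl(fd, VIDIOC_QUERYBUF, &buf);     // learn each buffer's offset/length
        start[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, buf.m.offset);
        ioctl(fd, VIDIOC_QBUF, &buf);         // queue the empty buffer
    }

    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    ioctl(fd, VIDIOC_STREAMON, &type);        // start streaming

    for (int n = 0; n < 100; n++) {           // the QBUF -> DQBUF cycle
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof(buf));
        buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        ioctl(fd, VIDIOC_DQBUF, &buf);        // blocks until a frame is filled
        /* consume the frame at start[buf.index], buf.bytesused bytes */
        ioctl(fd, VIDIOC_QBUF, &buf);         // hand the buffer back to the driver
    }

    ioctl(fd, VIDIOC_STREAMOFF, &type);       // stop streaming
    close(fd);
    return 0;
}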

And if you look at mm_camera_stream, that file is implemented exactly this way.

With that background, let's return to the QCam HAL. Its implementation is of course less simple than the start-stream outline above, but it is not really complicated either; what matters most are the states and the structures involved.

First the channel state. Currently only one channel is supported, but it can carry multiple streams (more on this later; at present at most 8 streams).

/* mm_channel */
typedef enum {
    MM_CHANNEL_STATE_NOTUSED = 0,   /* not used */
    MM_CHANNEL_STATE_STOPPED,       /* stopped */
    MM_CHANNEL_STATE_ACTIVE,        /* active, at least one stream active */
    MM_CHANNEL_STATE_PAUSED,        /* paused */
    MM_CHANNEL_STATE_MAX
} mm_channel_state_type_t;

The events a channel can handle:

typedef enum {
    MM_CHANNEL_EVT_ADD_STREAM,
    MM_CHANNEL_EVT_DEL_STREAM,
    MM_CHANNEL_EVT_START_STREAM,
    MM_CHANNEL_EVT_STOP_STREAM,
    MM_CHANNEL_EVT_TEARDOWN_STREAM,
    MM_CHANNEL_EVT_CONFIG_STREAM,
    MM_CHANNEL_EVT_PAUSE,
    MM_CHANNEL_EVT_RESUME,
    MM_CHANNEL_EVT_INIT_BUNDLE,
    MM_CHANNEL_EVT_DESTROY_BUNDLE,
    MM_CHANNEL_EVT_REQUEST_SUPER_BUF,
    MM_CHANNEL_EVT_CANCEL_REQUEST_SUPER_BUF,
    MM_CHANNEL_EVT_START_FOCUS,
    MM_CHANNEL_EVT_ABORT_FOCUS,
    MM_CHANNEL_EVT_PREPARE_SNAPSHOT,
    MM_CHANNEL_EVT_SET_STREAM_PARM,
    MM_CHANNEL_EVT_GET_STREAM_PARM,
    MM_CHANNEL_EVT_DELETE,
    MM_CHANNEL_EVT_MAX
} mm_channel_evt_type_t;
/* mm_stream */
typedef enum { // watch these states closely: each operation moves the stream to a new state
    MM_STREAM_STATE_NOTUSED = 0,      /* not used */
    MM_STREAM_STATE_INITED,           /* inited  */
    MM_STREAM_STATE_ACQUIRED,         /* acquired, fd opened  */
    MM_STREAM_STATE_CFG,              /* fmt & dim configured */
    MM_STREAM_STATE_BUFFED,           /* buf allocated */
    MM_STREAM_STATE_REG,              /* buf regged, stream off */
    MM_STREAM_STATE_ACTIVE_STREAM_ON, /* active with stream on */
    MM_STREAM_STATE_ACTIVE_STREAM_OFF, /* active with stream off */
    MM_STREAM_STATE_MAX
} mm_stream_state_type_t;

Likewise, the events a stream can handle:

typedef enum {
    MM_STREAM_EVT_ACQUIRE,
    MM_STREAM_EVT_RELEASE,
    MM_STREAM_EVT_SET_FMT,
    MM_STREAM_EVT_GET_BUF,
    MM_STREAM_EVT_PUT_BUF,
    MM_STREAM_EVT_REG_BUF,
    MM_STREAM_EVT_UNREG_BUF,
    MM_STREAM_EVT_START,
    MM_STREAM_EVT_STOP,
    MM_STREAM_EVT_QBUF,
    MM_STREAM_EVT_SET_PARM,
    MM_STREAM_EVT_GET_PARM,
    MM_STREAM_EVT_MAX
} mm_stream_evt_type_t;

Every function here first checks the channel/stream state and only executes when the state is right.

For instance you can see that
mm_channel has a field mm_channel_state_type_t state;
mm_stream has a field mm_stream_state_type_t state;
each holding the current state of that structure.

In addition,
struct mm_camera_obj
struct mm_channel
struct mm_stream
nest into one another top-down, and a stream/channel also keeps a reference to its parent structure (so to call it; strictly it is a container relationship), roughly as sketched below.
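
Schematically (a simplified sketch; the ch_obj/cam_obj back-references match the snippets quoted later in this article, the remaining fields are illustrative):

typedef struct mm_stream {
    mm_stream_state_type_t state;  /* current state, checked before every operation */
    int fd;                        /* the V4L2 device fd */
    struct mm_channel *ch_obj;     /* back-reference to the containing channel */
    /* ... buffers, buf_cb[], ... */
} mm_stream_t;

typedef struct mm_channel {
    mm_channel_state_type_t state;
    mm_stream_t streams[8];        /* at most 8 streams, per the text above */
    struct mm_camera_obj *cam_obj; /* back-reference to the camera object */
} mm_channel_t;

typedef struct mm_camera_obj {
    uint32_t my_hdl;
    mm_channel_t ch;               /* currently a single channel */
    /* ... vtable, memory hooks, ... */
} mm_camera_obj_t;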

In reality every vendor's HAL has its own implementation and may contain plenty of vendor-specific bits; here, for example, it feeds ioctl proprietary commands and data structures. Such details only come into play when working on a specific platform, and they vary wildly: OMAP4, for instance, talks to its driver through rpmsg and implements everything on top of an OpenMAX stack.

That is enough theory; now a concrete example. Say Camera Service wants to start preview:

Camera2Client::startPreviewL
	StreamingProcessor->updatePreviewStream
		Camera2Device->createStream
			StreamAdapter->connectToDevice
				camera2_device_t->ops->allocate_stream // analyzed above
				native_window_api_* or native_window_*

	StreamingProcessor->startStream
		Camera2Device->setStreamingRequest
			Camera2Device::RequestQueue->setStreamSlot // create a stream slot
				Camera2Device::RequestQueue->signalConsumerLocked
status_t Camera2Device::MetadataQueue::signalConsumerLocked() {
    status_t res = OK;
    notEmpty.signal();
    if (mSignalConsumer && mDevice != NULL) {
        mSignalConsumer = false;
        mMutex.unlock();
        res = mDevice->ops->notify_request_queue_not_empty(mDevice); // wake the vendor HAL's command thread.
                                                                     // notify_request_queue_not_empty does not fire for every request: only at
                                                                     // initialization, or when the command thread got NULL from a dequeue and
                                                                     // Camera Service has since enqueued a new request.
                                                                     // It saves some work: the thread need not sit idle when there are no
                                                                     // requests, though the more common pattern is to park the thread on a lock
        mMutex.lock();
    }
    return res;
}

Over in the Qualcomm HAL:

int notify_request_queue_not_empty(const struct camera2_device *device) // this method is registered into camera2_device_ops_t
	QCameraHardwareInterface->notify_request_queue_not_empty()
		pthread_create(&mCommandThread, &attr, command_thread, (void *)this)
void *command_thread(void *obj)
{
	...
	pme->runCommandThread(obj);
}
void QCameraHardwareInterface::runCommandThread(void *data)
{
    /**
     * This function implements the main service routine for the incoming
     * frame requests; this thread routine is started every time we get a
     * notify_request_queue_not_empty trigger. This thread makes the
     * assumption that once it receives a NULL on a dequeue_request call
     * there will be a fresh notify_request_queue_not_empty call that is
     * invoked thereby launching a new instance of this thread. Therefore,
     * once we get a NULL on a dequeue request we simply let this thread die
     */
    int res;
    camera_metadata_t *request=NULL;
    mPendingRequests=0;

    while (mRequestQueueSrc) { // mRequestQueueSrc is set via set_request_queue_src_ops;
                               // see Camera2Device::MetadataQueue::setConsumerDevice,
                               // which is called from Camera2Device::initialize
        ALOGV("%s:Dequeue request using mRequestQueueSrc:%p",__func__,mRequestQueueSrc);
        mRequestQueueSrc->dequeue_request(mRequestQueueSrc, &request); // fetch a framework request
        if (request==NULL) {
            ALOGE("%s:No more requests available from src command \
                    thread dying",__func__);
            return;
        }
        mPendingRequests++;

        /* Set the metadata values */

        /* Wait for the SOF for the new metadata values to be applied */

        /* Check the streams that need to be active in the stream request */
        sort_camera_metadata(request);

        camera_metadata_entry_t streams;
        res = find_camera_metadata_entry(request,
                ANDROID_REQUEST_OUTPUT_STREAMS,
                &streams);
        if (res != NO_ERROR) {
            ALOGE("%s: error reading output stream tag", __FUNCTION__);
            return;
        }

        res = tryRestartStreams(streams); // goes through prepareStream and streamOn; detailed code follows later
        if (res != NO_ERROR) {
            ALOGE("error tryRestartStreams %d", res);
            return;
        }

        /* 3rd pass: Turn on all streams requested */
        for (uint32_t i = 0; i < streams.count; i++) {
            int streamId = streams.data.u8[i];
            QCameraStream *stream = QCameraStream::getStreamAtId(streamId);

            /* Increment the frame pending count in each stream class */

            /* Assuming we will have the stream obj in had at this point may be
             * may be multiple objs in which case we loop through array of streams */
            stream->onNewRequest();
        }
        ALOGV("%s:Freeing request using mRequestQueueSrc:%p",__func__,mRequestQueueSrc);
        /* Free the request buffer */
        mRequestQueueSrc->free_request(mRequestQueueSrc,request);
        mPendingRequests--;
        ALOGV("%s:Completed request",__func__);
    }
 
    QCameraStream::streamOffAll();
}

The following method shows where mRequestQueueSrc comes from:

// Connect to camera2 HAL as consumer (input requests/reprocessing)
status_t Camera2Device::MetadataQueue::setConsumerDevice(camera2_device_t *d) {
    ATRACE_CALL();
    status_t res;
    res = d->ops->set_request_queue_src_ops(d,
            this);
    if (res != OK) return res;
    mDevice = d;
    return OK;
}

Because of the following:

QCameraStream_preview->prepareStream
	QCameraStream->initStream
		mm_camera_vtbl_t->ops->add_stream(... stream_cb_routine ...) // the callback used to return data, taking mm_camera_super_buf_t* and void* arguments
			mm_camera_add_stream
				mm_channel_fsm_fn(..., MM_CHANNEL_EVT_ADD_STREAM, ..., mm_evt_paylod_add_stream_t)
					mm_channel_fsm_fn_stopped
						mm_channel_add_stream(..., mm_camera_buf_notify_t, ...)
							mm_stream_fsm_inited


Inside mm_channel_add_stream the mm_camera_buf_notify_t gets wrapped into mm_stream_t:

mm_stream_t *stream_obj = NULL; /* abridged: in the full source this points at a free slot in the channel's stream table */
/* initialize stream object */
memset(stream_obj, 0, sizeof(mm_stream_t));
/* the cb set through the interface is always placed at idx 0 of buf_cb */
stream_obj->buf_cb[0].cb = buf_cb; // the callback
stream_obj->buf_cb[0].user_data = user_data;
stream_obj->buf_cb[0].cb_count = -1; /* infinite by default */

And in mm_stream_fsm_inited the event passed in is likewise MM_STREAM_EVT_ACQUIRE:

int32_t mm_stream_fsm_inited(mm_stream_t *my_obj,
                             mm_stream_evt_type_t evt,
                             void * in_val,
                             void * out_val)
{
    int32_t rc = 0;
    char dev_name[MM_CAMERA_DEV_NAME_LEN];

    switch (evt) {
    case MM_STREAM_EVT_ACQUIRE:
        if ((NULL == my_obj->ch_obj) || (NULL == my_obj->ch_obj->cam_obj)) {
            CDBG_ERROR("%s: NULL channel or camera obj\n", __func__);
            rc = -1;
            break;
        }

        snprintf(dev_name, sizeof(dev_name), "/dev/%s",
                 mm_camera_util_get_dev_name(my_obj->ch_obj->cam_obj->my_hdl));

        my_obj->fd = open(dev_name, O_RDWR | O_NONBLOCK); // open the video device
        if (my_obj->fd <= 0) {
            CDBG_ERROR("%s: open dev returned %d\n", __func__, my_obj->fd);
            rc = -1;
            break;
        }
        rc = mm_stream_set_ext_mode(my_obj);
        if (0 == rc) {
            my_obj->state = MM_STREAM_STATE_ACQUIRED; // mm_stream_state_type_t
        } else {
            /* failed setting ext_mode
             * close fd */
            if(my_obj->fd > 0) {
                close(my_obj->fd);
                my_obj->fd = -1;
            }
            break;
        }
        rc = get_stream_inst_handle(my_obj);
        if(rc) {
            if(my_obj->fd > 0) {
                close(my_obj->fd);
                my_obj->fd = -1;
            }
        }
        break;
    default:
        CDBG_ERROR("%s: Invalid evt=%d, stream_state=%d",
                   __func__,evt,my_obj->state);
        rc = -1;
        break;
    }
    return rc;
}

There is also:

QCameraStream->streamOn
	mm_camera_vtbl_t->ops->start_streams
		mm_camera_intf_start_streams
			mm_camera_start_streams
				mm_channel_fsm_fn(..., MM_CHANNEL_EVT_START_STREAM, ...)
					mm_stream_fsm_fn(..., MM_STREAM_EVT_START, ...)
						mm_camera_cmd_thread_launch // launch the callback (CB) thread
						mm_stream_streamon(mm_stream_t)
							mm_camera_poll_thread_add_poll_fd(..., mm_stream_data_notify , ...)

static void mm_stream_data_notify(void* user_data)
{
    mm_stream_t *my_obj = (mm_stream_t*)user_data;
    int32_t idx = -1, i, rc;
    uint8_t has_cb = 0;
    mm_camera_buf_info_t buf_info;

    if (NULL == my_obj) {
        return;
    }

    if (MM_STREAM_STATE_ACTIVE_STREAM_ON != my_obj->state) {
        /* this Cb will only received in active_stream_on state
         * if not so, return here */
        CDBG_ERROR("%s: ERROR!! Wrong state (%d) to receive data notify!",
                   __func__, my_obj->state);
        return;
    }

    memset(&buf_info, 0, sizeof(mm_camera_buf_info_t));

    pthread_mutex_lock(&my_obj->buf_lock);
    rc = mm_stream_read_msm_frame(my_obj, &buf_info); // read a frame via ioctl(..., VIDIOC_DQBUF, ...)
    if (rc != 0) {
        pthread_mutex_unlock(&my_obj->buf_lock);
        return;
    }
    idx = buf_info.buf->buf_idx;

    /* update buffer location */
    my_obj->buf_status[idx].in_kernel = 0;

    /* update buf ref count */
    if (my_obj->is_bundled) {
        /* need to add into super buf since bundled, add ref count */
        my_obj->buf_status[idx].buf_refcnt++;
    }

    for (i=0; i < MM_CAMERA_STREAM_BUF_CB_MAX; i++) {
        if(NULL != my_obj->buf_cb[i].cb) {
            /* for every CB, add ref count */
            my_obj->buf_status[idx].buf_refcnt++;
            has_cb = 1;
        }
    }
    pthread_mutex_unlock(&my_obj->buf_lock);

    mm_stream_handle_rcvd_buf(my_obj, &buf_info); // mm_camera_queue_enq: push the frame into the queue (provided a
                                                  // callback is registered) and signal it via sem_post; the thread
                                                  // started by mm_camera_cmd_thread_launch then loops, reads the
                                                  // data off the queue, and executes the CBs
}

As a result, once the stream is on, stream_cb_routine (implemented in QCameraStream) runs over and over:

void stream_cb_routine(mm_camera_super_buf_t *bufs,
                       void *userdata)
{
    QCameraStream *p_obj=(QCameraStream*) userdata;
    switch (p_obj->mExtImgMode) { // this mode is fixed back at prepareStream time
    case MM_CAMERA_PREVIEW:
        ALOGE("%s : callback for MM_CAMERA_PREVIEW", __func__);
        ((QCameraStream_preview *)p_obj)->dataCallback(bufs); // CAMERA_PREVIEW and CAMERA_VIDEO handled identically?
        break;
    case MM_CAMERA_VIDEO:
        ALOGE("%s : callback for MM_CAMERA_VIDEO", __func__);
        ((QCameraStream_preview *)p_obj)->dataCallback(bufs);
        break;
    case MM_CAMERA_SNAPSHOT_MAIN:
        ALOGE("%s : callback for MM_CAMERA_SNAPSHOT_MAIN", __func__);
        p_obj->p_mm_ops->ops->qbuf(p_obj->mCameraHandle,
                                   p_obj->mChannelId,
                                   bufs->bufs[0]);
        break;
    case MM_CAMERA_SNAPSHOT_THUMBNAIL:
        break;
    default:
        break;
    }
}
void QCameraStream::dataCallback(mm_camera_super_buf_t *bufs)
{
    if (mPendingCount != 0) { // does this dataCallback really keep firing all the time?
                              // Judging from the code, the registered callback count defaults to -1, meaning infinite.
                              // That seems the only explanation: if nothing fired continuously, this code would never
                              // notice that onNewRequest had incremented mPendingCount
        ALOGD("Got frame request");
        pthread_mutex_lock(&mFrameDeliveredMutex);
        mPendingCount--;
        ALOGD("Completed frame request");
        pthread_cond_signal(&mFrameDeliveredCond);
        pthread_mutex_unlock(&mFrameDeliveredMutex);
        processPreviewFrame(bufs);
    } else {
        p_mm_ops->ops->qbuf(mCameraHandle,
                mChannelId, bufs->bufs[0]); // if nobody needs the data, push the buffer straight back into the driver's queue (ends up as a V4L2 QBUF)
    }
}

Curiously, in the version of the QCam HAL code I have, camera2_frame_queue_dst_ops_t is never used:

int QCameraHardwareInterface::set_frame_queue_dst_ops(
    const camera2_frame_queue_dst_ops_t *frame_dst_ops)
{
    mFrameQueueDst = frame_dst_ops; // apparently never used for now
    return OK;
}

That means Camera2Device->getNextFrame in Camera Service's FrameProcessor can never obtain any data. I am not sure whether this is just an issue with my copy of the code; as an aside, the latest Qualcomm Camera HAL is no longer in the AOSP tree at all and ships only as a proprietary .so.

Overall, then, there may be several QCameraStream objects, each responsible for its own job.
They also interact: a newly added stream may force other already-streaming streams to restart.

Another highlight of Camera HAL 2.0 is the re-process stream:
simply put, an output stream is fed back into a BufferQueue as an input stream so that another consumer can process it, much like a chain.
Currently ZslProcessor makes use of this.

ZslProcessor->updateStream
	Camera2Device->createStream
	Camera2Device->createReprocessStreamFromStream // on release, the re-process stream is deleted first
		new ReprocessStreamAdapter
		ReprocessStreamAdapter->connectToDevice
			camera2_device_t->ops->allocate_reprocess_stream_from_stream

Here ReprocessStreamAdapter is effectively the camera2_stream_in_ops_t; it manages the re-process stream.
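
For reference, the input-stream ops declared in camera2.h look roughly like this (quoted from memory, so treat the exact signatures as approximate):

typedef struct camera2_stream_in_ops {
    /* pull the next buffer to reprocess out of the stream */
    int (*acquire_buffer)(const struct camera2_stream_in_ops *w,
            buffer_handle_t **buffer);
    /* return a buffer to the stream once reprocessing is done */
    int (*release_buffer)(const struct camera2_stream_in_ops *w,
            buffer_handle_t *buffer);
} camera2_stream_in_ops_t;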

But Qualcomm does not appear to have implemented this in this version of the code either, so we stop here for now; if I find the corresponding code later I will revisit it.

After all this, none of it should come as a surprise: from Camera Service's standpoint it holds two MetadataQueues, mRequestQueue and mFrameQueue.
App-triggered actions such as set parameter / start preview / start recording are translated directly into requests, placed on mRequestQueue, and the preview/recording stream is restarted.
A capture is likewise turned into a request and placed on mRequestQueue.
If necessary, notify_request_queue_not_empty tells the QCam HAL there are requests to handle; the HAL then spins up a thread (QCameraHardwareInterface::runCommandThread) that processes requests until none remain and then exits.
During this processing each stream's processPreviewFrame is invoked, and each may in turn invoke its own follow-up callbacks.
One further implementation detail: stream_cb_routine is registered on the channel from stream start onward, and it indirectly calls QCameraStream::dataCallback (stream_cb_routine knows which mode the callback came back for, so it can dispatch to the matching dataCallback). That callback fires continuously; onNewRequest (below) bumps mPendingCount, and only then does the next dataCallback call processPreviewFrame, otherwise the buffer is simply pushed back into the driver's queue.

void QCameraStream::onNewRequest()
{
    ALOGI("%s:E",__func__);
    pthread_mutex_lock(&mFrameDeliveredMutex);
    ALOGI("Sending Frame request");
    mPendingCount++;
    pthread_cond_wait(&mFrameDeliveredCond, &mFrameDeliveredMutex); // wait for this request to finish before taking the next one
    ALOGV("Got frame");
    pthread_mutex_unlock(&mFrameDeliveredMutex);
    ALOGV("%s:X",__func__);
}

processPreviewFrame calls the enqueue_buffer method of the BufferQueue that was attached when the stream was created, pushing the data into the BufferQueue, from which the corresponding consumer receives it.
In today's Android Camera HAL 2.0, for example,
camera2/BurstCapture.h
camera2/CallbackProcessor.h
camera2/JpegProcessor.h
camera2/StreamingProcessor.h
camera2/ZslProcessor.h
implement the corresponding Consumer::FrameAvailableListener; burst-capture can be ignored for now, though, since it is all still stub implementations.

ZslProcessor.h and CaptureSequencer.h both implement FrameProcessor::FilteredListener's onFrameAvailable(…),
but as mentioned earlier this version of the QCam HAL leaves that path unimplemented, so FrameProcessor cannot obtain the metadata.
Seen that way, onFrameAvailable is never notified. (I believe this is a problem with my copy of the code.)

As mentioned, parts of the QCam HAL are unimplemented, so mFrameQueue never receives data, even though metadata coming back from the driver is supposed to be queued there.

In addition,
CaptureSequencer.h implements onCaptureAvailable, which JpegProcessor notifies once it finishes processing.

Curiosities: multiple streams do not return simultaneously, so differing CPU processing speeds introduce skew between them? I also wonder how the driver handles video snapshot: if the buffers are strictly sequential, the video would be one frame short; if not, does the driver return multiple buffers at once? I had honestly never thought about this before @_@

A First Look at Camera HAL 2.0

Android 4.2 reworked the Camera HAL substantially, essentially deprecating the old CameraHardwareInterface and introducing a new set of interfaces. Both are still supported: depending on which HAL version a vendor implements, the Camera Service layer automatically loads the matching framework HAL. Documentation on this is still scarce, and few vendors have implemented it, roughly Qualcomm and Samsung on their own platforms.
The text below is based on the Qualcomm platform and on my reading of the AOSP code; please point out any mistakes, and I will correct them as my understanding improves!

I watched an introductory talk on this (as an aside, I could barely follow the speaker's English); see it for yourself if you can. Since the video cannot be downloaded, I took some screenshots and put them on Flickr.
www.youtube.com/watch?v=Lald5txnoHw
Here is a short blurb about the talk and HAL 2.0:

The Linux Foundation
Android Builders Summit 2013
Camera 2.0: The New Camera Hardware Interface in Android 4.2
By Balwinder Kaur & Ashutosh Gupta
San Francisco, California

Android 4.2 was released with a new Camera Hardware Abstraction Layer (HAL) Camera 2.0. Camera 2.0 has a big emphasis on collection and providing metadata associated with each frame. It also provides the ability to re-process streams. Although the APIs at the SDK level are yet to expose any new APIS to the end user, the Camera HAL and the Camera Service architecture has been revamped to a different architecture. This presentation provides an insight into the new architecture as well as covering some of the challenges faced in building production quality Camera HAL implementations.
The intended audience for this conference session is engineers who want to learn more about the Android Camera Internals. This talk should facilitate engineers wanting to integrate, improve or innovate using the Camera subsystem.

What follows is my own understanding.

How does HAL2 talk to the driver?

The HAL2 framework and the vendor implementation are bound together through /path/to/aosp/hardware/qcom/camera/QCamera/HAL2/wrapper/QualcommCamera.cpp, which initializes the struct named HMI, just as before.

static hw_module_methods_t camera_module_methods = {
    open: camera_device_open,
};

static hw_module_t camera_common  = {
    tag: HARDWARE_MODULE_TAG,
    module_api_version: CAMERA_MODULE_API_VERSION_2_0,
    hal_api_version: HARDWARE_HAL_API_VERSION,
    id: CAMERA_HARDWARE_MODULE_ID,
    name: "Qcamera",
    author:"Qcom",
    methods: &camera_module_methods,
    dso: NULL,
    reserved:  {0},
};

camera_module_t HAL_MODULE_INFO_SYM = {
    common: camera_common,
    get_number_of_cameras: get_number_of_cameras,
    get_camera_info: get_camera_info,
};

camera2_device_ops_t camera_ops = {
    set_request_queue_src_ops:           android::set_request_queue_src_ops,
    notify_request_queue_not_empty:      android::notify_request_queue_not_empty,
    set_frame_queue_dst_ops:             android::set_frame_queue_dst_ops,
    get_in_progress_count:               android::get_in_progress_count,
    flush_captures_in_progress:          android::flush_captures_in_progress,
    construct_default_request:           android::construct_default_request,

    allocate_stream:                     android::allocate_stream,
    register_stream_buffers:             android::register_stream_buffers,
    release_stream:                      android::release_stream,

    allocate_reprocess_stream:           android::allocate_reprocess_stream,
    allocate_reprocess_stream_from_stream: android::allocate_reprocess_stream_from_stream,
    release_reprocess_stream:            android::release_reprocess_stream,

    trigger_action:                      android::trigger_action,
    set_notify_callback:                 android::set_notify_callback,
    get_metadata_vendor_tag_ops:         android::get_metadata_vendor_tag_ops,
    dump:                                android::dump,
};

In QCameraHWI:

QCameraHardwareInterface::
QCameraHardwareInterface(int cameraId, int mode)
                  : mCameraId(cameraId)
{

    cam_ctrl_dimension_t mDimension;

    /* Open camera stack! */
    memset(&mMemHooks, 0, sizeof(mm_camear_mem_vtbl_t));
    mMemHooks.user_data=this;
    mMemHooks.get_buf=get_buffer_hook;
    mMemHooks.put_buf=put_buffer_hook;

    mCameraHandle=camera_open(mCameraId, &mMemHooks);
    ALOGV("Cam open returned %p",mCameraHandle);
    if(mCameraHandle == NULL) {
        ALOGE("startCamera: cam_ops_open failed: id = %d", mCameraId);
        return;
    }
    mCameraHandle->ops->sync(mCameraHandle->camera_handle);

    mChannelId=mCameraHandle->ops->ch_acquire(mCameraHandle->camera_handle);
    if(mChannelId<=0)
    {
        ALOGE("%s:Channel aquire failed",__func__);
        mCameraHandle->ops->camera_close(mCameraHandle->camera_handle);
        return;
    }

    /* Initialize # of frame requests the HAL is handling to zero*/
    mPendingRequests=0;
}

This calls camera_open() in mm_camera_interface,

which in turn calls mm_camera_open() in mm_camera.

Here the communication with the driver still goes through V4L2.

But why does Qualcomm leave some of the wrapper's operations unimplemented?

int trigger_action(const struct camera2_device *,
        uint32_t trigger_id,
        int32_t ext1,
        int32_t ext2)
{
    return INVALID_OPERATION;
}

Operations such as auto focus are in theory supposed to be triggered through this, yet it is only a stub. Are these operations simply not mandatory, or are they hidden in some corner I have not found?

Is it libmmcamera_interface or libmmcamera_interface2 that is in use at the moment?
Judging from the code it should be libmmcamera_interface.

Another example: what kind of process is start preview?
Camera2Client::startPreview(…)
Camera2Client::startPreviewL(…)
StreamingProcessor::updatePreviewStream(…)
Camera2Device::createStream(…)
Camera2Device::StreamAdapter::connectToDevice(…)
camera2_device_t->ops->allocate_stream(…)

This allocate_stream is implemented by the vendor; for Qualcomm's camera it lives in /path/to/aosp/hardware/qcom/camera/QCamera/HAL2/wrapper/QualcommCamera.cpp

android::allocate_stream(…)
QCameraHardwareInterface::allocate_stream(…)
QCameraStream::createInstance(…)

QCameraStream_preview::createInstance(uint32_t CameraHandle,
                        uint32_t ChannelId,
                        uint32_t Width,
                        uint32_t Height,
                        int requestedFormat,
                        mm_camera_vtbl_t *mm_ops,
                        camera_mode_t mode)
{
  QCameraStream* pme = new QCameraStream_preview(CameraHandle,
                        ChannelId,
                        Width,
                        Height,
                        requestedFormat,
                        mm_ops,
                        mode);
  return pme;
}

Although this redesign still counts as fairly "new", it is clearly the direction things are heading, so it is worth getting familiar with!
P.S. Android 4.3 was released just today, and here I am still studying 4.2 stuff, heh.

How Data Coming Back from the Driver Gets Displayed

Want to know how the data the driver returns ends up on the screen,
and which layer does that work?

Tracing the code makes the process clear; the code version here is Jelly Bean.

When CameraHardwareInterface is initialized it sets up the window-related data:

struct camera_preview_window {
    struct preview_stream_ops nw;
    void *user;
};

struct camera_preview_window mHalPreviewWindow;

status_t initialize(hw_module_t *module)
{
    ALOGI("Opening camera %s", mName.string());
    int rc = module->methods->open(module, mName.string(),
                                   (hw_device_t **)&mDevice);
    if (rc != OK) {
        ALOGE("Could not open camera %s: %d", mName.string(), rc);
        return rc;
    }
    initHalPreviewWindow();
    return rc;
}

void initHalPreviewWindow()
{
    mHalPreviewWindow.nw.cancel_buffer = __cancel_buffer;
    mHalPreviewWindow.nw.lock_buffer = __lock_buffer;
    mHalPreviewWindow.nw.dequeue_buffer = __dequeue_buffer;
    mHalPreviewWindow.nw.enqueue_buffer = __enqueue_buffer;
    mHalPreviewWindow.nw.set_buffer_count = __set_buffer_count;
    mHalPreviewWindow.nw.set_buffers_geometry = __set_buffers_geometry;
    mHalPreviewWindow.nw.set_crop = __set_crop;
    mHalPreviewWindow.nw.set_timestamp = __set_timestamp;
    mHalPreviewWindow.nw.set_usage = __set_usage;
    mHalPreviewWindow.nw.set_swap_interval = __set_swap_interval;

    mHalPreviewWindow.nw.get_min_undequeued_buffer_count =
            __get_min_undequeued_buffer_count;
}

static int __dequeue_buffer(struct preview_stream_ops* w,
                            buffer_handle_t** buffer, int *stride)
{
    int rc;
    ANativeWindow *a = anw(w);
    ANativeWindowBuffer* anb;
    rc = native_window_dequeue_buffer_and_wait(a, &anb);
    if (!rc) {
        *buffer = &anb->handle;
        *stride = anb->stride;
    }
    return rc;
}

static int __enqueue_buffer(struct preview_stream_ops* w,
                  buffer_handle_t* buffer)
{
    ANativeWindow *a = anw(w);
    return a->queueBuffer(a,
              container_of(buffer, ANativeWindowBuffer, handle), -1);
}
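
From the vendor HAL's point of view the per-frame pattern is then simply the following (a schematic sketch; w is the preview_stream_ops_t handed down via setPreviewWindow):

buffer_handle_t *buf = NULL;
int stride = 0;

w->dequeue_buffer(w, &buf, &stride); // pull an empty buffer out of the BufferQueue
w->lock_buffer(w, buf);              // wait until it is safe to write into it
/* ... copy/convert the camera frame into the buffer ... */
w->enqueue_buffer(w, buf);           // queue the filled buffer for display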

From here we descend into the vendor HAL; TI serves as the example.

/**
   @brief Sets ANativeWindow object.

   Preview buffers provided to CameraHal via this object. DisplayAdapter will be interfacing with it
   to render buffers to display.

   @param[in] window The ANativeWindow object created by Surface flinger
   @return NO_ERROR If the ANativeWindow object passes validation criteria
   @todo Define validation criteria for ANativeWindow object. Define error codes for scenarios

 */
status_t CameraHal::setPreviewWindow(struct preview_stream_ops *window)
{
    status_t ret = NO_ERROR;
    CameraAdapter::BuffersDescriptor desc;

    LOG_FUNCTION_NAME;
    mSetPreviewWindowCalled = true;

   ///If the Camera service passes a null window, we destroy existing window and free the DisplayAdapter
    if(!window)
    {
        if(mDisplayAdapter.get() != NULL)
        {
            ///NULL window passed, destroy the display adapter if present
            CAMHAL_LOGDA("NULL window passed, destroying display adapter");
            mDisplayAdapter.clear();
            ///@remarks If there was a window previously existing, we usually expect another valid window to be passed by the client
            ///@remarks so, we will wait until it passes a valid window to begin the preview again
            mSetPreviewWindowCalled = false;
        }
        CAMHAL_LOGDA("NULL ANativeWindow passed to setPreviewWindow");
        return NO_ERROR;
    }else if(mDisplayAdapter.get() == NULL)
    {
        // Need to create the display adapter since it has not been created
        // Create display adapter
        mDisplayAdapter = new ANativeWindowDisplayAdapter();

        ...

        // DisplayAdapter needs to know where to get the CameraFrames from inorder to display
        // Since CameraAdapter is the one that provides the frames, set it as the frame provider for DisplayAdapter
        mDisplayAdapter->setFrameProvider(mCameraAdapter);

        ...

        // Update the display adapter with the new window that is passed from CameraService
        // the window here (ANativeWindowDisplayAdapter::mANativeWindow) is CameraHardwareInterface::mHalPreviewWindow::nw
        ret  = mDisplayAdapter->setPreviewWindow(window);
        if(ret!=NO_ERROR)
            {
            CAMHAL_LOGEB("DisplayAdapter setPreviewWindow returned error %d", ret);
            }

        if(mPreviewStartInProgress)
        {
            CAMHAL_LOGDA("setPreviewWindow called when preview running");
            // Start the preview since the window is now available
            ret = startPreview();
        }
    } else {
        // Update the display adapter with the new window that is passed from CameraService
        ret = mDisplayAdapter->setPreviewWindow(window);
        if ( (NO_ERROR == ret) && previewEnabled() ) {
            restartPreview();
        } else if (ret == ALREADY_EXISTS) {
            // ALREADY_EXISTS should be treated as a noop in this case
            ret = NO_ERROR;
        }
    }
    LOG_FUNCTION_NAME_EXIT;

    return ret;

}

Note the comment line I added just above the mDisplayAdapter->setPreviewWindow(window) call.

TI's HAL wraps several adapters: one kind fetches the data (e.g. V4L2CameraAdapter, i.e. the CameraAdapter) and one kind sends the data elsewhere to be consumed (e.g. ANativeWindowDisplayAdapter, i.e. the DisplayAdapter).

In TI's HAL there is a previewThread (inside V4LCameraAdapter) that keeps fetching frames from V4L2 via ioctl:

int V4LCameraAdapter::previewThread()
{
    status_t ret = NO_ERROR;
    int width, height;
    CameraFrame frame;

    if (mPreviewing)
        {
        int index = 0;
        char *fp = this->GetFrame(index);

        uint8_t* ptr = (uint8_t*) mPreviewBufs.keyAt(index);

        int width, height;
        uint16_t* dest = (uint16_t*)ptr;
        uint16_t* src = (uint16_t*) fp;
        mParams.getPreviewSize(&width, &height);

        ...

        mParams.getPreviewSize(&width, &height);
        frame.mFrameType = CameraFrame::PREVIEW_FRAME_SYNC;
        frame.mBuffer = ptr;

        ...

        ret = sendFrameToSubscribers(&frame);

        }

    return ret;
}

char * V4LCameraAdapter::GetFrame(int &index)
{
    int ret;

    mVideoInfo->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    mVideoInfo->buf.memory = V4L2_MEMORY_MMAP;

    /* DQ */
    ret = ioctl(mCameraHandle, VIDIOC_DQBUF, &mVideoInfo->buf);
    if (ret < 0) {
        CAMHAL_LOGEA("GetFrame: VIDIOC_DQBUF Failed");
        return NULL;
    }
    nDequeued++;

    index = mVideoInfo->buf.index;

    return (char *)mVideoInfo->mem[mVideoInfo->buf.index];
}

The CameraFrame handed to the subscribers looks like this:
class CameraFrame
{
    public:

    enum FrameType
        {
            PREVIEW_FRAME_SYNC = 0x1, ///SYNC implies that the frame needs to be explicitly returned after consuming in order to be filled by camera again
            PREVIEW_FRAME = 0x2   , ///Preview frame includes viewfinder and snapshot frames
            IMAGE_FRAME_SYNC = 0x4, ///Image Frame is the image capture output frame
            IMAGE_FRAME = 0x8,
            VIDEO_FRAME_SYNC = 0x10, ///Timestamp will be updated for these frames
            VIDEO_FRAME = 0x20,
            FRAME_DATA_SYNC = 0x40, ///Any extra data assosicated with the frame. Always synced with the frame
            FRAME_DATA= 0x80,
            RAW_FRAME = 0x100,
            SNAPSHOT_FRAME = 0x200,
            ALL_FRAMES = 0xFFFF   ///Maximum of 16 frame types supported
        };

    enum FrameQuirks
    {
        ENCODE_RAW_YUV422I_TO_JPEG = 0x1 << 0,
        HAS_EXIF_DATA = 0x1 << 1,
    };

    //default contrustor
    CameraFrame():
        ...
        {
        ...
    }

    //copy constructor
    CameraFrame(const CameraFrame &frame) :
        ...
        {
        ...
    }

    void *mCookie;
    void *mCookie2;
    void *mBuffer;
    int mFrameType;
    nsecs_t mTimestamp;
    unsigned int mWidth, mHeight;
    uint32_t mOffset;
    unsigned int mAlignment;
    int mFd;
    size_t mLength;
    unsigned mFrameMask;
    unsigned int mQuirks;
    unsigned int mYuv[2];
    ///@todo add other member vars like  stride etc
};

As for ANativeWindowDisplayAdapter, it configures itself with a FrameProvider, which really just registers a callback to be notified whenever data comes back from the driver; at this point, though, nothing is actually registered yet:

int ANativeWindowDisplayAdapter::setFrameProvider(FrameNotifier *frameProvider)
{
    LOG_FUNCTION_NAME;

    // Check for NULL pointer
    if ( !frameProvider ) {
        CAMHAL_LOGEA("NULL passed for frame provider");
        LOG_FUNCTION_NAME_EXIT;
        return BAD_VALUE;
    }

    //Release any previous frame providers
    if ( NULL != mFrameProvider ) {
        delete mFrameProvider;
    }

    /** Dont do anything here, Just save the pointer for use when display is
         actually enabled or disabled
    */
    mFrameProvider = new FrameProvider(frameProvider, this, frameCallbackRelay);

    LOG_FUNCTION_NAME_EXIT;

    return NO_ERROR;
}

That callback is registered when preview starts; starting preview also allocates buffers so the data coming back from the driver has somewhere to go. Buffer allocation runs through:
CameraHal::allocateBuffer(…) __dequeue_buffer

Below, enabling the live preview registers the callback:

int ANativeWindowDisplayAdapter::enableDisplay(int width, int height, struct timeval *refTime, S3DParameters *s3dParams)
{
    Semaphore sem;
    TIUTILS::Message msg;

    LOG_FUNCTION_NAME;

    if ( mDisplayEnabled )
        {
        CAMHAL_LOGDA("Display is already enabled");
        LOG_FUNCTION_NAME_EXIT;

        return NO_ERROR;
    }

#if 0 //TODO: s3d is not part of bringup...will reenable
    if (s3dParams)
        mOverlay->set_s3d_params(s3dParams->mode, s3dParams->framePacking,
                                    s3dParams->order, s3dParams->subSampling);
#endif

#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS

    if ( NULL != refTime )
        {
        Mutex::Autolock lock(mLock);
        memcpy(&mStandbyToShot, refTime, sizeof(struct timeval));
        mMeasureStandby = true;
    }

#endif

    //Send START_DISPLAY COMMAND to display thread. Display thread will start and then wait for a message
    sem.Create();
    msg.command = DisplayThread::DISPLAY_START;

    // Send the semaphore to signal once the command is completed
    msg.arg1 = &sem;

    ///Post the message to display thread
    mDisplayThread->msgQ().put(&msg);

    ///Wait for the ACK - implies that the thread is now started and waiting for frames
    sem.Wait();

    // Register with the frame provider for frames
    mFrameProvider->enableFrameNotification(CameraFrame::PREVIEW_FRAME_SYNC);

    mDisplayEnabled = true;
    mPreviewWidth = width;
    mPreviewHeight = height;

    CAMHAL_LOGVB("mPreviewWidth = %d mPreviewHeight = %d", mPreviewWidth, mPreviewHeight);

    LOG_FUNCTION_NAME_EXIT;

    return NO_ERROR;
}



int FrameProvider::enableFrameNotification(int32_t frameTypes)
{
    LOG_FUNCTION_NAME;
    status_t ret = NO_ERROR;

    ///Enable the frame notification to CameraAdapter (which implements FrameNotifier interface)
    mFrameNotifier->enableMsgType(frameTypes<<MessageNotifier::FRAME_BIT_FIELD_POSITION
                                    , mFrameCallback
                                    , NULL
                                    , mCookie
                                    );

    // this adds the frameCallbackRelay callback into BaseCameraAdapter::mFrameSubscribers;
    // see preview_stream_ops_t*  mANativeWindow; as well as ANativeWindowDisplayAdapter::PostFrame(...)
    // and __enqueue_buffer.
    // The data ultimately lands in CameraHardwareInterface's sp<ANativeWindow> mPreviewWindow,
    // so each arriving frame updates the preview we see

    LOG_FUNCTION_NAME_EXIT;
    return ret;
}

See the comments embedded in the code above; I added those.

There is also a displayThread (inside ANativeWindowDisplayAdapter) that listens for messages from the Camera HAL and acts on them; nothing very complicated.

In other words, getting the data painted onto the view is handled inside the HAL layer, even though it boils down to driving ANativeWindow!

This is only a rough pass; we will not dig into the deeper details for now.

How the Android HAL and the Vendor HAL Implementation Are Tied Together

Take TI's platform as the example; the question is how these two files
/path/to/aosp/frameworks/av/services/camera/libcameraservice/CameraHardwareInterface.h

/path/to/aosp/hardware/ti/omap4xxx/camera/CameraHal.cpp
are tied together.

Who loads camera.$platform$.so? That file is the real HAL implementation, and it ultimately calls into the driver (by way of rpmsg-omx1).
That may feel abrupt if you do not yet know what camera.$platform$.so is; read on and you will see that the vendor HAL implementation is packaged as exactly this .so file.

void CameraService::onFirstRef()
{
    BnCameraService::onFirstRef();

    if (hw_get_module(CAMERA_HARDWARE_MODULE_ID,
                (const hw_module_t **)&mModule) < 0) {
        ALOGE("Could not load camera HAL module");
        mNumberOfCameras = 0;
    }
    else {
        mNumberOfCameras = mModule->get_number_of_cameras();
        if (mNumberOfCameras > MAX_CAMERAS) {
            ALOGE("Number of cameras(%d) > MAX_CAMERAS(%d).",
                    mNumberOfCameras, MAX_CAMERAS);
            mNumberOfCameras = MAX_CAMERAS;
        }
        for (int i = 0; i < mNumberOfCameras; i++) {
            setCameraFree(i);
        }
    }
}

In /path/to/aosp/hardware/libhardware/hardware.c we find the following:

hw_get_module(const char *id, const struct hw_module_t **module) // declared here in terms of hw_module_t, but what the vendor HAL implements is camera_module_t
/* Loop through the configuration variants looking for a module */
for (i=0 ; i<HAL_VARIANT_KEYS_COUNT+1 ; i++) {
    if (i < HAL_VARIANT_KEYS_COUNT) {
        if (property_get(variant_keys[i], prop, NULL) == 0) {
            continue;
        }
        snprintf(path, sizeof(path), "%s/%s.%s.so",
                 HAL_LIBRARY_PATH2, name, prop);
        if (access(path, R_OK) == 0) break;

        snprintf(path, sizeof(path), "%s/%s.%s.so",
                 HAL_LIBRARY_PATH1, name, prop);
        if (access(path, R_OK) == 0) break;
    } else {
        snprintf(path, sizeof(path), "%s/%s.default.so",
                 HAL_LIBRARY_PATH1, name);
        if (access(path, R_OK) == 0) break;
    }
}
static const char *variant_keys[] = {
    "ro.hardware",  /* This goes first so that it can pick up a different
                       file on the emulator. */
    "ro.product.board",
    "ro.board.platform",
    "ro.arch"
};

static const int HAL_VARIANT_KEYS_COUNT =
    (sizeof(variant_keys)/sizeof(variant_keys[0]));

/path/to/aosp/out/target/product/panda/system/build.prop
holds the platform/arch-related properties consulted above.
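
For example, assuming a hypothetical build.prop entry of:

ro.board.platform=omap4

hw_get_module("camera", ...) would probe /vendor/lib/hw/camera.omap4.so and then /system/lib/hw/camera.omap4.so (HAL_LIBRARY_PATH2 and HAL_LIBRARY_PATH1), finally falling back to /system/lib/hw/camera.default.so if no variant key matched.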

/*
 * load the symbols resolving undefined symbols before
 * dlopen returns. Since RTLD_GLOBAL is not or'd in with
 * RTLD_NOW the external symbols will not be global
 */
handle = dlopen(path, RTLD_NOW);

/* Get the address of the struct hal_module_info. */
const char *sym = HAL_MODULE_INFO_SYM_AS_STR;
hmi = (struct hw_module_t *)dlsym(handle, sym);
if (hmi == NULL) {
    ALOGE("load: couldn't find symbol %s", sym);
    status = -EINVAL;
    goto done;
}

*pHmi = hmi; // after dynamically loading the vendor HAL, hand this hw_module_t back for the Android service/HAL layers to use

What is this HAL_MODULE_INFO_SYM_AS_STR that gets loaded?

Its definition is in hardware.h:

/**
 * Name of the hal_module_info
 */
#define HAL_MODULE_INFO_SYM         HMI

/**
 * Name of the hal_module_info as a string
 */
#define HAL_MODULE_INFO_SYM_AS_STR  "HMI"

In principle dlsym simply looks up a variable/function named "HMI", which means the vendor HAL must export something with exactly that name.
Now look at the definition of hw_module_t.

/**
 * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
 * and the fields of this data structure must begin with hw_module_t
 * followed by module specific information.
 */
typedef struct hw_module_t {
    /** tag must be initialized to HARDWARE_MODULE_TAG */
    uint32_t tag;

    /**
     * The API version of the implemented module. The module owner is
     * responsible for updating the version when a module interface has
     * changed.
     *
     * The derived modules such as gralloc and audio own and manage this field.
     * The module user must interpret the version field to decide whether or
     * not to inter-operate with the supplied module implementation.
     * For example, SurfaceFlinger is responsible for making sure that
     * it knows how to manage different versions of the gralloc-module API,
     * and AudioFlinger must know how to do the same for audio-module API.
     *
     * The module API version should include a major and a minor component.
     * For example, version 1.0 could be represented as 0x0100. This format
     * implies that versions 0x0100-0x01ff are all API-compatible.
     *
     * In the future, libhardware will expose a hw_get_module_version()
     * (or equivalent) function that will take minimum/maximum supported
     * versions as arguments and would be able to reject modules with
     * versions outside of the supplied range.
     */
    uint16_t module_api_version;
#define version_major module_api_version
    /**
     * version_major/version_minor defines are supplied here for temporary
     * source code compatibility. They will be removed in the next version.
     * ALL clients must convert to the new version format.
     */

    /**
     * The API version of the HAL module interface. This is meant to
     * version the hw_module_t, hw_module_methods_t, and hw_device_t
     * structures and definitions.
     *
     * The HAL interface owns this field. Module users/implementations
     * must NOT rely on this value for version information.
     *
     * Presently, 0 is the only valid value.
     */
    uint16_t hal_api_version;
#define version_minor hal_api_version

    /** Identifier of module */
    const char *id;

    /** Name of this module */
    const char *name;

    /** Author/owner/implementor of the module */
    const char *author;

    /** Modules methods */
    struct hw_module_methods_t* methods;

    /** module's dso */
    void* dso;

    /** padding to 128 bytes, reserved for future use */
    uint32_t reserved[32-7];

} hw_module_t;

This definition is in hardware.h.

For TI hardware, what gets loaded is the struct named HMI inside camera.omap4.so (the camera.$platform$.so mentioned earlier).
It takes a careful look to notice that HAL_MODULE_INFO_SYM is literally HMI, the macro defined in hardware.h.
HMI lives in
/path/to/aosp/hardware/ti/omap4xxx/camera/CameraHal_Module.cpp
as follows:

camera_module_t HAL_MODULE_INFO_SYM = {
    common: {
         tag: HARDWARE_MODULE_TAG,
         version_major: 1,
         version_minor: 0,
         id: CAMERA_HARDWARE_MODULE_ID,
         name: "TI OMAP CameraHal Module",
         author: "TI",
         methods: &camera_module_methods,
         dso: NULL, /* remove compilation warnings */
         reserved: {0}, /* remove compilation warnings */
    },
    get_number_of_cameras: camera_get_number_of_cameras,
    get_camera_info: camera_get_camera_info,
};

Running arm-eabi-objdump -t camera.omap4.so | grep 'HMI'
confirms this as well.

And a closer look shows that common is in fact the hw_module_t struct, which confirms what the comment at the hw_module_t definition said:

/**
* Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
* and the fields of this data structure must begin with hw_module_t
* followed by module specific information.
*/

Note that hardware.h declares everything in terms of hw_module_t, while each vendor HAL implements its own xxx_module_t, so the declared type and the actual type differ. What is the secret that makes this work?
It is simply the struct's first address: the structure a vendor HAL defines is required to have hw_module_t as its first field, so the addresses line up, as the sketch below illustrates.
This technique, which Android uses throughout, is worth understanding, or you will never see how the pieces are strung together. Pointers are powerful, but use them carefully, or they will bite!
camera_device_t and hw_device_t work exactly the same way.
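
A minimal sketch of the trick (illustrative, not the actual AOSP code):

#include <dlfcn.h>
#include <hardware/hardware.h>
#include <hardware/camera_common.h>

static int count_cameras(void)
{
    /* vendor side: camera_module_t embeds hw_module_t as its FIRST field,
     * so the address of the whole struct is also a valid hw_module_t*  */
    void *handle = dlopen("/system/lib/hw/camera.omap4.so", RTLD_NOW);
    hw_module_t *hmi = (hw_module_t *)dlsym(handle, "HMI");

    /* framework side: after checking tag/id, widen the pointer back to the
     * module-specific type; both pointers hold the same address          */
    camera_module_t *cam = (camera_module_t *)hmi;
    return cam->get_number_of_cameras();
}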

#define CAMERA_HARDWARE_MODULE_ID "camera"

defined in camera_common.h.

A few other macros that come up:

#ifndef _LINUX_LIMITS_H
#define _LINUX_LIMITS_H
#define NR_OPEN 1024
#define NGROUPS_MAX 65536
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ARG_MAX 131072
#define CHILD_MAX 999
#define OPEN_MAX 256
#define LINK_MAX 127
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define MAX_CANON 255
#define MAX_INPUT 255
#define NAME_MAX 255
#define PATH_MAX 4096
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define PIPE_BUF 4096
#define XATTR_NAME_MAX 255
#define XATTR_SIZE_MAX 65536
#define XATTR_LIST_MAX 65536
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define RTSIG_MAX 32
#endif

For the Camera HAL, you need to find:

typedef struct camera_device {
    /**
     * camera_device.common.version must be in the range
     * HARDWARE_DEVICE_API_VERSION(0,0)-(1,FF). CAMERA_DEVICE_API_VERSION_1_0 is
     * recommended.
     */
    hw_device_t common;
    camera_device_ops_t *ops;
    void *priv;
} camera_device_t;

located in camera.h, and

typedef struct camera_module {
    hw_module_t common;
    int (*get_number_of_cameras)(void);
    int (*get_camera_info)(int camera_id, struct camera_info *info);
} camera_module_t;

located in camera_common.h.

With these obstacles cleared, everything afterwards goes much more smoothly.
Take the takePicture action: CameraHal_Module wires it up to CameraHal, so an upper-layer call lands in the corresponding CameraHal method:

status_t CameraHal::takePicture( )
{
    status_t ret = NO_ERROR;
    CameraFrame frame;
    CameraAdapter::BuffersDescriptor desc;
    int burst;
    const char *valstr = NULL;
    unsigned int bufferCount = 1;

    Mutex::Autolock lock(mLock);

#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS

    gettimeofday(&mStartCapture, NULL);

#endif

    LOG_FUNCTION_NAME;

    if(!previewEnabled() && !mDisplayPaused)
        {
        LOG_FUNCTION_NAME_EXIT;
        CAMHAL_LOGEA("Preview not started...");
        return NO_INIT;
        }

    // return error if we are already capturing
    if ( (mCameraAdapter->getState() == CameraAdapter::CAPTURE_STATE &&
          mCameraAdapter->getNextState() != CameraAdapter::PREVIEW_STATE) ||
         (mCameraAdapter->getState() == CameraAdapter::VIDEO_CAPTURE_STATE &&
          mCameraAdapter->getNextState() != CameraAdapter::VIDEO_STATE) ) {
        CAMHAL_LOGEA("Already capturing an image...");
        return NO_INIT;
    }

    // we only support video snapshot if we are in video mode (recording hint is set)
    valstr = mParameters.get(TICameraParameters::KEY_CAP_MODE);
    if ( (mCameraAdapter->getState() == CameraAdapter::VIDEO_STATE) &&
         (valstr && strcmp(valstr, TICameraParameters::VIDEO_MODE)) ) {
        CAMHAL_LOGEA("Trying to capture while recording without recording hint set...");
        return INVALID_OPERATION;
    }

    if ( !mBracketingRunning )
        {

         if ( NO_ERROR == ret )
            {
            burst = mParameters.getInt(TICameraParameters::KEY_BURST);
            }

         //Allocate all buffers only in burst capture case
         if ( burst > 1 )
             {
             bufferCount = CameraHal::NO_BUFFERS_IMAGE_CAPTURE;
             if ( NULL != mAppCallbackNotifier.get() )
                 {
                 mAppCallbackNotifier->setBurst(true);
                 }
             }
         else
             {
             if ( NULL != mAppCallbackNotifier.get() )
                 {
                 mAppCallbackNotifier->setBurst(false);
                 }
             }

        // this is why preview freezes during a normal capture and startPreview must be called again afterwards
        // pause preview during normal image capture
        // do not pause preview if recording (video state)
        if (NO_ERROR == ret &&
                NULL != mDisplayAdapter.get() &&
                burst < 1) {
            if (mCameraAdapter->getState() != CameraAdapter::VIDEO_STATE) {
                mDisplayPaused = true;
                mPreviewEnabled = false;
                ret = mDisplayAdapter->pauseDisplay(mDisplayPaused);
                // since preview is paused we should stop sending preview frames too
                if(mMsgEnabled & CAMERA_MSG_PREVIEW_FRAME) {
                    mAppCallbackNotifier->disableMsgType (CAMERA_MSG_PREVIEW_FRAME);
                }
            }

#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS
            mDisplayAdapter->setSnapshotTimeRef(&mStartCapture);
#endif
        }

        // if we are taking a video snapshot...
        if ((NO_ERROR == ret) && (mCameraAdapter->getState() == CameraAdapter::VIDEO_STATE)) {
            // enable post view frames if not already enabled so we can internally
            // save snapshot frames for generating thumbnail
            if((mMsgEnabled & CAMERA_MSG_POSTVIEW_FRAME) == 0) {
                mAppCallbackNotifier->enableMsgType(CAMERA_MSG_POSTVIEW_FRAME);
            }
        }

        if ( (NO_ERROR == ret) && (NULL != mCameraAdapter) )
            {
            if ( NO_ERROR == ret )
                ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_QUERY_BUFFER_SIZE_IMAGE_CAPTURE,
                                                  ( int ) &frame,
                                                  bufferCount);

            if ( NO_ERROR != ret )
                {
                CAMHAL_LOGEB("CAMERA_QUERY_BUFFER_SIZE_IMAGE_CAPTURE returned error 0x%x", ret);
                }
            }

        if ( NO_ERROR == ret )
            {
            mParameters.getPictureSize(( int * ) &frame.mWidth,
                                       ( int * ) &frame.mHeight);

            // allocate the image buffers
            ret = allocImageBufs(frame.mWidth,
                                 frame.mHeight,
                                 frame.mLength,
                                 mParameters.getPictureFormat(),
                                 bufferCount);
            if ( NO_ERROR != ret )
                {
                CAMHAL_LOGEB("allocImageBufs returned error 0x%x", ret);
                }
            }

        if (  (NO_ERROR == ret) && ( NULL != mCameraAdapter ) )
            {
            desc.mBuffers = mImageBufs;
            desc.mOffsets = mImageOffsets;
            desc.mFd = mImageFd;
            desc.mLength = mImageLength;
            desc.mCount = ( size_t ) bufferCount;
            desc.mMaxQueueable = ( size_t ) bufferCount;

            ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_USE_BUFFERS_IMAGE_CAPTURE,
                                              ( int ) &desc);
            }
        }

    if ( ( NO_ERROR == ret ) && ( NULL != mCameraAdapter ) )
        {

#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS

         //pass capture timestamp along with the camera adapter command
        ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_START_IMAGE_CAPTURE,  (int) &mStartCapture);

#else

        ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_START_IMAGE_CAPTURE);

#endif

        }

    return ret;
}

After BaseCameraAdapter (whose takePicture implementation is only a stub), the call reaches
status_t OMXCameraAdapter::takePicture(), which pushes the action onto the command queue for execution; the real work is finally done in OMXCapture::startImageCapture().
There OMX_FillThisBuffer queues buffers onto the capture port; when the capture completes, the FillBufferDone callback fires and the handler is invoked.
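
That "push onto the command queue" step is a classic message-loop pattern. Here is a minimal, self-contained sketch of the idea; all names are illustrative only, not the actual TI code (the real adapter uses its own message-queue utilities):

#include <cstdio>
#include <deque>
#include <mutex>
#include <condition_variable>

// Illustrative command ids; the real adapter uses CameraAdapter::CameraCommands.
enum Command { START_IMAGE_CAPTURE, STOP_IMAGE_CAPTURE };

class CommandQueue {
public:
    // Called from the caller's thread, e.g. from sendCommand().
    void enqueue(Command c) {
        { std::lock_guard<std::mutex> l(mLock); mQueue.push_back(c); }
        mCond.notify_one();                    // wake the command thread
    }
    // Body of the dedicated command thread.
    void loop() {
        for (;;) {
            Command c;
            {
                std::unique_lock<std::mutex> l(mLock);
                mCond.wait(l, [this] { return !mQueue.empty(); });
                c = mQueue.front();
                mQueue.pop_front();
            }
            dispatch(c);                       // runs outside the lock
        }
    }
private:
    void dispatch(Command c) {                 // the real code would call into OMX here
        std::printf("dispatching command %d\n", c);
    }
    std::deque<Command> mQueue;
    std::mutex mLock;
    std::condition_variable mCond;
};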

OMX_ERRORTYPE OMXCameraAdapter::OMXCameraGetHandle(OMX_HANDLETYPE *handle, OMX_PTR pAppData )
{
    OMX_ERRORTYPE eError = OMX_ErrorUndefined;

    for ( int i = 0; i < 5; ++i ) {
        if ( i > 0 ) {
            // sleep for 100 ms before next attempt
            usleep(100000);
        }

        // setup key parameters to send to Ducati during init
        OMX_CALLBACKTYPE oCallbacks;

        // initialize (register) the callback handles
        oCallbacks.EventHandler    = android::OMXCameraAdapterEventHandler;
        oCallbacks.EmptyBufferDone = android::OMXCameraAdapterEmptyBufferDone;
        oCallbacks.FillBufferDone  = android::OMXCameraAdapterFillBufferDone;

        // get handle
        eError = OMX_GetHandle(handle, (OMX_STRING)"OMX.TI.DUCATI1.VIDEO.CAMERA", pAppData, &oCallbacks);
        // Note this library: it is actually /path/to/aosp/hardware/ti/omap4xxx/domx/omx_proxy_component/omx_camera/src/omx_proxy_camera.c,
        // which ultimately talks to the lower layers via pRPCCtx->fd_omx = open("/dev/rpmsg-omx1", O_RDWR);
        if ( eError == OMX_ErrorNone ) {
            return OMX_ErrorNone;
        }

        CAMHAL_LOGEB("OMX_GetHandle() failed, error: 0x%x", eError);
    }

    *handle = 0;
    return eError;
}

From the callback, the data is propagated upward layer by layer until the user finally receives the captured picture data.
CameraAdapter_Factory(size_t sensor_index) lives in OMXCameraAdapter.

TI OMAP also has an alternative implementation that talks to the camera through V4L2, though it is disabled by default.
That path goes through V4LCameraAdapter::UseBuffersPreview
and eventually ends up in V4L2.

For that build, CameraAdapter_Factory() lives in V4LCameraAdapter.

Android MediaRecorder System Structure

Camera was analyzed earlier; now let's look at how MediaRecorder is implemented. I will not pay much attention to its layering here; I care more about its logic!

APP layer: /path/to/aosp/frameworks/base/media/java/android/media/MediaRecorder.java
JNI layer: /path/to/aosp/frameworks/base/media/jni/android_media_MediaRecorder.cpp
which calls the NATIVE-layer MediaRecorder (a BnMediaRecorderClient here)
header: /path/to/aosp/frameworks/av/include/media/mediarecorder.h
implementation: /path/to/aosp/frameworks/av/media/libmedia/mediarecorder.cpp

MediaRecorder::MediaRecorder() : mSurfaceMediaSource(NULL)
{
    ALOGV("constructor");

    const sp<IMediaPlayerService>& service(getMediaPlayerService());
    if (service != NULL) {
        mMediaRecorder = service->createMediaRecorder(getpid());
    }
    if (mMediaRecorder != NULL) {
        mCurrentState = MEDIA_RECORDER_IDLE;
    }

    doCleanUp();
}

getMediaPlayerService() is declared in /path/to/aosp/frameworks/av/include/media/IMediaDeathNotifier.h
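
In essence it asks the ServiceManager for the "media.player" service and retries until mediaserver has published it. A simplified sketch of what it does (the real code also caches the proxy and registers a death notifier on the binder):

#define LOG_TAG "GetMediaPlayerServiceSketch"
#include <utils/Log.h>
#include <binder/IServiceManager.h>
#include <media/IMediaPlayerService.h>
#include <unistd.h>

using namespace android;

static sp<IMediaPlayerService> getMediaPlayerServiceSketch()
{
    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> binder;
    do {
        binder = sm->getService(String16("media.player")); // published by mediaserver
        if (binder != 0) break;
        ALOGW("Media player service not published, waiting...");
        usleep(500000);                                    // retry every 0.5 s
    } while (true);
    return interface_cast<IMediaPlayerService>(binder);    // hand back a Bp proxy
}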

After obtaining the MediaPlayerService (a BpMediaPlayerService),
the following method of IMediaPlayerService is called:

sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(pid_t pid)
{
    sp<MediaRecorderClient> recorder = new MediaRecorderClient(this, pid);
    wp<MediaRecorderClient> w = recorder;
    Mutex::Autolock lock(mLock);
    mMediaRecorderClients.add(w);
    ALOGV("Create new media recorder client from pid %d", pid);
    return recorder;
}

This creates a MediaRecorderClient (a BnMediaRecorder here).

What the caller gets back across binder, however, is a BpMediaRecorder,
because of the following interface_cast:

virtual sp<IMediaRecorder> createMediaRecorder(pid_t pid)
{
    Parcel data, reply;
    data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
    data.writeInt32(pid);
    remote()->transact(CREATE_MEDIA_RECORDER, data, &reply);
    return interface_cast<IMediaRecorder>(reply.readStrongBinder());
}

MediaRecorderClient in turn creates a StagefrightRecorder (a MediaRecorderBase), which lives at
/path/to/aosp/frameworks/av/media/libmediaplayerservice/StagefrightRecorder.cpp

For now we can treat APP/JNI/NATIVE as running in one process, while MediaRecorderClient/StagefrightRecorder inside MediaPlayerService run in another; the two sides talk over binder, and we have both the Bp and Bn ends in hand. From here on I will not carefully distinguish Bp from Bn.
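
For reference, the Bp/Bn machinery is generated by a pair of macros. Below is a minimal sketch using a made-up interface IFoo; IMediaRecorder and friends follow exactly this shape:

#include <binder/IInterface.h>
#include <binder/Parcel.h>

using namespace android;

// A toy interface to illustrate the pattern.
class IFoo : public IInterface {
public:
    DECLARE_META_INTERFACE(Foo);   // declares the descriptor and asInterface()
    virtual status_t start() = 0;
};

// Client-side proxy: marshals the call into a Parcel and sends it across binder.
class BpFoo : public BpInterface<IFoo> {
public:
    BpFoo(const sp<IBinder>& impl) : BpInterface<IFoo>(impl) {}
    virtual status_t start() {
        Parcel data, reply;
        data.writeInterfaceToken(IFoo::getInterfaceDescriptor());
        remote()->transact(IBinder::FIRST_CALL_TRANSACTION, data, &reply);
        return reply.readInt32();
    }
};

// Generates IFoo::asInterface(): return the local Bn if the binder lives in
// this process, otherwise wrap the handle in a new BpFoo.
IMPLEMENT_META_INTERFACE(Foo, "android.test.IFoo");

// The service side subclasses BnInterface<IFoo> (i.e. BnFoo), implements
// start(), and unpacks the Parcel in onTransact().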

On the client side:
BnMediaRecorderClient
BpMediaRecorder
BpMediaPlayerService

On the service side:
BpMediaRecorderClient (if the service needs to notify the client, it can obtain this Bp)
BnMediaRecorder
BnMediaPlayerService

There is a diagram of all this (see the original post for the full-size image):
Android MediaRecorder Diagram

Let's take starting a recording, i.e. start(), as our example.

Here the flow forks in two: a CameraSource and an MPEG4Writer (sp<MediaWriter> mWriter).
Both classes live under /path/to/aosp/frameworks/av/media/libstagefright/.

status_t StagefrightRecorder::startMPEG4Recording() {
    int32_t totalBitRate;
    status_t err = setupMPEG4Recording(
            mOutputFd, mVideoWidth, mVideoHeight,
            mVideoBitRate, &totalBitRate, &mWriter);
    if (err != OK) {
        return err;
    }

    int64_t startTimeUs = systemTime() / 1000;
    sp<MetaData> meta = new MetaData;
    setupMPEG4MetaData(startTimeUs, totalBitRate, &meta);

    err = mWriter->start(meta.get());
    if (err != OK) {
        return err;
    }

    return OK;
}
status_t StagefrightRecorder::setupMPEG4Recording(
        int outputFd,
        int32_t videoWidth, int32_t videoHeight,
        int32_t videoBitRate,
        int32_t *totalBitRate,
        sp<MediaWriter> *mediaWriter) {
    mediaWriter->clear();
    *totalBitRate = 0;
    status_t err = OK;
    sp<MediaWriter> writer = new MPEG4Writer(outputFd);

    if (mVideoSource < VIDEO_SOURCE_LIST_END) {

        sp<MediaSource> mediaSource;
        err = setupMediaSource(&mediaSource); // very important
        if (err != OK) {
            return err;
        }

        sp<MediaSource> encoder;
        err = setupVideoEncoder(mediaSource, videoBitRate, &encoder); // very important
        if (err != OK) {
            return err;
        }

        writer->addSource(encoder);
        *totalBitRate += videoBitRate;
    }

    // Audio source is added at the end if it exists.
    // This helps make sure that the "recording" sound is suppressed for
    // camcorder applications in the recorded files.
    if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
        err = setupAudioEncoder(writer); // very important
        if (err != OK) return err;
        *totalBitRate += mAudioBitRate;
    }

    ...

    writer->setListener(mListener);
    *mediaWriter = writer;
    return OK;
}
// Set up the appropriate MediaSource depending on the chosen option
status_t StagefrightRecorder::setupMediaSource(
                      sp<MediaSource> *mediaSource) {
    if (mVideoSource == VIDEO_SOURCE_DEFAULT
            || mVideoSource == VIDEO_SOURCE_CAMERA) {
        sp<CameraSource> cameraSource;
        status_t err = setupCameraSource(&cameraSource);
        if (err != OK) {
            return err;
        }
        *mediaSource = cameraSource;
    } else if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) {
        // If using GRAlloc buffers, setup surfacemediasource.
        // Later a handle to that will be passed
        // to the client side when queried
        status_t err = setupSurfaceMediaSource();
        if (err != OK) {
            return err;
        }
        *mediaSource = mSurfaceMediaSource;
    } else {
        return INVALID_OPERATION;
    }
    return OK;
}
status_t StagefrightRecorder::setupCameraSource(
        sp<CameraSource> *cameraSource) {
    status_t err = OK;
    if ((err = checkVideoEncoderCapabilities()) != OK) {
        return err;
    }
    Size videoSize;
    videoSize.width = mVideoWidth;
    videoSize.height = mVideoHeight;
    if (mCaptureTimeLapse) {
        if (mTimeBetweenTimeLapseFrameCaptureUs < 0) {
            ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
                mTimeBetweenTimeLapseFrameCaptureUs);
            return BAD_VALUE;
        }

        mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
                mCamera, mCameraProxy, mCameraId,
                videoSize, mFrameRate, mPreviewSurface,
                mTimeBetweenTimeLapseFrameCaptureUs);
        *cameraSource = mCameraSourceTimeLapse;
    } else {
        *cameraSource = CameraSource::CreateFromCamera(
                mCamera, mCameraProxy, mCameraId, videoSize, mFrameRate,
                mPreviewSurface, true /*storeMetaDataInVideoBuffers*/);
    }
    mCamera.clear();
    mCameraProxy.clear();
    if (*cameraSource == NULL) {
        return UNKNOWN_ERROR;
    }

    if ((*cameraSource)->initCheck() != OK) {
        (*cameraSource).clear();
        *cameraSource = NULL;
        return NO_INIT;
    }

    // When frame rate is not set, the actual frame rate will be set to
    // the current frame rate being used.
    if (mFrameRate == -1) {
        int32_t frameRate = 0;
        CHECK ((*cameraSource)->getFormat()->findInt32(
                    kKeyFrameRate, &frameRate));
        ALOGI("Frame rate is not explicitly set. Use the current frame "
             "rate (%d fps)", frameRate);
        mFrameRate = frameRate;
    }

    CHECK(mFrameRate != -1);

    mIsMetaDataStoredInVideoBuffers =
        (*cameraSource)->isMetaDataStoredInVideoBuffers();

    return OK;
}
status_t StagefrightRecorder::setupVideoEncoder(
        sp<MediaSource> cameraSource,
        int32_t videoBitRate,
        sp<MediaSource> *source) {
    source->clear();

    sp<MetaData> enc_meta = new MetaData;
    enc_meta->setInt32(kKeyBitRate, videoBitRate);
    enc_meta->setInt32(kKeyFrameRate, mFrameRate);

    switch (mVideoEncoder) {
        case VIDEO_ENCODER_H263:
            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
            break;

        case VIDEO_ENCODER_MPEG_4_SP:
            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
            break;

        case VIDEO_ENCODER_H264:
            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
            break;

        default:
            CHECK(!"Should not be here, unsupported video encoding.");
            break;
    }

    sp<MetaData> meta = cameraSource->getFormat();

    int32_t width, height, stride, sliceHeight, colorFormat;
    CHECK(meta->findInt32(kKeyWidth, &width));
    CHECK(meta->findInt32(kKeyHeight, &height));
    CHECK(meta->findInt32(kKeyStride, &stride));
    CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
    CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));

    enc_meta->setInt32(kKeyWidth, width);
    enc_meta->setInt32(kKeyHeight, height);
    enc_meta->setInt32(kKeyIFramesInterval, mIFramesIntervalSec);
    enc_meta->setInt32(kKeyStride, stride);
    enc_meta->setInt32(kKeySliceHeight, sliceHeight);
    enc_meta->setInt32(kKeyColorFormat, colorFormat);
    if (mVideoTimeScale > 0) {
        enc_meta->setInt32(kKeyTimeScale, mVideoTimeScale);
    }
    if (mVideoEncoderProfile != -1) {
        enc_meta->setInt32(kKeyVideoProfile, mVideoEncoderProfile);
    }
    if (mVideoEncoderLevel != -1) {
        enc_meta->setInt32(kKeyVideoLevel, mVideoEncoderLevel);
    }

    OMXClient client;
    CHECK_EQ(client.connect(), (status_t)OK);

    uint32_t encoder_flags = 0;
    if (mIsMetaDataStoredInVideoBuffers) {
        encoder_flags |= OMXCodec::kStoreMetaDataInVideoBuffers;
    }

    // Do not wait for all the input buffers to become available.
    // This give timelapse video recording faster response in
    // receiving output from video encoder component.
    if (mCaptureTimeLapse) {
        encoder_flags |= OMXCodec::kOnlySubmitOneInputBufferAtOneTime;
    }

    sp<MediaSource> encoder = OMXCodec::Create(
            client.interface(), enc_meta,
            true /* createEncoder */, cameraSource,
            NULL, encoder_flags);
    if (encoder == NULL) {
        ALOGW("Failed to create the encoder");
        // When the encoder fails to be created, we need
        // release the camera source due to the camera's lock
        // and unlock mechanism.
        cameraSource->stop();
        return UNKNOWN_ERROR;
    }

    *source = encoder;

    return OK;
}

This is where the recorder hooks up with OMXCodec.
A configuration file named media_codecs.xml declares which codecs the device supports.
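
Its structure looks roughly like the fragment below; this is purely illustrative, since the component names and quirks are device-specific:

<!-- Illustrative fragment only; real entries vary per device. -->
<MediaCodecs>
    <Encoders>
        <MediaCodec name="OMX.TI.DUCATI1.VIDEO.H264E" type="video/avc" />
    </Encoders>
    <Decoders>
        <MediaCodec name="OMX.TI.DUCATI1.VIDEO.DECODER" type="video/avc">
            <Quirk name="requires-allocate-on-input-ports" />
        </MediaCodec>
    </Decoders>
</MediaCodecs>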

An MPEG-4 recording also has audio, so setupAudioEncoder is called afterwards; I will not expand on it here. In short, the audio is added to MPEG4Writer as another Track.
A side note: Google says setupAudioEncoder is placed last so that the start-recording prompt sound does not get captured in the file, but in practice this still has a bug: on some devices that sound is recorded anyway, and the apps that hit it work around the problem by playing the sound themselves.

MPEG4Writer also has a
start(MetaData*)
method that kicks off two things:
a) startWriterThread

which starts a thread to do the writing:

    void MPEG4Writer::threadFunc() {
        ALOGV("threadFunc");

        prctl(PR_SET_NAME, (unsigned long)"MPEG4Writer", 0, 0, 0);

        Mutex::Autolock autoLock(mLock);
        while (!mDone) {
            Chunk chunk;
            bool chunkFound = false;

            while (!mDone && !(chunkFound = findChunkToWrite(&chunk))) {
                mChunkReadyCondition.wait(mLock);
            }

            // Actual write without holding the lock in order to
            // reduce the blocking time for media track threads.
            if (chunkFound) {
                mLock.unlock();
                writeChunkToFile(&chunk);
                mLock.lock();
            }
        }

        writeAllChunks();
    }

b) startTracks

    status_t MPEG4Writer::startTracks(MetaData *params) {
        for (List<Track *>::iterator it = mTracks.begin();
             it != mTracks.end(); ++it) {
            status_t err = (*it)->start(params);

            if (err != OK) {
                for (List<Track *>::iterator it2 = mTracks.begin();
                     it2 != it; ++it2) {
                    (*it2)->stop();
                }

                return err;
            }
        }
        return OK;
    }

and then calls each Track's start method:

    status_t MPEG4Writer::Track::start(MetaData *params) {
        ...

        initTrackingProgressStatus(params);

        ...

        status_t err = mSource->start(meta.get()); // this ends up running CameraSource::start(); the two are tied together

        ...

        pthread_create(&mThread, &attr, ThreadWrapper, this);
        return OK;
    }

    void *MPEG4Writer::Track::ThreadWrapper(void *me) {
        Track *track = static_cast<Track *>(me);

        status_t err = track->threadEntry();
        return (void *) err;
    }

status_t MPEG4Writer::Track::threadEntry()
runs on yet another newly spawned thread. It loops, continuously reading data out of CameraSource::read(); CameraSource's data naturally comes back from the driver (see CameraSourceListener: CameraSource keeps the frames arriving from the driver in a list called mFramesReceived and signals mFrameAvailableCondition when data arrives; frames that arrive before recording has started are simply dropped; note that MediaWriter starts CameraSource::start() first and only then starts the Track writer), and then writes it to the file.
Note: strictly speaking MPEG4Writer reads from OMXCodec, because the data first reaches CameraSource, the codec encodes it, and only then does MPEG4Writer write it to the file. For how buffers travel between CameraSource/OMXCodec/MPEG4Writer, see the buffer-flow discussion in http://guoh.org/lifelog/2013/06/interaction-between-stagefright-and-codec/.
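
The producer/consumer handoff described above boils down to a condition-variable-guarded list. A minimal sketch under assumed names (the real members are mFramesReceived and mFrameAvailableCondition in CameraSource, and the buffers are IMemory/MediaBuffer objects, not a bare struct):

#include <list>
#include <mutex>
#include <condition_variable>

struct Frame { /* stand-in for the real camera buffer */ };

class FrameQueue {
public:
    // Producer side: called from the camera data callback.
    void push(Frame* f) {
        std::lock_guard<std::mutex> l(mLock);
        if (!mStarted) { release(f); return; }  // not recording yet: drop it
        mFramesReceived.push_back(f);
        mFrameAvailable.notify_one();           // wake the Track's read loop
    }
    // Consumer side: called from MPEG4Writer::Track::threadEntry() via read().
    Frame* pop() {
        std::unique_lock<std::mutex> l(mLock);
        mFrameAvailable.wait(l, [this] { return !mFramesReceived.empty(); });
        Frame* f = mFramesReceived.front();
        mFramesReceived.pop_front();
        return f;
    }
    void start() { std::lock_guard<std::mutex> l(mLock); mStarted = true; }
private:
    void release(Frame*) {}                     // would return the buffer to the driver
    std::list<Frame*> mFramesReceived;
    std::mutex mLock;
    std::condition_variable mFrameAvailable;
    bool mStarted = false;
};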

Stepping back: what does Stagefright actually do? I see it mostly as glue. It works at the MediaPlayerService layer and binds MediaSource, MediaWriter, the codec, and the upper-layer MediaRecorder together; that is its biggest role. Replacing OpenCORE with it also fits Google's consistently engineering-minded style (as opposed to the complex academic approach; plenty of Google's work is complex too, but it generally solves problems as simply as possible).
What feels a little unusual is that MediaRecorder sits inside MediaPlayerService; the two sound like opposites. Maybe one day they will be renamed, or split apart, who knows.

Of course this is only a rough overview; I will try to cover the codec side separately later.

Some details are not listed here; the points worth noting are:

1. Time-lapse recording
Here CameraSource's counterpart is CameraSourceTimeLapse.

The actual mechanism is a skipCurrentFrame check
inside dataCallbackTimestamp,

which tracks and computes with a few variables:
mTimeBetweenTimeLapseVideoFramesUs (1E6/videoFrameRate) // interval between two video frames
mLastTimeLapseFrameRealTimestampUs // timestamp of the previous kept frame
From the frame rate it derives how far apart two kept frames must be, and every frame in between is dropped via releaseOneRecordingFrame.
In other words, what the driver delivers is unchanged; the frames are simply discarded in software. A sketch of the decision follows the link below.

For more on time-lapse photography, see
https://en.wikipedia.org/wiki/Time-lapse_photography
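
Here is a sketch of that skip decision; the member names mirror the real ones but the logic is simplified (CameraSourceTimeLapse also handles several corner cases such as the "quick read" path):

#include <cstdint>

struct TimeLapseState {
    int64_t mTimeBetweenTimeLapseVideoFramesUs;   // 1e6 / videoFrameRate
    int64_t mTimeBetweenTimeLapseFrameCaptureUs;  // capture interval chosen by the app
    int64_t mLastTimeLapseFrameRealTimestampUs = 0;

    bool skipCurrentFrame(int64_t timestampUs) {
        if (mLastTimeLapseFrameRealTimestampUs == 0) {
            // always keep the very first frame
            mLastTimeLapseFrameRealTimestampUs = timestampUs;
            return false;
        }
        // Keep a frame only once the capture interval has elapsed; everything
        // in between is dropped via releaseOneRecordingFrame().
        if (timestampUs < mLastTimeLapseFrameRealTimestampUs +
                          mTimeBetweenTimeLapseFrameCaptureUs) {
            return true;   // drop this frame
        }
        mLastTimeLapseFrameRealTimestampUs = timestampUs;
        return false;      // keep it; the writer re-times kept frames at
                           // mTimeBetweenTimeLapseVideoFramesUs spacing
    }
};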

2. When recording needs the Camera, it goes through ICameraRecordingProxy, i.e. the RecordingProxy inside Camera (a BnCameraRecordingProxy).
Once the ICameraRecordingProxy has crossed binder into the service process, it becomes a Bp, as here:

case SET_CAMERA: {
    ALOGV("SET_CAMERA");
    CHECK_INTERFACE(IMediaRecorder, data, reply);
    sp<ICamera> camera = interface_cast<ICamera>(data.readStrongBinder());
    sp<ICameraRecordingProxy> proxy =
        interface_cast<ICameraRecordingProxy>(data.readStrongBinder());
    reply->writeInt32(setCamera(camera, proxy));
    return NO_ERROR;
} break;

CameraSource then uses it like this:

// We get the proxy from Camera, not ICamera. We need to get the proxy
// to the remote Camera owned by the application. Here mCamera is a
// local Camera object created by us. We cannot use the proxy from
// mCamera here.
mCamera = Camera::create(camera);
if (mCamera == 0) return -EBUSY;
mCameraRecordingProxy = proxy;
mCameraFlags |= FLAGS_HOT_CAMERA;

Open questions:

What is this CameraSource member for?
List<sp<IMemory> > mFramesBeingEncoded;
Each time a frame is encoded, CameraSource stores it in this list; when the buffers are released, those frames are released in turn. Is that done for efficiency? Why not release each frame as soon as it has been encoded?
And one more sigh at Google's habitual delete this; idiom: clever, but it always looks alarming!

Android Camera System Structure

Words like "architecture" feel too grand to me, so I only dare call this "structure".

If anything below is wrong, please correct me!

A previous post touched on Camera:
http://guoh.org/lifelog/2012/09/asop-camera-source-code-reading-video-snapshot/

For example:

$ANDROID_SRC_HOME/frameworks/base/core/java/android/hardware/Camera.java
$ANDROID_SRC_HOME/frameworks/base/core/jni/android_hardware_Camera.cpp

// Note: these two files appear to have been moved in JB, which shows how active a project Android is

$ANDROID_SRC_HOME/frameworks/base/include/camera/Camera.h
$ANDROID_SRC_HOME/frameworks/base/libs/camera/Camera.cpp

This post is a set of notes from reading http://source.android.com/devices/camera.html against the JB code, so it may not be very detailed or strictly organized; it only covers the key points and pitfalls. If you happen upon it without any Android Camera background, I suggest first reading http://developer.android.com/guide/topics/media/camera.html and the Reference docs.

Let's get started.
Broadly speaking, Camera consists of these parts:
APP — JNI — NATIVE — BINDER — CAMERA SERVICE — HAL — DRIVER

Here is the AOSP code corresponding to each of these parts:

APP($ANDROID_SRC_HOME/frameworks/base/core/java/android/hardware/Camera.java)

JNI($ANDROID_SRC_HOME/frameworks/base/core/jni/android_hardware_Camera.cpp)

NATIVE($ANDROID_SRC_HOME/frameworks/av/camera/Camera.cpp)

BINDER($ANDROID_SRC_HOME/frameworks/av/camera/ICameraService.cpp
       $ANDROID_SRC_HOME/frameworks/av/camera/ICameraClient.cpp)

CAMERA SERVICE($ANDROID_SRC_HOME/frameworks/av/services/camera/libcameraservice/CameraService.cpp)

HAL($ANDROID_SRC_HOME/hardware/libhardware/include/hardware/camera.h
    $ANDROID_SRC_HOME/hardware/libhardware/include/hardware/camera_common.h
    $ANDROID_SRC_HOME/frameworks/av/services/camera/libcameraservice/CameraHardwareInterface.h)

For the Galaxy Nexus, the concrete HAL implementation lives in

$ANDROID_SRC_HOME/hardware/ti/omap4xxx/camera/CameraHal.cpp
$ANDROID_SRC_HOME/hardware/ti/omap4xxx/camera/CameraHalCommon.cpp

with the headers in

$ANDROID_SRC_HOME/hardware/ti/omap4xxx/camera/inc/

Again for the Galaxy Nexus:

DRIVER(https://developers.google.com/android/nexus/drivers)

Do you have a rough picture of Camera now?
If so, pull up the code and look into whatever part interests you.

You can fetch the whole tree locally with repo, or browse it online.
android/hardware/Camera.java

public static Camera open(int cameraId) {
    return new Camera(cameraId);
}
Camera(int cameraId) {
    mShutterCallback = null;
    mRawImageCallback = null;
    mJpegCallback = null;
    mPreviewCallback = null;
    mPostviewCallback = null;
    mZoomListener = null;

    Looper looper;
    if ((looper = Looper.myLooper()) != null) {
        mEventHandler = new EventHandler(this, looper);
    } else if ((looper = Looper.getMainLooper()) != null) {
        mEventHandler = new EventHandler(this, looper);
    } else {
        mEventHandler = null;
    }

    native_setup(new WeakReference<Camera>(this), cameraId);
}

The code above is the basis of everything we will study: opening the camera; like any piece of hardware, it has to be opened before use.
One thing worth noting here is mEventHandler. What is it for? The documentation explains it well:

Callbacks from other methods are delivered to the event loop of the
thread which called open(). If this thread has no event loop, then
callbacks are delivered to the main application event loop. If there
is no main application event loop, callbacks are not delivered.

If you are writing anything beyond a toy program, pay attention to that description.

With native_setup(new WeakReference<Camera>(this), cameraId), the code moves into
android_hardware_Camera.cpp

// connect to camera service
static void android_hardware_Camera_native_setup(JNIEnv *env, jobject thiz,
    jobject weak_this, jint cameraId)
{
    sp<Camera> camera = Camera::connect(cameraId);

    if (camera == NULL) {
        jniThrowRuntimeException(env, "Fail to connect to camera service");
        return;
    }

    // make sure camera hardware is alive
    if (camera->getStatus() != NO_ERROR) {
        jniThrowRuntimeException(env, "Camera initialization failed");
        return;
    }

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        jniThrowRuntimeException(env, "Can't find android/hardware/Camera");
        return;
    }

    // We use a weak reference so the Camera object can be garbage collected.
    // The reference is only used as a proxy for callbacks.
    sp<JNICameraContext> context = new JNICameraContext(env, weak_this, clazz, camera);
    context->incStrong(thiz);
    camera->setListener(context);

    // save context in opaque field
    env->SetIntField(thiz, fields.context, (int)context.get());
}

With sp<Camera> camera = Camera::connect(cameraId), the code moves into
av/include/camera/Camera.h, with the implementation in av/camera/Camera.cpp

sp<Camera> Camera::connect(int cameraId)
{
    ALOGV("connect");
    sp<Camera> c = new Camera();
    const sp<ICameraService>& cs = getCameraService();
    if (cs != 0) {
        c->mCamera = cs->connect(c, cameraId);
    }
    if (c->mCamera != 0) {
        c->mCamera->asBinder()->linkToDeath(c);
        c->mStatus = NO_ERROR;
    } else {
        c.clear();
    }
    return c;
}

getCameraService() is a key piece here, so let's look at it carefully.

// establish binder interface to camera service
const sp<ICameraService>& Camera::getCameraService()
{
    Mutex::Autolock _l(mLock);
    if (mCameraService.get() == 0) {
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        do {
            binder = sm->getService(String16("media.camera"));
            if (binder != 0)
                break;
            ALOGW("CameraService not published, waiting...");
            usleep(500000); // 0.5 s
        } while(true);
        if (mDeathNotifier == NULL) {
            mDeathNotifier = new DeathNotifier();
        }
        binder->linkToDeath(mDeathNotifier);
        mCameraService = interface_cast<ICameraService>(binder);
    }
    ALOGE_IF(mCameraService==0, "no CameraService!?");
    return mCameraService;
}

Then comes the proxy class BpCameraService in av/camera/ICameraService.cpp:

// connect to camera service
virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId)
{
    Parcel data, reply;
    data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
    data.writeStrongBinder(cameraClient->asBinder());
    data.writeInt32(cameraId);
    remote()->transact(BnCameraService::CONNECT, data, &reply);
    return interface_cast<ICamera>(reply.readStrongBinder());
}

The call finally lands in BnCameraService's connect method; that is a pure virtual function, and CameraService inherits from BnCameraService, so see
av/services/camera/libcameraservice/CameraService.h, with the concrete implementation in av/services/camera/libcameraservice/CameraService.cpp.
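
For completeness, the receiving end looks roughly like this (condensed from the CONNECT case in ICameraService.cpp); it unpacks the parcel and calls the virtual connect() shown below:

status_t BnCameraService::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch (code) {
        case CONNECT: {
            CHECK_INTERFACE(ICameraService, data, reply);
            sp<ICameraClient> cameraClient =
                interface_cast<ICameraClient>(data.readStrongBinder());
            sp<ICamera> camera = connect(cameraClient, data.readInt32());
            reply->writeStrongBinder(camera->asBinder()); // handed back as ICamera
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}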

sp<ICamera> CameraService::connect(
        const sp<ICameraClient>& cameraClient, int cameraId) {
    int callingPid = getCallingPid();

    LOG1("CameraService::connect E (pid %d, id %d)", callingPid, cameraId);

    if (!mModule) {
        ALOGE("Camera HAL module not loaded");
        return NULL;
    }

    sp<Client> client;
    if (cameraId < 0 || cameraId >= mNumberOfCameras) {
        ALOGE("CameraService::connect X (pid %d) rejected (invalid cameraId %d).",
            callingPid, cameraId);
        return NULL;
    }

    char value[PROPERTY_VALUE_MAX];
    property_get("sys.secpolicy.camera.disabled", value, "0");
    if (strcmp(value, "1") == 0) {
        // Camera is disabled by DevicePolicyManager.
        ALOGI("Camera is disabled. connect X (pid %d) rejected", callingPid);
        return NULL;
    }

    Mutex::Autolock lock(mServiceLock);
    if (mClient[cameraId] != 0) {
        client = mClient[cameraId].promote();
        if (client != 0) {
            if (cameraClient->asBinder() == client->getCameraClient()->asBinder()) {
                LOG1("CameraService::connect X (pid %d) (the same client)",
                     callingPid);
                return client;
            } else {
                ALOGW("CameraService::connect X (pid %d) rejected (existing client).",
                      callingPid);
                return NULL;
            }
        }
        mClient[cameraId].clear();
    }

    if (mBusy[cameraId]) {
        ALOGW("CameraService::connect X (pid %d) rejected"
                " (camera %d is still busy).", callingPid, cameraId);
        return NULL;
    }

    struct camera_info info;
    if (mModule->get_camera_info(cameraId, &info) != OK) {
        ALOGE("Invalid camera id %d", cameraId);
        return NULL;
    }

    int deviceVersion;
    if (mModule->common.module_api_version == CAMERA_MODULE_API_VERSION_2_0) {
        deviceVersion = info.device_version;
    } else {
        deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
    }

    switch(deviceVersion) {
      case CAMERA_DEVICE_API_VERSION_1_0:
        client = new CameraClient(this, cameraClient, cameraId,
                info.facing, callingPid, getpid());
        break;
      case CAMERA_DEVICE_API_VERSION_2_0:
        client = new Camera2Client(this, cameraClient, cameraId,
                info.facing, callingPid, getpid());
        break;
      default:
        ALOGE("Unknown camera device HAL version: %d", deviceVersion);
        return NULL;
    }

    if (client->initialize(mModule) != OK) {
        return NULL;
    }

    cameraClient->asBinder()->linkToDeath(this);

    mClient[cameraId] = client;
    LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId, getpid());
    return client;
}

Let's take HAL 1.0 as the example here; see
av/services/camera/libcameraservice/CameraClient.h

av/services/camera/libcameraservice/CameraClient.cpp

CameraClient::CameraClient(const sp<CameraService>& cameraService,
        const sp<ICameraClient>& cameraClient,
        int cameraId, int cameraFacing, int clientPid, int servicePid):
        Client(cameraService, cameraClient,
                cameraId, cameraFacing, clientPid, servicePid)
{
    int callingPid = getCallingPid();
    LOG1("CameraClient::CameraClient E (pid %d, id %d)", callingPid, cameraId);

    mHardware = NULL;
    mMsgEnabled = 0;
    mSurface = 0;
    mPreviewWindow = 0;
    mDestructionStarted = false;

    // Callback is disabled by default
    mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
    mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
    mPlayShutterSound = true;
    LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
}
status_t CameraClient::initialize(camera_module_t *module) {
    int callingPid = getCallingPid();
    LOG1("CameraClient::initialize E (pid %d, id %d)", callingPid, mCameraId);

    char camera_device_name[10];
    status_t res;
    snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);

    mHardware = new CameraHardwareInterface(camera_device_name);
    res = mHardware->initialize(&module->common);
    if (res != OK) {
        ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
                __FUNCTION__, mCameraId, strerror(-res), res);
        mHardware.clear();
        return NO_INIT;
    }

    mHardware->setCallbacks(notifyCallback,
            dataCallback,
            dataCallbackTimestamp,
            (void *)mCameraId);

    // Enable zoom, error, focus, and metadata messages by default
    enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS |
                  CAMERA_MSG_PREVIEW_METADATA | CAMERA_MSG_FOCUS_MOVE);

    LOG1("CameraClient::initialize X (pid %d, id %d)", callingPid, mCameraId);
    return OK;
}

Of course these are also involved:
libhardware/include/hardware/camera.h

libhardware/include/hardware/camera_common.h

av/services/camera/libcameraservice/CameraHardwareInterface.h

status_t initialize(hw_module_t *module)
{
    ALOGI("Opening camera %s", mName.string());
    int rc = module->methods->open(module, mName.string(),
                                   (hw_device_t **)&mDevice);
    if (rc != OK) {
        ALOGE("Could not open camera %s: %d", mName.string(), rc);
        return rc;
    }
    initHalPreviewWindow();
    return rc;
}

This is only the interface; the concrete implementation is provided by the vendor.
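
How does module->methods->open get there in the first place? CameraService loads the HAL module through libhardware. A rough sketch of that step (simplified; the real loading happens in CameraService::onFirstRef()):

#include <hardware/hardware.h>
#include <hardware/camera.h>

static camera_module_t* loadCameraHal()
{
    const hw_module_t* rawModule = NULL;
    // hw_get_module() resolves CAMERA_HARDWARE_MODULE_ID ("camera") to
    // camera.<ro.hardware>.so and returns its HAL_MODULE_INFO_SYM symbol.
    if (hw_get_module(CAMERA_HARDWARE_MODULE_ID, &rawModule) < 0) {
        return NULL;  // no vendor HAL installed
    }
    return (camera_module_t*)rawModule;
}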

That covers the top-down path; next let's look at the opposite direction, a call from the bottom layers back up.

The code above shows three callbacks being bound toward the driver:

mHardware->setCallbacks(notifyCallback,
        dataCallback,
        dataCallbackTimestamp,
        (void *)mCameraId);

So these three callbacks can all get invoked.
Taking dataCallback as the example, one of its handlers is:

// picture callback - compressed picture ready
void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) {
    disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE);

    sp<ICameraClient> c = mCameraClient;
    mLock.unlock();
    if (c != 0) {
        c->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, mem, NULL);
    }
}

The call travels back to the app through ICameraClient, which here is a BpCameraClient (why it is a Bp is explained a bit later).

// generic data callback from camera service to app with image data
void BpCameraClient::dataCallback(int32_t msgType, const sp<IMemory>& imageData,
                  camera_frame_metadata_t *metadata)
{
    ALOGV("dataCallback");
    Parcel data, reply;
    data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
    data.writeInt32(msgType);
    data.writeStrongBinder(imageData->asBinder());
    if (metadata) {
        data.writeInt32(metadata->number_of_faces);
        data.write(metadata->faces, sizeof(camera_face_t) * metadata->number_of_faces);
    }
    remote()->transact(DATA_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
}

Then:

status_t BnCameraClient::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case NOTIFY_CALLBACK: {
            ...
        } break;
        case DATA_CALLBACK: {
            ALOGV("DATA_CALLBACK");
            CHECK_INTERFACE(ICameraClient, data, reply);
            int32_t msgType = data.readInt32();
            sp<IMemory> imageData = interface_cast<IMemory>(data.readStrongBinder());
            camera_frame_metadata_t *metadata = NULL;
            if (data.dataAvail() > 0) {
                metadata = new camera_frame_metadata_t;
                metadata->number_of_faces = data.readInt32();
                metadata->faces = (camera_face_t *) data.readInplace(
                        sizeof(camera_face_t) * metadata->number_of_faces);
            }
            dataCallback(msgType, imageData, metadata); // 因为pure virtual function的关系,所以会call到Camera当中的实现
            if (metadata) delete metadata;
            return NO_ERROR;
        } break;
        case DATA_CALLBACK_TIMESTAMP: {
            ...
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}

And then:

// callback from camera service when frame or image is ready
void Camera::dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
                          camera_frame_metadata_t *metadata)
{
    sp<CameraListener> listener;
    {
        Mutex::Autolock _l(mLock);
        listener = mListener;
    }
    if (listener != NULL) {
        listener->postData(msgType, dataPtr, metadata);
    }
}

For calls from the app into the service, the ICameraClient is a BnCameraClient (the real object lives on the client side, the service holds the proxy; you can see this from where it is new'ed).
For calls from the service back to the app, it is a BpCameraClient (the proxy lives on the service side, the real object on the client side).
The conversion from Bn to Bp is accomplished by this single line:

sp<ICameraClient> cameraClient = interface_cast<ICameraClient>(data.readStrongBinder());

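For the curious, interface_cast<ICameraClient>(binder) expands to ICameraClient::asInterface(binder), whose body is generated by IMPLEMENT_META_INTERFACE. Its essential logic, abridged:

sp<ICameraClient> ICameraClient::asInterface(const sp<IBinder>& obj)
{
    sp<ICameraClient> intr;
    if (obj != NULL) {
        // Same process: queryLocalInterface() returns the BnCameraClient itself.
        intr = static_cast<ICameraClient*>(
            obj->queryLocalInterface(ICameraClient::descriptor).get());
        if (intr == NULL) {
            // Remote process: wrap the binder handle in a new proxy.
            intr = new BpCameraClient(obj);
        }
    }
    return intr;
}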

Here are some points worth noting:

ICameraClient inherits from IInterface.
BnCameraClient inherits from BnInterface<ICameraClient>, which in turn inherits from ICameraClient and BBinder.

Camera inherits from BnCameraClient.

ICameraService inherits from IInterface.
BnCameraService inherits from BnInterface<ICameraService>, which in turn inherits from ICameraService and BBinder.

ICamera inherits from IInterface.
BnCamera inherits from BnInterface<ICamera>, which in turn inherits from ICamera and BBinder.

CameraService inherits from BinderService<CameraService>, BnCameraService, and IBinder::DeathRecipient,
where BinderService is mainly a uniform helper for instantiating services.

CameraService has a nested class Client that inherits from BnCamera.
Most of the real implementation is in CameraClient (Camera2Client is Google's way of driving the new HAL; some vendors, such as Qualcomm, implement their HALs this way).
This is the true client: it calls directly into the hardware and is called back by it.

When sendCommand passes a parameter toward the driver, it first checks whether it is something the CameraService layer can handle itself (some commands never need to reach the driver):

cmd == CAMERA_CMD_SET_DISPLAY_ORIENTATION
cmd == CAMERA_CMD_ENABLE_SHUTTER_SOUND
cmd == CAMERA_CMD_PLAY_RECORDING_SOUND
cmd == CAMERA_CMD_SET_VIDEO_BUFFER_COUNT
cmd == CAMERA_CMD_PING
All of the above are handled by CameraService itself;
everything else is handed to the driver via mHardware->sendCommand(cmd, arg1, arg2);. In code form it looks roughly like the sketch below.
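
This is a condensed sketch of that dispatch, not the verbatim source; the real CameraService::Client::sendCommand also validates arguments and handles a few more commands:

status_t CameraClient::sendCommandSketch(int32_t cmd, int32_t arg1, int32_t arg2)
{
    if (cmd == CAMERA_CMD_SET_DISPLAY_ORIENTATION) {
        // handled here: remember the rotation, no trip to the driver
        mOrientation = getOrientation(arg1, mCameraFacing == CAMERA_FACING_FRONT);
        return OK;
    } else if (cmd == CAMERA_CMD_ENABLE_SHUTTER_SOUND) {
        mPlayShutterSound = (arg1 == 1);   // service-side policy only
        return OK;
    } else if (cmd == CAMERA_CMD_PING) {
        return OK;                         // liveness check, nothing to forward
    }
    // everything else goes down to the HAL
    return mHardware->sendCommand(cmd, arg1, arg2);
}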

Next, a few open questions:

// these are initialized in the constructor.
sp<CameraHardwareInterface> mHardware; // cleared after disconnect()
mHardware is cleared inside disconnect().

// Make sure disconnect() is done once and once only, whether it is called
// from the user directly, or called by the destructor.
if (mHardware == 0) return;
This guards against disconnect running once when called by the user and then again from the destructor.

Reading the AOSP Camera Source: Taking Pictures While Recording

Before continuing this Camera series, here is a brief overview of Camera as a whole (the previous post skipped it; from here on these will mostly be short, simple notes).

See http://developer.android.com/reference/android/hardware/Camera.html

The docs also spell out the whole photo-taking flow clearly:
1. open camera
2. get/set parameters
3. set display orientation
4. set surface
5. start preview
6. take picture
7. after a capture, preview display stops automatically; to keep shooting, repeat step 5
8. stop preview
9. release camera

Some of these steps are mandatory; others are not.

The best way to learn Camera is to get familiar with a few key areas of the code; that makes everything easier to understand, because this topic couples hardware and software rather tightly.
Most of the time we only need to know the various parameters and push them down to the hardware layer via set parameter to get most of the functionality we need.

$ANDROID_SRC_HOME/frameworks/base/core/java/android/hardware/Camera.java

$ANDROID_SRC_HOME/frameworks/base/core/jni/android_hardware_Camera.cpp

$ANDROID_SRC_HOME/frameworks/base/include/camera/Camera.h
$ANDROID_SRC_HOME/frameworks/base/libs/camera/Camera.cpp

These files are basically enough to understand Camera from the APP level.

Camera's most important job is producing the best possible photo. When the app has set its parameters but the resulting shot is not what we wanted, we must figure out whether we sent the wrong parameters or the lower layers went wrong.
All of that is the app layer's concern.

For users, the ideal is to set nothing, press the shutter, and get the best possible shot. That is why the point-and-shoot camera was invented and is so popular, even though point-and-shoots still cannot deliver the best result with zero settings.

So much for the overview; it is quite simple.
Now for today's topic: taking pictures while recording.

Samsung and HTC both advertise shoot-while-recording these days. Implementing it in an app is not hard: as long as the device's chipset supports the feature, the app side is straightforward.

The feature's formal name is Video Snapshot.

Android provides an API for it:

    /**
     * Returns true if video snapshot is supported. That is, applications
     * can call {@link #takePicture(Camera.ShutterCallback,
     * Camera.PictureCallback, Camera.PictureCallback, Camera.PictureCallback)}
     * during recording. Applications do not need to call {@link
     * #startPreview()} after taking a picture. The preview will be still
     * active. Other than that, taking a picture during recording is
     * identical to taking a picture normally. All settings and methods
     * related to takePicture work identically. Ex: {@link
     * #getPictureSize()}, {@link #getSupportedPictureSizes()}, {@link
     * #setJpegQuality(int)}, {@link #setRotation(int)}, and etc. The
     * picture will have an EXIF header. {@link #FLASH_MODE_AUTO} and {@link
     * #FLASH_MODE_ON} also still work, but the video will record the flash.
     *
     * Applications can set shutter callback as null to avoid the shutter
     * sound. It is also recommended to set raw picture and post view
     * callbacks to null to avoid the interrupt of preview display.
     *
     * Field-of-view of the recorded video may be different from that of the
     * captured pictures.
     *
     * @return true if video snapshot is supported.
     */
android.hardware.Camera.Parameters.isVideoSnapshotSupported()

The comment on this API explains the whole usage: it works just like a normal capture, except that during a video snapshot there is no need to start the preview again, because the preview stays active throughout recording. A normal capture, by contrast, stops the preview after every shot and requires another startPreview.

The quality of this feature hinges on the vendor's chipset, for example whether capture during recording supports full resolution, and so on.

The AOSP Camera app implements the feature with this single call:

Log.v(TAG, "Video snapshot start");
mCameraDevice.takePicture(null, null, null, new JpegPictureCallback(loc));

For more, see

$ANDROID_SRC_HOME/framework/packages/apps/Camera/src/com/android/camera/VideoCamera.java

Simple, isn't it?

Reading the AOSP Camera Source: Launching the Camera App with the Camera Button

I plan to record my notes as I read the AOSP Camera source. Some places I have not examined very precisely, so if my understanding is wrong, please point it out!

Launching the app with the Camera Button is fairly simple. The idea: when you long-press the Camera Button, the system sends out a Broadcast, so the app only needs to register a Receiver for that broadcast message and launch itself when the message arrives.

The code explains it directly.

The Manifest contains this snippet:

<receiver android:name="com.android.camera.CameraButtonIntentReceiver">
    <intent-filter>
        <action android:name="android.intent.action.CAMERA_BUTTON"/>
    </intent-filter>
</receiver>

CameraButtonIntentReceiver then launches the app, but it first checks whether the hardware is usable: if so, it launches the app; if not, it does nothing and simply returns. The benefit is presumably to fail early, telling the user about the error before running a pile of code only to discover the hardware is unavailable. That is good program design.

The thing to pay attention to here is CameraHolder. Its job is to hold a single android.hardware.Camera instance and to track how many users have the android.hardware.Camera resource open, as its mUsers field shows.
CameraHolder has one small quirk: if you have called its keep method, then when you call release it briefly keeps the android.hardware.Camera instance from being freed (3 seconds here); if you call open again within that window, some needless overhead is saved. A sketch of the idea follows.
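
The keep()/release() behavior amounts to a small delayed-release cache. A hypothetical C++ sketch of the same idea (the real CameraHolder is Java, in the Camera app, and schedules the deferred close with a Handler message rather than checking on the next call):

#include <chrono>

class CameraKeeper {
public:
    void keep() { mKeepUntil = now() + std::chrono::seconds(3); }
    void* open() {
        if (mDevice && now() < mKeepUntil) return mDevice;  // reuse the open device
        if (!mDevice) mDevice = reallyOpen();
        return mDevice;
    }
    void release() {
        if (now() >= mKeepUntil) {        // keep window over: close for real
            reallyClose(mDevice);
            mDevice = nullptr;
        }
        // otherwise hold the device open a little longer so a quick
        // reopen skips the hardware round-trip
    }
private:
    static std::chrono::steady_clock::time_point now() {
        return std::chrono::steady_clock::now();
    }
    void* reallyOpen() { return this; }   // placeholder for the hardware open
    void reallyClose(void*) {}            // placeholder for the hardware close
    void* mDevice = nullptr;
    std::chrono::steady_clock::time_point mKeepUntil{};
};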

The launch itself is straightforward: a single startActivity in the Receiver does it.