Qualcomm Camera HAL 2.0

We know that the vendor side of the HAL dynamically loads a file named camera.$platform$.so and then hooks up the methods defined by the Android HAL. Here we take Camera HAL 2.0 on Qualcomm msm8960 as an example, building on an earlier article (http://guoh.org/lifelog/2013/07/glance-at-camera-hal-2-0/).

(Note: this article has been sitting in draft for quite a while and never got published, because I have no device that can run this version of the code and I cannot be sure the code is entirely correct; at the very least some parts turned out to be stub implementations. There may well be mistakes here; if you spot any, please point them out and I will keep trying to find and fix them!)

We know that camera2.h defines a good number of methods; on msm8960 the HAL that implements them lives under
/path/to/qcam-hal/QCamera/HAL2
which builds into the camera.$platform$.so mentioned above. Let's look at the implementation,
starting with HAL2/wrapper/QualcommCamera.h|cpp

/**
 * The functions need to be provided by the camera HAL.
 *
 * If getNumberOfCameras() returns N, the valid cameraId for getCameraInfo()
 * and openCameraHardware() is 0 to N-1.
 */

static hw_module_methods_t camera_module_methods = {
    open: camera_device_open,
};

static hw_module_t camera_common  = {
    tag: HARDWARE_MODULE_TAG,
    module_api_version: CAMERA_MODULE_API_VERSION_2_0, // this is what makes Camera Service initialize the Camera2Client family
    hal_api_version: HARDWARE_HAL_API_VERSION,
    id: CAMERA_HARDWARE_MODULE_ID,
    name: "Qcamera",
    author:"Qcom",
    methods: &camera_module_methods,
    dso: NULL,
    reserved:  {0},
};

camera_module_t HAL_MODULE_INFO_SYM = { // the HMI symbol that every HAL module must export
    common: camera_common,
    get_number_of_cameras: get_number_of_cameras,
    get_camera_info: get_camera_info,
};

camera2_device_ops_t camera_ops = { // note the functions bound here
    set_request_queue_src_ops:           android::set_request_queue_src_ops,
    notify_request_queue_not_empty:      android::notify_request_queue_not_empty,
    set_frame_queue_dst_ops:             android::set_frame_queue_dst_ops,
    get_in_progress_count:               android::get_in_progress_count,
    flush_captures_in_progress:          android::flush_captures_in_progress,
    construct_default_request:           android::construct_default_request,

    allocate_stream:                     android::allocate_stream,
    register_stream_buffers:             android::register_stream_buffers,
    release_stream:                      android::release_stream,

    allocate_reprocess_stream:           android::allocate_reprocess_stream,
    allocate_reprocess_stream_from_stream: android::allocate_reprocess_stream_from_stream,
    release_reprocess_stream:            android::release_reprocess_stream,

    trigger_action:                      android::trigger_action,
    set_notify_callback:                 android::set_notify_callback,
    get_metadata_vendor_tag_ops:         android::get_metadata_vendor_tag_ops,
    dump:                                android::dump,
};

typedef struct { // note: this is a wrapper struct defined by Qualcomm itself
  camera2_device_t hw_dev; // the standard device struct
  QCameraHardwareInterface *hardware;
  int camera_released;
  int cameraId;
} camera_hardware_t;

/* HAL should return NULL if it fails to open camera hardware. */
extern "C" int  camera_device_open(
  const struct hw_module_t* module, const char* id,
          struct hw_device_t** hw_device)
{
    int rc = -1;
    int mode = 0;
    camera2_device_t *device = NULL;
    if (module && id && hw_device) {
        int cameraId = atoi(id);

        if (!strcmp(module->name, camera_common.name)) {
            camera_hardware_t *camHal =
                (camera_hardware_t *) malloc(sizeof (camera_hardware_t));
            if (!camHal) {
                *hw_device = NULL;
                ALOGE("%s:  end in no mem", __func__);
                return rc;
            }
            /* we have the camera_hardware obj malloced */
            memset(camHal, 0, sizeof (camera_hardware_t));
            camHal->hardware = new QCameraHardwareInterface(cameraId, mode);
            if (camHal->hardware && camHal->hardware->isCameraReady()) {
                camHal->cameraId = cameraId;
                device = &camHal->hw_dev; // the camera2_device_t
                device->common.close = close_camera_device; // initialize the camera2_device_t
                device->common.version = CAMERA_DEVICE_API_VERSION_2_0;
                device->ops = &camera_ops;
                device->priv = (void *)camHal;
                rc =  0;
            } else {
                if (camHal->hardware) {
                    delete camHal->hardware;
                    camHal->hardware = NULL;
                }
                free(camHal);
                device = NULL;
            }
        }
    }
    /* pass the actual hw_device ptr to the framework, so that it can actually use the memberof() macro */
    *hw_device = (hw_device_t*)&device->common; // the usual trick used by the kernel and the Android native framework
    return rc;
}
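The reason &device->common can be handed back as a hw_device_t* is that common sits at the very start of camera2_device_t, which in turn sits at the start of Qualcomm's camera_hardware_t, so the framework and close_camera_device can cast the pointer straight back. A minimal sketch of that round trip (standalone, with the structs boiled down to a few fields; not the actual vendor code):

/* Sketch only: illustrates the "common is the first member" cast trick. */
#include <stdlib.h>

typedef struct hw_device { int version; } hw_device_t;
typedef struct camera2_device { hw_device_t common; void *priv; } camera2_device_t;
typedef struct { camera2_device_t hw_dev; int cameraId; } camera_hardware_t;

static int close_camera_device(hw_device_t *dev)
{
    /* hw_device_t* -> camera2_device_t* is safe because common is the first member */
    camera2_device_t *cam_dev = (camera2_device_t *)dev;
    camera_hardware_t *camHal = (camera_hardware_t *)cam_dev->priv;
    free(camHal); /* the wrapper struct owns the whole allocation */
    return 0;
}

int main(void)
{
    camera_hardware_t *camHal = (camera_hardware_t *)calloc(1, sizeof(*camHal));
    camHal->hw_dev.priv = camHal;                  /* what camera_device_open sets up */
    hw_device_t *handed_to_framework = &camHal->hw_dev.common;
    return close_camera_device(handed_to_framework);
}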

Now let's look at allocate stream.

int allocate_stream(const struct camera2_device *device,
        uint32_t width,
        uint32_t height,
        int      format,
        const camera2_stream_ops_t *stream_ops,
        uint32_t *stream_id,
        uint32_t *format_actual,
        uint32_t *usage,
        uint32_t *max_buffers)
{
    QCameraHardwareInterface *hardware = util_get_Hal_obj(device);
    int rc = hardware->allocate_stream(width, height, format, stream_ops,
            stream_id, format_actual, usage, max_buffers);
    return rc;
}

Note that QCameraHardwareInterface lives in QCameraHWI.h|cpp

int QCameraHardwareInterface::allocate_stream(
    uint32_t width,
    uint32_t height, int format,
    const camera2_stream_ops_t *stream_ops,
    uint32_t *stream_id,
    uint32_t *format_actual,
    uint32_t *usage,
    uint32_t *max_buffers)
{
    int ret = OK;
    QCameraStream *stream = NULL;
    camera_mode_t myMode = (camera_mode_t)(CAMERA_MODE_2D|CAMERA_NONZSL_MODE);

    stream = QCameraStream_preview::createInstance(
                        mCameraHandle->camera_handle,
                        mChannelId,
                        width,
                        height,
                        format,
                        mCameraHandle,
                        myMode);

    stream->setPreviewWindow(stream_ops); // i.e. every stream created through this method gets the corresponding ANativeWindow passed in
    *stream_id = stream->getStreamId();
    *max_buffers= stream->getMaxBuffers(); // obtained from the HAL
    *usage = GRALLOC_USAGE_HW_CAMERA_WRITE | CAMERA_GRALLOC_HEAP_ID
        | CAMERA_GRALLOC_FALLBACK_HEAP_ID;
    /* Set to an arbitrary format SUPPORTED by gralloc */
    *format_actual = HAL_PIXEL_FORMAT_YCrCb_420_SP;

    return ret;
}

QCameraStream_preview::createInstance simply calls its own constructor, shown below
(the relevant classes are in QCameraStream.h|cpp and QCameraStream_Preview.cpp)

QCameraStream_preview::QCameraStream_preview(uint32_t CameraHandle,
                        uint32_t ChannelId,
                        uint32_t Width,
                        uint32_t Height,
                        int requestedFormat,
                        mm_camera_vtbl_t *mm_ops,
                        camera_mode_t mode) :
                 QCameraStream(CameraHandle,
                        ChannelId,
                        Width,
                        Height,
                        mm_ops,
                        mode),
                 mLastQueuedFrame(NULL),
                 mDisplayBuf(NULL),
                 mNumFDRcvd(0)
{
    mStreamId = allocateStreamId(); // allocate a stream id (based on mStreamTable)

    switch (requestedFormat) { // max buffer number
    case CAMERA2_HAL_PIXEL_FORMAT_OPAQUE:
        mMaxBuffers = 5;
        break;
    case HAL_PIXEL_FORMAT_BLOB:
        mMaxBuffers = 1;
        break;
    default:
        ALOGE("Unsupported requested format %d", requestedFormat);
        mMaxBuffers = 1;
        break;
    }
    /*TODO: There has to be a better way to do this*/
}

Next, look at
/path/to/qcam-hal/QCamera/stack/mm-camera-interface/
In mm_camera_interface.h we have

typedef struct {
    uint32_t camera_handle;        /* camera object handle */
    mm_camera_info_t *camera_info; /* reference pointer of camera info */
    mm_camera_ops_t *ops;          /* API call table */
} mm_camera_vtbl_t;

and in mm_camera_interface.c

/* camera ops v-table */
static mm_camera_ops_t mm_camera_ops = {
    .sync = mm_camera_intf_sync,
    .is_event_supported = mm_camera_intf_is_event_supported,
    .register_event_notify = mm_camera_intf_register_event_notify,
    .qbuf = mm_camera_intf_qbuf,
    .camera_close = mm_camera_intf_close,
    .query_2nd_sensor_info = mm_camera_intf_query_2nd_sensor_info,
    .is_parm_supported = mm_camera_intf_is_parm_supported,
    .set_parm = mm_camera_intf_set_parm,
    .get_parm = mm_camera_intf_get_parm,
    .ch_acquire = mm_camera_intf_add_channel,
    .ch_release = mm_camera_intf_del_channel,
    .add_stream = mm_camera_intf_add_stream,
    .del_stream = mm_camera_intf_del_stream,
    .config_stream = mm_camera_intf_config_stream,
    .init_stream_bundle = mm_camera_intf_bundle_streams,
    .destroy_stream_bundle = mm_camera_intf_destroy_bundle,
    .start_streams = mm_camera_intf_start_streams,
    .stop_streams = mm_camera_intf_stop_streams,
    .async_teardown_streams = mm_camera_intf_async_teardown_streams,
    .request_super_buf = mm_camera_intf_request_super_buf,
    .cancel_super_buf_request = mm_camera_intf_cancel_super_buf_request,
    .start_focus = mm_camera_intf_start_focus,
    .abort_focus = mm_camera_intf_abort_focus,
    .prepare_snapshot = mm_camera_intf_prepare_snapshot,
    .set_stream_parm = mm_camera_intf_set_stream_parm,
    .get_stream_parm = mm_camera_intf_get_stream_parm
};

Take start stream as an example

mm_camera_intf_start_streams(mm_camera_interface
    mm_camera_start_streams(mm_camera
    	mm_channel_fsm_fn(mm_camera_channel
    		mm_channel_fsm_fn_active(mm_camera_channel
    			mm_channel_start_streams(mm_camera_channel
    				mm_stream_fsm_fn(mm_camera_stream
    					mm_stream_fsm_reg(mm_camera_stream
    						mm_camera_cmd_thread_launch(mm_camera_data
    						mm_stream_streamon(mm_camera_stream

Note: throughout this article, a staircase layout like the one above denotes a call chain; entries at the same indentation level are called from the same method one level up.

int32_t mm_stream_streamon(mm_stream_t *my_obj)
{
    int32_t rc;
    enum v4l2_buf_type buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;

    /* Add fd to data poll thread */
    rc = mm_camera_poll_thread_add_poll_fd(&my_obj->ch_obj->poll_thread[0],
                                           my_obj->my_hdl,
                                           my_obj->fd,
                                           mm_stream_data_notify,
                                           (void*)my_obj);
    if (rc < 0) {
        return rc;
    }
    rc = ioctl(my_obj->fd, VIDIOC_STREAMON, &buf_type);
    if (rc < 0) {
        CDBG_ERROR("%s: ioctl VIDIOC_STREAMON failed: rc=%d\n",
                   __func__, rc);
        /* remove fd from data poll thread in case of failure */
        mm_camera_poll_thread_del_poll_fd(&my_obj->ch_obj->poll_thread[0], my_obj->my_hdl);
    }
    return rc;
}

Seeing ioctl with VIDIOC_STREAMON is good news: this is how user space and kernel space communicate in the V4L2 API. V4L2 (Video for Linux Two) is a classic, mature video API, the successor of V4L; if you are not familiar with it, download the specification, and The Video4Linux2 API series (http://lwn.net/Articles/203924/) is also excellent material.
A quick overview:

open(VIDEO_DEVICE_NAME, …) // open the video device, usually called during program initialization

ioctl(…) // control operations that only move a small amount of data
Many request codes can be used here, and a typical sequence looks like this:
VIDIOC_QUERYCAP // ask what the device is capable of
VIDIOC_CROPCAP // query the device's cropping capabilities
VIDIOC_S_* // set/get: write/read parameters
VIDIOC_G_*
VIDIOC_REQBUFS // allocate buffers; several memory modes are possible
VIDIOC_QUERYBUF // query information about the allocated buffers
VIDIOC_QBUF // QUEUE BUFFER: push a buffer into the DRV queue (the buffer is empty at this point)
VIDIOC_STREAMON // start video streaming
VIDIOC_DQBUF // DEQUEUE BUFFER: take a buffer back out of the DRV queue (the buffer now holds data)

[0…n]
QBUF -> DQBUF // this pair can be repeated over and over

VIDIOC_STREAMOFF // stop video streaming

close(VIDEO_DEVICE_FD) // close the device
Those are the main calls and the rough order in which they are used; a few more are worth knowing, as strung together in the sketch below:

select() // wait for an event, mainly used after we have pushed frame buffers to the DRV and are waiting for its response
mmap/munmap // deals with the buffers we requested; needed when the buffers are allocated in device memory
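
Strung together, a minimal mmap-based capture loop looks roughly like the sketch below (error handling stripped; the device node, resolution, pixel format and buffer count are all assumptions, not values from the QCam HAL):

/* Minimal V4L2 capture sketch, independent of the QCam HAL code. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
    int fd = open("/dev/video0", O_RDWR); /* device node is an assumption */

    struct v4l2_format fmt;
    memset(&fmt, 0, sizeof(fmt));
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = 640;
    fmt.fmt.pix.height = 480;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV21;
    ioctl(fd, VIDIOC_S_FMT, &fmt); /* set dimensions and format */

    struct v4l2_requestbuffers req;
    memset(&req, 0, sizeof(req));
    req.count = 4; /* arbitrary buffer count */
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    ioctl(fd, VIDIOC_REQBUFS, &req); /* let the driver allocate the buffers */

    struct v4l2_buffer buf;
    void *mem[4];
    size_t len[4];
    for (unsigned i = 0; i < req.count; i++) {
        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        ioctl(fd, VIDIOC_QUERYBUF, &buf); /* where is buffer i and how big is it */
        len[i] = buf.length;
        mem[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, buf.m.offset);
        ioctl(fd, VIDIOC_QBUF, &buf); /* hand the empty buffer to the driver */
    }

    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    ioctl(fd, VIDIOC_STREAMON, &type); /* start streaming */

    memset(&buf, 0, sizeof(buf));
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    ioctl(fd, VIDIOC_DQBUF, &buf); /* blocks until a filled buffer comes back */
    /* ... consume mem[buf.index], buf.bytesused bytes of image data ... */
    ioctl(fd, VIDIOC_QBUF, &buf); /* recycle the buffer */

    ioctl(fd, VIDIOC_STREAMOFF, &type);
    for (unsigned i = 0; i < req.count; i++)
        munmap(mem[i], len[i]);
    close(fd);
    return 0;
}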

And if you look at the mm_camera_stream file, that is exactly how it is implemented.

With that covered, let's go back to the QCam HAL. Its implementation is of course not as simple as the start-stream outline above, but it is not terribly complicated either; what matters most, in my view, are the states and the structures involved.

First, the channel states. Only one channel is supported at the moment, but it can carry multiple streams (more on that later; currently at most 8 streams).

/* mm_channel */
typedef enum {
    MM_CHANNEL_STATE_NOTUSED = 0,   /* not used */
    MM_CHANNEL_STATE_STOPPED,       /* stopped */
    MM_CHANNEL_STATE_ACTIVE,        /* active, at least one stream active */
    MM_CHANNEL_STATE_PAUSED,        /* paused */
    MM_CHANNEL_STATE_MAX
} mm_channel_state_type_t;

The events a channel can handle:

typedef enum {
    MM_CHANNEL_EVT_ADD_STREAM,
    MM_CHANNEL_EVT_DEL_STREAM,
    MM_CHANNEL_EVT_START_STREAM,
    MM_CHANNEL_EVT_STOP_STREAM,
    MM_CHANNEL_EVT_TEARDOWN_STREAM,
    MM_CHANNEL_EVT_CONFIG_STREAM,
    MM_CHANNEL_EVT_PAUSE,
    MM_CHANNEL_EVT_RESUME,
    MM_CHANNEL_EVT_INIT_BUNDLE,
    MM_CHANNEL_EVT_DESTROY_BUNDLE,
    MM_CHANNEL_EVT_REQUEST_SUPER_BUF,
    MM_CHANNEL_EVT_CANCEL_REQUEST_SUPER_BUF,
    MM_CHANNEL_EVT_START_FOCUS,
    MM_CHANNEL_EVT_ABORT_FOCUS,
    MM_CHANNEL_EVT_PREPARE_SNAPSHOT,
    MM_CHANNEL_EVT_SET_STREAM_PARM,
    MM_CHANNEL_EVT_GET_STREAM_PARM,
    MM_CHANNEL_EVT_DELETE,
    MM_CHANNEL_EVT_MAX
} mm_channel_evt_type_t;
/* mm_stream */
typedef enum { // watch these states carefully: every method call is supposed to move the stream to a new state
    MM_STREAM_STATE_NOTUSED = 0,      /* not used */
    MM_STREAM_STATE_INITED,           /* inited  */
    MM_STREAM_STATE_ACQUIRED,         /* acquired, fd opened  */
    MM_STREAM_STATE_CFG,              /* fmt & dim configured */
    MM_STREAM_STATE_BUFFED,           /* buf allocated */
    MM_STREAM_STATE_REG,              /* buf regged, stream off */
    MM_STREAM_STATE_ACTIVE_STREAM_ON, /* active with stream on */
    MM_STREAM_STATE_ACTIVE_STREAM_OFF, /* active with stream off */
    MM_STREAM_STATE_MAX
} mm_stream_state_type_t;

Likewise, the events a stream can handle:

typedef enum {
    MM_STREAM_EVT_ACQUIRE,
    MM_STREAM_EVT_RELEASE,
    MM_STREAM_EVT_SET_FMT,
    MM_STREAM_EVT_GET_BUF,
    MM_STREAM_EVT_PUT_BUF,
    MM_STREAM_EVT_REG_BUF,
    MM_STREAM_EVT_UNREG_BUF,
    MM_STREAM_EVT_START,
    MM_STREAM_EVT_STOP,
    MM_STREAM_EVT_QBUF,
    MM_STREAM_EVT_SET_PARM,
    MM_STREAM_EVT_GET_PARM,
    MM_STREAM_EVT_MAX
} mm_stream_evt_type_t;

Every one of these functions checks the channel/stream state before doing anything, and only proceeds when the state is right.

For example, you can see that
mm_channel has a mm_channel_state_type_t state;
mm_stream has a mm_stream_state_type_t state;
each holding the current state of that structure.

In addition,
struct mm_camera_obj
struct mm_channel
struct mm_stream
contain one another top-down, and a stream or channel also keeps a reference back to its parent structure (call it "parent" for now; strictly it is a container relationship), as sketched below.
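
Roughly, with the fields boiled down to the ones this article touches (the real vendor structs carry many more members, and the array sizes here are just assumptions):

/* Simplified containment sketch; not the actual vendor definitions. */
#include <stdint.h>

#define MM_CAMERA_CHANNEL_MAX 1
#define MM_CAMERA_STREAM_MAX  8

typedef struct mm_stream {
    int fd;                        /* the /dev/videoX fd opened in mm_stream_fsm_inited */
    int state;                     /* mm_stream_state_type_t */
    struct mm_channel *ch_obj;     /* back reference to the parent channel */
} mm_stream_t;

typedef struct mm_channel {
    int state;                     /* mm_channel_state_type_t */
    mm_stream_t streams[MM_CAMERA_STREAM_MAX];
    struct mm_camera_obj *cam_obj; /* back reference to the parent camera object */
} mm_channel_t;

typedef struct mm_camera_obj {
    uint32_t my_hdl;               /* camera handle */
    mm_channel_t ch[MM_CAMERA_CHANNEL_MAX];
} mm_camera_obj_t;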

In practice every vendor HAL has its own implementation and often a lot of vendor-specific bits; this one, for instance, feeds ioctl some proprietary commands and data structures, which only matter when you work on that particular platform. These details vary wildly: OMAP4, for example, talks to the DRV over rpmsg and implements things using the OpenMAX set of APIs.

So much for theory. Let's walk through a concrete case, say Camera Service wants to start preview:

Camera2Client::startPreviewL
	StreamingProcessor->updatePreviewStream
		Camera2Device->createStream
			StreamAdapter->connectToDevice
				camera2_device_t->ops->allocate_stream // analyzed above
				native_window_api_* or native_window_*

	StreamingProcessor->startStream
		Camera2Device->setStreamingRequest
			Camera2Device::RequestQueue->setStreamSlot // create a stream slot
				Camera2Device::RequestQueue->signalConsumerLocked
status_t Camera2Device::MetadataQueue::signalConsumerLocked() {
    status_t res = OK;
    notEmpty.signal();
    if (mSignalConsumer && mDevice != NULL) {
        mSignalConsumer = false;
        mMutex.unlock();
        res = mDevice->ops->notify_request_queue_not_empty(mDevice); // tell the vendor HAL's run-command thread to run;
                                                                     // notify_request_queue_not_empty is not fired on every request, only at
                                                                     // initialization time, or when the run-command thread dequeues NULL
                                                                     // and Camera Service afterwards has a fresh request come in.
                                                                     // It saves some work: the thread does not spin when no requests are pending,
                                                                     // although the usual approach is to park the thread on a lock instead.
        mMutex.lock();
    }
    return res;
}

On the Qualcomm HAL side, meanwhile:

int notify_request_queue_not_empty(const struct camera2_device *device) // registered in camera2_device_ops_t
	QCameraHardwareInterface->notify_request_queue_not_empty()
		pthread_create(&mCommandThread, &attr, command_thread, (void *)this) != 0)
void *command_thread(void *obj)
{
	...
	pme->runCommandThread(obj);
}
void QCameraHardwareInterface::runCommandThread(void *data)
{
    /**
     * This function implements the main service routine for the incoming
     * frame requests, this thread routine is started every time we get a
     * notify_request_queue_not_empty trigger, this thread makes the
     * assumption that once it receives a NULL on a dequeue_request call
     * there will be a fresh notify_request_queue_not_empty call that is
     * invoked thereby launching a new instance of this thread. Therefore,
     * once we get a NULL on a dequeue request we simply let this thread die
     */ 
    int res;
    camera_metadata_t *request=NULL;
    mPendingRequests=0;

    while (mRequestQueueSrc) { // mRequestQueueSrc is set via set_request_queue_src_ops;
                               // see Camera2Device::MetadataQueue::setConsumerDevice,
                               // which is called from Camera2Device::initialize
        ALOGV("%s:Dequeue request using mRequestQueueSrc:%p",__func__,mRequestQueueSrc);
        mRequestQueueSrc->dequeue_request(mRequestQueueSrc, &request); // pull a framework request
        if (request==NULL) {
            ALOGE("%s:No more requests available from src command \
                    thread dying",__func__);
            return;
        }
        mPendingRequests++;

        /* Set the metadata values */

        /* Wait for the SOF for the new metadata values to be applied */

        /* Check the streams that need to be active in the stream request */
        sort_camera_metadata(request);

        camera_metadata_entry_t streams;
        res = find_camera_metadata_entry(request,
                ANDROID_REQUEST_OUTPUT_STREAMS,
                &streams);
        if (res != NO_ERROR) {
            ALOGE("%s: error reading output stream tag", __FUNCTION__);
            return;
        }

        res = tryRestartStreams(streams); // ends up calling prepareStream and streamOn, detailed code below
        if (res != NO_ERROR) {
            ALOGE("error tryRestartStreams %d", res);
            return;
        }

        /* 3rd pass: Turn on all streams requested */
        for (uint32_t i = 0; i < streams.count; i++) {
            int streamId = streams.data.u8[i];
            QCameraStream *stream = QCameraStream::getStreamAtId(streamId);

            /* Increment the frame pending count in each stream class */

            /* Assuming we will have the stream obj in had at this point may be
             * may be multiple objs in which case we loop through array of streams */
            stream->onNewRequest();
        }
        ALOGV("%s:Freeing request using mRequestQueueSrc:%p",__func__,mRequestQueueSrc);
        /* Free the request buffer */
        mRequestQueueSrc->free_request(mRequestQueueSrc,request);
        mPendingRequests--;
        ALOGV("%s:Completed request",__func__);
    }
 
    QCameraStream::streamOffAll();
}

The following method shows where mRequestQueueSrc comes from

// Connect to camera2 HAL as consumer (input requests/reprocessing)
status_t Camera2Device::MetadataQueue::setConsumerDevice(camera2_device_t *d) {
    ATRACE_CALL();
    status_t res;
    res = d->ops->set_request_queue_src_ops(d,
            this);
    if (res != OK) return res;
    mDevice = d;
    return OK;
}

Because of this chain:

QCameraStream_preview->prepareStream
	QCameraStream->initStream
		mm_camera_vtbl_t->ops->add_stream(... stream_cb_routine ...) // the callback used to deliver data back, taking mm_camera_super_buf_t* and void* as its two parameters
			mm_camera_add_stream
				mm_channel_fsm_fn(..., MM_CHANNEL_EVT_ADD_STREAM, ..., mm_evt_paylod_add_stream_t)
					mm_channel_fsm_fn_stopped
						mm_channel_add_stream(..., mm_camera_buf_notify_t, ...)
							mm_stream_fsm_inited


mm_channel_add_stream wraps the mm_camera_buf_notify_t into the mm_stream_t:

mm_stream_t *stream_obj = NULL;
/* initialize stream object */
memset(stream_obj, 0, sizeof(mm_stream_t));
/* cb through intf is always placed at idx 0 of buf_cb */
stream_obj->buf_cb[0].cb = buf_cb; // the callback
stream_obj->buf_cb[0].user_data = user_data;
stream_obj->buf_cb[0].cb_count = -1; /* infinite by default */ // an infinite number of invocations by default

And when mm_stream_fsm_inited is invoked, the event passed in is MM_STREAM_EVT_ACQUIRE.

int32_t mm_stream_fsm_inited(mm_stream_t *my_obj,
                             mm_stream_evt_type_t evt,
                             void * in_val,
                             void * out_val)
{
    int32_t rc = 0;
    char dev_name[MM_CAMERA_DEV_NAME_LEN];

    switch (evt) {
    case MM_STREAM_EVT_ACQUIRE:
        if ((NULL == my_obj->ch_obj) || (NULL == my_obj->ch_obj->cam_obj)) {
            CDBG_ERROR("%s: NULL channel or camera obj\n", __func__);
            rc = -1;
            break;
        }

        snprintf(dev_name, sizeof(dev_name), "/dev/%s",
                 mm_camera_util_get_dev_name(my_obj->ch_obj->cam_obj->my_hdl));

        my_obj->fd = open(dev_name, O_RDWR | O_NONBLOCK); // open the video device
        if (my_obj->fd <= 0) {
            CDBG_ERROR("%s: open dev returned %d\n", __func__, my_obj->fd);
            rc = -1;
            break;
        }
        rc = mm_stream_set_ext_mode(my_obj);
        if (0 == rc) {
            my_obj->state = MM_STREAM_STATE_ACQUIRED; // mm_stream_state_type_t
        } else {
            /* failed setting ext_mode
             * close fd */
            if(my_obj->fd > 0) {
                close(my_obj->fd);
                my_obj->fd = -1;
            }
            break;
        }
        rc = get_stream_inst_handle(my_obj);
        if(rc) {
            if(my_obj->fd > 0) {
                close(my_obj->fd);
                my_obj->fd = -1;
            }
        }
        break;
    default:
        CDBG_ERROR("%s: Invalid evt=%d, stream_state=%d",
                   __func__,evt,my_obj->state);
        rc = -1;
        break;
    }
    return rc;
}

There is also:

QCameraStream->streamOn
	mm_camera_vtbl_t->ops->start_streams
		mm_camera_intf_start_streams
			mm_camera_start_streams
				mm_channel_fsm_fn(..., MM_CHANNEL_EVT_START_STREAM, ...)
					mm_stream_fsm_fn(..., MM_STREAM_EVT_START, ...)
						mm_camera_cmd_thread_launch // launch the CB (callback) thread
						mm_stream_streamon(mm_stream_t)
							mm_camera_poll_thread_add_poll_fd(..., mm_stream_data_notify , ...)

static void mm_stream_data_notify(void* user_data)
{
    mm_stream_t *my_obj = (mm_stream_t*)user_data;
    int32_t idx = -1, i, rc;
    uint8_t has_cb = 0;
    mm_camera_buf_info_t buf_info;

    if (NULL == my_obj) {
        return;
    }

    if (MM_STREAM_STATE_ACTIVE_STREAM_ON != my_obj->state) {
        /* this Cb will only received in active_stream_on state
         * if not so, return here */
        CDBG_ERROR("%s: ERROR!! Wrong state (%d) to receive data notify!",
                   __func__, my_obj->state);
        return;
    }

    memset(&buf_info, 0, sizeof(mm_camera_buf_info_t));

    pthread_mutex_lock(&my_obj->buf_lock);
    rc = mm_stream_read_msm_frame(my_obj, &buf_info); // read the frame data via ioctl(..., VIDIOC_DQBUF, ...)
    if (rc != 0) {
        pthread_mutex_unlock(&my_obj->buf_lock);
        return;
    }
    idx = buf_info.buf->buf_idx;

    /* update buffer location */
    my_obj->buf_status[idx].in_kernel = 0;

    /* update buf ref count */
    if (my_obj->is_bundled) {
        /* need to add into super buf since bundled, add ref count */
        my_obj->buf_status[idx].buf_refcnt++;
    }

    for (i=0; i < MM_CAMERA_STREAM_BUF_CB_MAX; i++) {
        if(NULL != my_obj->buf_cb[i].cb) {
            /* for every CB, add ref count */
            my_obj->buf_status[idx].buf_refcnt++;
            has_cb = 1;
        }
    }
    pthread_mutex_unlock(&my_obj->buf_lock);

    mm_stream_handle_rcvd_buf(my_obj, &buf_info); // mm_camera_queue_enq: pushes the frame into the queue
                                                  // (provided a callback was registered) and signals the queue via sem_post;
                                                  // the thread launched by mm_camera_cmd_thread_launch then polls the queue,
                                                  // reads the data and invokes the CBs
}

As a result, once the stream is on, stream_cb_routine (implemented in QCameraStream) keeps getting invoked:

void stream_cb_routine(mm_camera_super_buf_t *bufs,
                       void *userdata)
{
    QCameraStream *p_obj=(QCameraStream*) userdata;
    switch (p_obj->mExtImgMode) { // this mode is fixed back at prepareStream time
    case MM_CAMERA_PREVIEW:
        ALOGE("%s : callback for MM_CAMERA_PREVIEW", __func__);
        ((QCameraStream_preview *)p_obj)->dataCallback(bufs); // so CAMERA_PREVIEW and CAMERA_VIDEO are handled the same way?
        break;
    case MM_CAMERA_VIDEO:
        ALOGE("%s : callback for MM_CAMERA_VIDEO", __func__);
        ((QCameraStream_preview *)p_obj)->dataCallback(bufs);
        break;
    case MM_CAMERA_SNAPSHOT_MAIN:
        ALOGE("%s : callback for MM_CAMERA_SNAPSHOT_MAIN", __func__);
        p_obj->p_mm_ops->ops->qbuf(p_obj->mCameraHandle,
                                   p_obj->mChannelId,
                                   bufs->bufs[0]);
        break;
    case MM_CAMERA_SNAPSHOT_THUMBNAIL:
        break;
    default:
        break;
    }
}
void QCameraStream::dataCallback(mm_camera_super_buf_t *bufs)
{
    if (mPendingCount != 0) { // does this dataCallback really keep coming back all the time?
                              // From the code, the callback count that gets set is -1 by default, and -1 means infinite.
                              // That seems to be the only explanation: otherwise, with nothing to trigger it,
                              // this code would never notice even after onNewRequest incremented mPendingCount.
        ALOGD("Got frame request");
        pthread_mutex_lock(&mFrameDeliveredMutex);
        mPendingCount--;
        ALOGD("Completed frame request");
        pthread_cond_signal(&mFrameDeliveredCond);
        pthread_mutex_unlock(&mFrameDeliveredMutex);
        processPreviewFrame(bufs);
    } else {
        p_mm_ops->ops->qbuf(mCameraHandle,
                mChannelId, bufs->bufs[0]); // if nobody currently needs the data, push the buffer straight back into the DRV queue; this ends up calling V4L2 QBUF
    }
}

What intrigues me is that in the version of the QCam HAL code I have, camera2_frame_queue_dst_ops_t is never used:

int QCameraHardwareInterface::set_frame_queue_dst_ops(
    const camera2_frame_queue_dst_ops_t *frame_dst_ops)
{
    mFrameQueueDst = frame_dst_ops; // this does not seem to be used anywhere at the moment
    return OK;
}

That means Camera Service's FrameProcessor will never get anything from Camera2Device->getNextFrame. I am not sure whether this is just an issue with the code version I have; as an aside, the latest Qualcomm Camera HAL is no longer in the AOSP tree at all but ships directly as proprietary .so files.

Overall, then, there may be several QCameraStream objects, each taking care of its own job.
They also affect one another; for instance, a newly added stream can force other streams that are already streaming-on to restart.

Another highlight of Camera HAL 2.0 is the re-process stream.
Put simply, an output stream is added back into the BufferQueue as an input stream so that another consumer can work on it, much like a chain.
It is currently used by ZslProcessor.

ZslProcessor->updateStream
	Camera2Device->createStream
	Camera2Device->createReprocessStreamFromStream // on release, the re-process stream is deleted first
		new ReprocessStreamAdapter
		ReprocessStreamAdapter->connectToDevice
			camera2_device_t->ops->allocate_reprocess_stream_from_stream

Here ReprocessStreamAdapter is essentially the camera2_stream_in_ops_t, responsible for managing the re-process stream.

But Qualcomm does not seem to have implemented this in this version of the code either, so I will stop here and revisit it once I find the corresponding code.

After all of the above, the big picture should not be surprising. From Camera Service's point of view it holds two MetadataQueues: mRequestQueue and mFrameQueue.
App-triggered actions such as set parameter / start preview / start recording are turned directly into requests, placed on mRequestQueue, and the preview/recording stream is then restarted.
Capture is likewise turned into a request and placed on mRequestQueue.
When necessary, notify_request_queue_not_empty tells the QCam HAL there are requests to handle; the QCam HAL then spins up a thread (QCameraHardwareInterface::runCommandThread) to process them, and the thread exits once every request has been handled.
During that processing, each stream's processPreviewFrame is called, and each stream invokes its own downstream callbacks where needed.
One more implementation detail: stream_cb_routine is registered on the channel from the moment the stream is started, and it indirectly calls QCameraStream::dataCallback (stream_cb_routine knows which mode the callback came back for, so it can dispatch to the right dataCallback). This callback keeps coming back all the time, so only after a new request has bumped mPendingCount does the next dataCallback actually call processPreviewFrame; otherwise the buffer is simply pushed back into the DRV queue.

void QCameraStream::dataCallback(mm_camera_super_buf_t *bufs)
{
    if (mPendingCount != 0) { // does this dataCallback really keep coming back all the time?
                              // From the code, the callback count that gets set is -1 by default, and -1 means infinite.
                              // That seems to be the only explanation: otherwise, with nothing to trigger it,
                              // this code would never notice even after onNewRequest incremented mPendingCount.
        ALOGD("Got frame request");
        pthread_mutex_lock(&mFrameDeliveredMutex);
        mPendingCount--;
        ALOGD("Completed frame request");
        pthread_cond_signal(&mFrameDeliveredCond);
        pthread_mutex_unlock(&mFrameDeliveredMutex);
        processPreviewFrame(bufs);
    } else {
        p_mm_ops->ops->qbuf(mCameraHandle,
                mChannelId, bufs->bufs[0]); // if nobody currently needs the data, push the buffer straight back into the DRV queue; this ends up calling V4L2 QBUF
    }
}
void QCameraStream::onNewRequest()
{
    ALOGI("%s:E",__func__);
    pthread_mutex_lock(&mFrameDeliveredMutex);
    ALOGI("Sending Frame request");
    mPendingCount++;
    pthread_cond_wait(&mFrameDeliveredCond, &mFrameDeliveredMutex); // wait for the current request to be served before issuing the next one
    ALOGV("Got frame");
    pthread_mutex_unlock(&mFrameDeliveredMutex);
    ALOGV("%s:X",__func__);
}

processPreviewFrame calls the enqueue_buffer method of the BufferQueue that was associated with the stream when it was created, pushing the data into the BufferQueue, and the corresponding consumer then receives it.
For instance, the current Android Camera HAL 2.0 code has
camera2/BurstCapture.h
camera2/CallbackProcessor.h
camera2/JpegProcessor.h
camera2/StreamingProcessor.h
camera2/ZslProcessor.h
implementing the corresponding Consumer::FrameAvailableListener, although burst-capture can be ignored for now since it is still only a stub.

ZslProcessor.h and CaptureSequencer.h also both implement FrameProcessor::FilteredListener's onFrameAvailable(…),
but as discussed this version of the QCam HAL does not feed the frame queue, so FrameProcessor cannot obtain any metadata.
Seen that way, onFrameAvailable will never be notified. (I believe that is just a problem with the code version I have.)

As mentioned, parts of this QCam HAL are not implemented, so mFrameQueue never receives data, even though it is supposed to be where the metadata coming back from the DRV gets queued.

In addition,
CaptureSequencer.h implements onCaptureAvailable, which JpegProcessor notifies once it has finished.

Things I am still curious about: multiple streams do not return at the same time, so if the CPU processes them at different speeds there will be a time skew? And how does the DRV handle video snapshot: if the buffers are strictly sequential, the video loses a frame; if not, does the DRV return multiple buffers at once? I had honestly never thought about this before @_@

Media file formats and the ISO/IEC 14496-12 specification

There are plenty of articles on the web explaining media file formats (MP4 files, for example). Many are written by authors who understood the material and then summarized it themselves; the upside is that they highlight the key points and the pitfalls, the downside is that new concepts and definitions sometimes appear out of nowhere, because they assume you already know the relevant specification. Here I want to start from the basics and write a bit about media file formats.

First you should get a copy of ISO/IEC 14496-12 (Part 12: ISO base media file format), which can be downloaded for free from http://standards.iso.org/ittf/PubliclyAvailableStandards/index.html. It is in English, but the basics are not hard to follow.
You should also know a few basic concepts such as container file, codec and track. A typical MP4 file, for example, is a container file: it holds data produced by codecs and may contain one or more tracks (usually one video track and one audio track; some files carry multiple audio tracks, say Mandarin/English/Cantonese/Spanish, and possibly subtitle tracks).

To start with, reading the most basic chapters is enough:
3 Terms, definitions, and abbreviated terms
4 Object-structured File Organization
5 Design Considerations
i.e. the basic terms and definitions such as Box, Chunk, Container Box, Hint Box, Sample and so on.
The file organization chapter specifies that a file consists of a sequence of Boxes and nothing else: all data lives inside Boxes. It also explains why this object-oriented design was chosen.
A Box normally consists of a header plus the data it contains; the header carries the Box's size and type, where size covers all the data in the Box, including the header itself.
If a Box's type is not recognized, that Box should simply be ignored.

aligned(8) class Box(unsigned int(32) boxtype,
		optional unsigned int(8)[16] extended_type) {
	unsigned int(32) size;
	unsigned int(32) type = boxtype;
	if (size == 1) { // if size is 1, largesize holds the actual size of this Box
		unsigned int(64) largesize;
	} else if (size == 0) {
		// box extends to end of file
	}
	if (boxtype == 'uuid') { // a user-defined type
		unsigned int(8)[16] usertype = extended_type;
	}
}

Everything is aligned on 8 bits, so the smallest unit is 8 bits, i.e. one byte; whenever this article says "unit", that is what it means.
From that you can easily tell that the size and type fields each occupy 4 units.

Many Boxes also carry version and flags fields, so a FullBox is abstracted out:

aligned(8) class FullBox(unsigned int(32) boxtype, unsigned int(8) v, bit(24) f)
		extends Box(boxtype) {
	unsigned int(8) version = v; // 1 unit
	bit(24) flags = f; // 3 units
}

If a Box's version is not recognized, that Box should be ignored as well.

Every file conforming to this specification must contain a File Type Box, placed as early in the file as practical. So what does it look like?

aligned(8) class FileTypeBox
		extends Box('ftyp') {
	unsigned int(32) major_brand;
	unsigned int(32) minor_version;
	unsigned int(32) compatible_brands[]; // to end of the box // variable length: it may be 1 unit, 2 or more; where it ends is decided by the size field above
}

We also know that a typical conforming file contains the moov and mdat Boxes; what are those?

For that you need to read on to
6 ISO Base Media File organization
where Table 1 — Box types, structure, and cross-reference lists essentially all the Boxes. Put simply, moov (Movie Box) is the Box holding the descriptive information while mdat (Media Data Box) holds the actual media data, and these Boxes are laid out side by side or nested; it's tree-like. That covers the most important ideas. Next you need to know how these parallel or nested Boxes are ordered, in particular the order of the child Boxes inside moov (Movie Box). Because of streaming support (real-time downloading and playing), moov is often placed before mdat: once the moov data is downloaded you already know how long the video is, what language it is in and so on, playback can start, and the mdat data keeps downloading, since mdat is usually much larger than moov. For recording, on the other hand, we do not know when recording will end, moov is comparatively small and its size may depend on the size of mdat, so moov is placed after mdat instead (during recording moov is kept in memory and appended to the end once mdat has been fully written).

That is the basic theory; plenty is left out here, so read the specification for the rest. As for the individual Boxes, I suggest reading about each one only when you actually need it, because there are simply too many of them.

Now let's look at an actual example:

guohai@KNIGHT:~$ AtomicParsley /path/to/VID_FILE.mp4 -T
Atom ftyp @ 0 of size: 24, ends @ 24
Atom moov @ 24 of size: 53884, ends @ 53908
     Atom mvhd @ 32 of size: 108, ends @ 140
     Atom trak @ 140 of size: 45719, ends @ 45859
         Atom tkhd @ 148 of size: 92, ends @ 240
         Atom mdia @ 240 of size: 45619, ends @ 45859
             Atom mdhd @ 248 of size: 32, ends @ 280
             Atom hdlr @ 280 of size: 44, ends @ 324
             Atom minf @ 324 of size: 45535, ends @ 45859
                 Atom vmhd @ 332 of size: 20, ends @ 352
                 Atom dinf @ 352 of size: 36, ends @ 388
                     Atom dref @ 360 of size: 28, ends @ 388
                 Atom stbl @ 388 of size: 45471, ends @ 45859
                     Atom stsd @ 396 of size: 151, ends @ 547
                         Atom avc1 @ 412 of size: 135, ends @ 547
                             Atom avcC @ 498 of size: 33, ends @ 531
                              Atom pasp @ 531 of size: 16, ends @ 547
                     Atom stts @ 547 of size: 21024, ends @ 21571
                     Atom stss @ 21571 of size: 232, ends @ 21803
                     Atom stsz @ 21803 of size: 10536, ends @ 32339
                     Atom stsc @ 32339 of size: 7072, ends @ 39411
                     Atom stco @ 39411 of size: 6448, ends @ 45859
     Atom trak @ 45859 of size: 8049, ends @ 53908
         Atom tkhd @ 45867 of size: 92, ends @ 45959
         Atom mdia @ 45959 of size: 7949, ends @ 53908
             Atom mdhd @ 45967 of size: 32, ends @ 45999
             Atom hdlr @ 45999 of size: 44, ends @ 46043
             Atom minf @ 46043 of size: 7865, ends @ 53908
                 Atom smhd @ 46051 of size: 16, ends @ 46067
                 Atom dinf @ 46067 of size: 36, ends @ 46103
                     Atom dref @ 46075 of size: 28, ends @ 46103
                 Atom stbl @ 46103 of size: 7805, ends @ 53908
                     Atom stsd @ 46111 of size: 69, ends @ 46180
                         Atom samr @ 46127 of size: 53, ends @ 46180
                             Atom damr @ 46163 of size: 17, ends @ 46180
                     Atom stts @ 46180 of size: 32, ends @ 46212
                     Atom stsz @ 46212 of size: 20, ends @ 46232
                     Atom stsc @ 46232 of size: 52, ends @ 46284
                     Atom stco @ 46284 of size: 7624, ends @ 53908
Atom free @ 53908 of size: 351116, ends @ 405024
Atom mdat @ 405024 of size: 39866486, ends @ 40271510

Don't panic at the sight of this: it is simply a tree view of all the Boxes in an MP4 file, printed with AtomicParsley. At the top level there are four Boxes: ftyp, moov, free and mdat. A Box is sometimes called an Atom; they mean the same thing. If you don't know what the free Box is, that is your cue to go read the spec.

Look at the first line in detail:

Atom ftyp @ 0 of size: 24, ends @ 24

It says that ftyp starts at unit 0, is 24 units long, and ends just before unit 24.
The other lines read the same way.

Alternatively you can download a GUI tool called Mp4Info to browse this tree of Boxes, although it only runs on Windows, crashes occasionally, and sometimes does not show the full hex data of a Box; for basic use it is fine. To inspect the raw bytes, use a proper hex editor; here I use one called Bless.
There are many other tools you can find online.

Now let's analyze a real example.

We know the first Box is ftyp, starting at 0 with size 24. Its data is the following (as seen when opening the MP4 file in Bless):

00 00 00 18 66 74 79 70 69 73 6F 6D 00 00 00 00 69 73 6F 6D 33 67 70 34

The first 4 units are the size, value 00 00 00 18, which is 24 in decimal.

The next 4 units are the type, value 66 74 79 70, the ASCII codes of the characters 'f', 't', 'y', 'p'.

major_brand starts at unit 9, value 69 73 6F 6D, i.e. 'i', 's', 'o', 'm'.

minor_version starts at unit 13, value 00 00 00 00, i.e. 0.

compatible_brands starts at unit 17 and contains two groups of 4 units, i.e. 8 units; as mentioned it is variable length, bounded by the size of the whole Box. Its value is 69 73 6F 6D 33 67 70 34, i.e. 'i', 's', 'o', 'm', '3', 'g', 'p', '4'.

That is everything in the ftyp Box.
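
Reading boxes programmatically works the same way; here is a small hedged sketch (not the gmpe4 code) that walks the top-level boxes of a file and prints each size/type pair, reading the big-endian fields exactly as above:

/* Sketch: walk the top-level boxes of an MP4 file and print size/type. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(int argc, char **argv)
{
    FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;
    unsigned char hdr[8];
    long offset = 0;

    while (f && fread(hdr, 1, 8, f) == 8) {
        uint32_t size = be32(hdr);        /* whole box, header included */
        char type[5];
        memcpy(type, hdr + 4, 4);
        type[4] = '\0';
        printf("Atom %s @ %ld of size: %u\n", type, offset, size);
        /* size == 1 (largesize follows) and size == 0 (box runs to EOF)
         * are not handled in this sketch */
        if (size < 8)
            break;
        offset += size;
        fseek(f, offset, SEEK_SET);       /* skip the box payload */
    }
    if (f)
        fclose(f);
    return 0;
}

Run against the file above it should print the same four top-level ftyp/moov/free/mdat entries that AtomicParsley shows.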

Next let's look at mvhd, the header of moov; it is 108 units long in total (version 0; see the spec for other versions) and its data is:

00 00 00 6C 6D 76 68 64 00 00 00 00 50 4C 2C 57 50 4C 2C 57 00 00 03 E8 00 03 7B 90 00 01 00 00 01 00 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 40 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 03

The specification defines MovieHeaderBox as follows:

aligned(8) class MovieHeaderBox extends FullBox('mvhd', version, 0) {
	if (version == 1) {
		unsigned int(64) creation_time;
		unsigned int(64) modification_time;
		unsigned int(32) timescale;
		unsigned int(64) duration;
	} else { // version==0
		unsigned int(32) creation_time;
		unsigned int(32) modification_time;
		unsigned int(32) timescale;
		unsigned int(32) duration;
	}

	template int(32) rate = 0x00010000; // typically 1.0
	template int(16) volume = 0x0100; // typically, full volume
	const bit(16) reserved = 0;
	const unsigned int(32)[2] reserved = 0;
	template int(32)[9] matrix =
		{ 0x00010000,0,0,0,0x00010000,0,0,0,0x40000000 }; // Unity matrix
	bit(32)[6] pre_defined = 0;
	unsigned int(32) next_track_ID;
}

The first 4 units are the size, value 00 00 00 6C, which is 108 (6 * 16 + 12) in decimal.

The next 4 units are the type, value 6D 76 68 64, the ASCII codes of 'm', 'v', 'h', 'd'.

Then come version, flags, creation_time, modification_time and so on.

One thing to note: the spec says some field widths differ between version 0 and version 1, and we can see that here.

The version byte is unit 9 and occupies a single byte, value 00, i.e. 0, so the following creation_time, modification_time, timescale and duration fields each occupy 4 units.

creation_time starts at unit 13, value 50 4C 2C 57; this is a timestamp, which rendered as a string is "2012-09-09 13:42:47".

modification_time starts at unit 17, also 50 4C 2C 57, so the file has not been modified since it was created at 2012-09-09 13:42:47.

timescale starts at unit 21, value 00 00 03 E8, i.e. 1000 in decimal.

duration starts at unit 25, value 00 03 7B 90, i.e. 228240 in decimal.

This duration is that of the longest track, as the spec states, and it is expressed in the timescale above.

Converted to an everyday time base that is 228240 / 1000 = 228.24, i.e. about 228 seconds, or 228240 milliseconds.

Analyzing a media file format basically follows this approach; for instance, to find out how many tracks a video has and what kind each one is, you only need to parse its moov/trak Boxes.

Whenever you hit a concept you don't quite understand, look for its definition in the specification first; the explanations there are usually clear enough. For example, some of the most basic ones:

[mvhd]
timescale is an integer that specifies the time-scale for the entire presentation; this is the number of time units that pass in one second. For example, a time coordinate system that measures time in sixtieths of a second has a time scale of 60.

[mvhd]
duration is an integer that declares length of the presentation (in the indicated timescale). This property is derived from the presentation’s tracks: the value of this field corresponds to the duration of the longest track in the presentation. If the duration cannot be determined then duration is set to all 1s.

[mvhd]
rate is a fixed point 16.16 number that indicates the preferred rate to play the presentation; 1.0 (0x00010000) is normal forward playback

[trak]
duration is an integer that indicates the duration of this track (in the timescale indicated in the Movie Header Box). The value of this field is equal to the sum of the durations of all of the track’s edits. If there is no edit list, then the duration is the sum of the sample durations, converted into the timescale in the Movie Header Box. If the duration of this track cannot be determined then duration is set to all 1s.

[trak]
layer specifies the front-to-back ordering of video tracks; tracks with lower numbers are closer to the viewer. 0 is the normal value, and -1 would be in front of track 0, and so on.

Based on the specification and the AOSP code I wrote an MPEG4 file parsing tool, verified on Linux x86_64: https://github.com/guohai/gmpe4. Feel free to use it, file suggestions, or improve it.

Recommended references:
MP4 file format parsing series (http://blog.sina.com.cn/s/blog_48f93b530100jz4b.html)
QuickTime container(http://wiki.multimedia.cx/index.php?title=QuickTime_container)
http://mp4ra.org/atoms.html

A first look at Camera HAL 2.0

Android 4.2 changed the Camera HAL quite substantially, essentially deprecating the old CameraHardwareInterface and introducing a new one. Both paths are kept, and the Camera Service layer loads the matching framework HAL depending on which HAL version the vendor implements. There is still little written about this, and few vendors implement it; roughly speaking, Qualcomm and Samsung have done so on their platforms.
The text below is based on the Qualcomm platform and my understanding of the AOSP code; please point out anything wrong, and I will correct mistakes as I keep learning!

I watched a talk about this (as an aside, I can barely follow the English in it); go watch it yourself if you can. Since the video cannot be downloaded, I took some screenshots and put them on Flickr.
www.youtube.com/watch?v=Lald5txnoHw
Below is a short introduction to the talk and to HAL 2.0:

The Linux Foundation
Android Builders Summit 2013
Camera 2.0: The New Camera Hardware Interface in Android 4.2
By Balwinder Kaur & Ashutosh Gupta
San Francisco, California

Android 4.2 was released with a new Camera Hardware Abstraction Layer (HAL), Camera 2.0. Camera 2.0 places a big emphasis on collecting and providing metadata associated with each frame. It also provides the ability to re-process streams. Although the SDK level has yet to expose any new APIs to the end user, the Camera HAL and the Camera Service architecture have been revamped into a different architecture. This presentation provides an insight into the new architecture as well as covering some of the challenges faced in building production quality Camera HAL implementations.
The intended audience for this conference session is engineers who want to learn more about the Android Camera Internals. This talk should facilitate engineers wanting to integrate, improve or innovate using the Camera subsystem.

What follows is my own understanding.

How does HAL2 talk to the DRV?

The HAL2 framework and the vendor implementation are tied together by /path/to/aosp/hardware/qcom/camera/QCamera/HAL2/wrapper/QualcommCamera.cpp, which initializes the struct named HMI, just like before.

static hw_module_methods_t camera_module_methods = {
    open: camera_device_open,
};

static hw_module_t camera_common  = {
    tag: HARDWARE_MODULE_TAG,
    module_api_version: CAMERA_MODULE_API_VERSION_2_0,
    hal_api_version: HARDWARE_HAL_API_VERSION,
    id: CAMERA_HARDWARE_MODULE_ID,
    name: "Qcamera",
    author:"Qcom",
    methods: &camera_module_methods,
    dso: NULL,
    reserved:  {0},
};

camera_module_t HAL_MODULE_INFO_SYM = {
    common: camera_common,
    get_number_of_cameras: get_number_of_cameras,
    get_camera_info: get_camera_info,
};

camera2_device_ops_t camera_ops = {
    set_request_queue_src_ops:           android::set_request_queue_src_ops,
    notify_request_queue_not_empty:      android::notify_request_queue_not_empty,
    set_frame_queue_dst_ops:             android::set_frame_queue_dst_ops,
    get_in_progress_count:               android::get_in_progress_count,
    flush_captures_in_progress:          android::flush_captures_in_progress,
    construct_default_request:           android::construct_default_request,

    allocate_stream:                     android::allocate_stream,
    register_stream_buffers:             android::register_stream_buffers,
    release_stream:                      android::release_stream,

    allocate_reprocess_stream:           android::allocate_reprocess_stream,
    allocate_reprocess_stream_from_stream: android::allocate_reprocess_stream_from_stream,
    release_reprocess_stream:            android::release_reprocess_stream,

    trigger_action:                      android::trigger_action,
    set_notify_callback:                 android::set_notify_callback,
    get_metadata_vendor_tag_ops:         android::get_metadata_vendor_tag_ops,
    dump:                                android::dump,
};

In QCameraHWI:

QCameraHardwareInterface::
QCameraHardwareInterface(int cameraId, int mode)
                  : mCameraId(cameraId)
{

    cam_ctrl_dimension_t mDimension;

    /* Open camera stack! */
    memset(&mMemHooks, 0, sizeof(mm_camear_mem_vtbl_t));
    mMemHooks.user_data=this;
    mMemHooks.get_buf=get_buffer_hook;
    mMemHooks.put_buf=put_buffer_hook;

    mCameraHandle=camera_open(mCameraId, &mMemHooks);
    ALOGV("Cam open returned %p",mCameraHandle);
    if(mCameraHandle == NULL) {
        ALOGE("startCamera: cam_ops_open failed: id = %d", mCameraId);
        return;
    }
    mCameraHandle->ops->sync(mCameraHandle->camera_handle);

    mChannelId=mCameraHandle->ops->ch_acquire(mCameraHandle->camera_handle);
    if(mChannelId<=0)
    {
        ALOGE("%s:Channel aquire failed",__func__);
        mCameraHandle->ops->camera_close(mCameraHandle->camera_handle);
        return;
    }

    /* Initialize # of frame requests the HAL is handling to zero*/
    mPendingRequests=0;
}

This calls camera_open() in mm_camera_interface,

which in turn calls mm_camera_open() in mm_camera.

Communication with the DRV still happens via V4L2 here.

But why are some operations in the wrapper left unimplemented by Qualcomm?

int trigger_action(const struct camera2_device *,
        uint32_t trigger_id,
        int32_t ext1,
        int32_t ext2)
{
    return INVALID_OPERATION;
}

In theory things like auto focus should be triggered through this, yet it is a stub. Are these simply not mandatory, or is the real implementation hiding in some corner I haven't found?

Is libmmcamera_interface or libmmcamera_interface2 in use at the moment?
From the code it appears to be libmmcamera_interface.

Another example: what exactly happens on start preview?
Camera2Client::startPreview(…)
Camera2Client::startPreviewL(…)
StreamingProcessor::updatePreviewStream(…)
Camera2Device::createStream(…)
Camera2Device::StreamAdapter::connectToDevice(…)
camera2_device_t->ops->allocate_stream(…)

This allocate_stream is implemented by the vendor; for Qualcomm's camera it lives in /path/to/aosp/hardware/qcom/camera/QCamera/HAL2/wrapper/QualcommCamera.cpp

android::allocate_stream(…)
QCameraHardwareInterface::allocate_stream(…)
QCameraStream::createInstance(…)

QCameraStream_preview::createInstance(uint32_t CameraHandle,
                        uint32_t ChannelId,
                        uint32_t Width,
                        uint32_t Height,
                        int requestedFormat,
                        mm_camera_vtbl_t *mm_ops,
                        camera_mode_t mode)
{
  QCameraStream* pme = new QCameraStream_preview(CameraHandle,
                        ChannelId,
                        Width,
                        Height,
                        requestedFormat,
                        mm_ops,
                        mode);
  return pme;
}

Even though this rework still counts as fairly "new" for now, it is the direction things are going, so it does no harm to understand it!
P.S. Android 4.3 was released today and I am still digging through 4.2 stuff, heh.

How the FLAC encoder is integrated into Stagefright (OpenMAX IL)

Let's take an example to see how a codec talks to OpenMAX IL. This really belongs in the post about how Stagefright interacts with codecs, but it is fairly self-contained and fairly long, so it gets its own write-up.

Generally speaking, no matter how complex a codec (encoder or decoder) is, it always exposes an interface for us to feed it a chunk of data and an interface for us to fetch a chunk of data back, because that is simply what it does.
So integrating a codec through OpenMAX IL is not a huge amount of work for us. The FLAC encoder is the example here (because it is simple). For the FLAC codec itself see http://xiph.org/flac/; in Android its sources live under /path/to/aosp/external/flac/. OpenMAX IL talks to it through libstagefright_soft_flacenc.so, which is what we focus on today; its sources are under /path/to/aosp/frameworks/av/media/libstagefright/codecs/flac/enc/.

If you are not familiar with Stagefright and OpenMAX IL in Android, I suggest reading the article mentioned at the beginning first.

First, SoftFlacEncoder inherits from SimpleSoftOMXComponent and overrides 4 methods:

virtual OMX_ERRORTYPE initCheck() const; // an empty implementation in SoftOMXComponent; your codec implements it so others can probe whether the codec is healthy
virtual OMX_ERRORTYPE internalGetParameter(
        OMX_INDEXTYPE index, OMX_PTR params); // the OpenMAX IL component's getParameter

virtual OMX_ERRORTYPE internalSetParameter(
        OMX_INDEXTYPE index, const OMX_PTR params); // the OpenMAX IL component's setParameter

virtual void onQueueFilled(OMX_U32 portIndex); // the OpenMAX IL component's emptyThisBuffer and fillThisBuffer; if that is unclear and you are still reading, you must be reading this as light fiction ^_^

If these four methods mean nothing to you, go skim the OpenMAX IL/Stagefright code or the specification first.

It also has a few private methods:

void initPorts(); // OpenMAX IL communication needs ports

OMX_ERRORTYPE configureEncoder(); // FLAC itself needs some configuration too, e.g. installing the callback below

// FLAC encoder callbacks
// maps to encoderEncodeFlac()
static FLAC__StreamEncoderWriteStatus flacEncoderWriteCallback(
        const FLAC__StreamEncoder *encoder, const FLAC__byte buffer[],
        size_t bytes, unsigned samples, unsigned current_frame, void *client_data); // this is the callback actually handed down to the encoder

FLAC__StreamEncoderWriteStatus onEncodedFlacAvailable(
            const FLAC__byte buffer[],
            size_t bytes, unsigned samples, unsigned current_frame); // this one merely bridges from C back into C++

The two methods above are the callback; effectively there is just one, because:

// static
FLAC__StreamEncoderWriteStatus SoftFlacEncoder::flacEncoderWriteCallback(
            const FLAC__StreamEncoder *encoder, const FLAC__byte buffer[],
            size_t bytes, unsigned samples, unsigned current_frame, void *client_data) {
    return ((SoftFlacEncoder*) client_data)->onEncodedFlacAvailable(
            buffer, bytes, samples, current_frame);
}

Why a callback? As said earlier, a codec always has an input and an output; here FLAC gives us a function, FLAC__stream_encoder_process_interleaved(…), to feed data in, and a callback to get data out. That's all there is to it!

See the description in the FLAC header file:

/** Submit data for encoding.
 *  This version allows you to supply the input data where the channels
 *  are interleaved into a single array (i.e. channel0_sample0,
 *  channel1_sample0, ... , channelN_sample0, channel0_sample1, ...).
 *  The samples need not be block-aligned but they must be
 *  sample-aligned, i.e. the first value should be channel0_sample0
 *  and the last value channelN_sampleM.  Each sample should be a signed
 *  integer, right-justified to the resolution set by
 *  FLAC__stream_encoder_set_bits_per_sample().  For example, if the
 *  resolution is 16 bits per sample, the samples should all be in the
 *  range [-32768,32767].
 *
 *  For applications where channel order is important, channels must
 *  follow the order as described in the
 *  <A HREF="../format.html#frame_header">frame header</A>.
 *
 * \param  encoder  An initialized encoder instance in the OK state.
 * \param  buffer   An array of channel-interleaved data (see above).
 * \param  samples  The number of samples in one channel, the same as for
 *                  FLAC__stream_encoder_process().  For example, if
 *                  encoding two channels, \c 1000 \a samples corresponds
 *                  to a \a buffer of 2000 values.
 * \assert
 *    \code encoder != NULL \endcode
 *    \code FLAC__stream_encoder_get_state(encoder) == FLAC__STREAM_ENCODER_OK \endcode
 * \retval FLAC__bool
 *    \c true if successful, else \c false; in this case, check the
 *    encoder state with FLAC__stream_encoder_get_state() to see what
 *    went wrong.
 */
FLAC_API FLAC__bool FLAC__stream_encoder_process_interleaved(FLAC__StreamEncoder *encoder, const FLAC__int32 buffer[], unsigned samples);

And this is where the callback gets registered; the one we register is the FLAC__StreamEncoderWriteCallback:

/** Initialize the encoder instance to encode native FLAC streams.
 *
 *  This flavor of initialization sets up the encoder to encode to a
 *  native FLAC stream. I/O is performed via callbacks to the client.
 *  For encoding to a plain file via filename or open \c FILE*,
 *  FLAC__stream_encoder_init_file() and FLAC__stream_encoder_init_FILE()
 *  provide a simpler interface.
 *
 *  This function should be called after FLAC__stream_encoder_new() and
 *  FLAC__stream_encoder_set_*() but before FLAC__stream_encoder_process()
 *  or FLAC__stream_encoder_process_interleaved().
 *  initialization succeeded.
 *
 *  The call to FLAC__stream_encoder_init_stream() currently will also
 *  immediately call the write callback several times, once with the \c fLaC
 *  signature, and once for each encoded metadata block.
 *
 * \param  encoder            An uninitialized encoder instance.
 * \param  write_callback     See FLAC__StreamEncoderWriteCallback.  This
 *                            pointer must not be \c NULL.
 * \param  seek_callback      See FLAC__StreamEncoderSeekCallback.  This
 *                            pointer may be \c NULL if seeking is not
 *                            supported.  The encoder uses seeking to go back
 *                            and write some some stream statistics to the
 *                            STREAMINFO block; this is recommended but not
 *                            necessary to create a valid FLAC stream.  If
 *                            \a seek_callback is not \c NULL then a
 *                            \a tell_callback must also be supplied.
 *                            Alternatively, a dummy seek callback that just
 *                            returns \c FLAC__STREAM_ENCODER_SEEK_STATUS_UNSUPPORTED
 *                            may also be supplied, all though this is slightly
 *                            less efficient for the encoder.
 * \param  tell_callback      See FLAC__StreamEncoderTellCallback.  This
 *                            pointer may be \c NULL if seeking is not
 *                            supported.  If \a seek_callback is \c NULL then
 *                            this argument will be ignored.  If
 *                            \a seek_callback is not \c NULL then a
 *                            \a tell_callback must also be supplied.
 *                            Alternatively, a dummy tell callback that just
 *                            returns \c FLAC__STREAM_ENCODER_TELL_STATUS_UNSUPPORTED
 *                            may also be supplied, all though this is slightly
 *                            less efficient for the encoder.
 * \param  metadata_callback  See FLAC__StreamEncoderMetadataCallback.  This
 *                            pointer may be \c NULL if the callback is not
 *                            desired.  If the client provides a seek callback,
 *                            this function is not necessary as the encoder
 *                            will automatically seek back and update the
 *                            STREAMINFO block.  It may also be \c NULL if the
 *                            client does not support seeking, since it will
 *                            have no way of going back to update the
 *                            STREAMINFO.  However the client can still supply
 *                            a callback if it would like to know the details
 *                            from the STREAMINFO.
 * \param  client_data        This value will be supplied to callbacks in their
 *                            \a client_data argument.
 * \assert
 *    \code encoder != NULL \endcode
 * \retval FLAC__StreamEncoderInitStatus
 *    \c FLAC__STREAM_ENCODER_INIT_STATUS_OK if initialization was successful;
 *    see FLAC__StreamEncoderInitStatus for the meanings of other return values.
 */
FLAC_API FLAC__StreamEncoderInitStatus FLAC__stream_encoder_init_stream(FLAC__StreamEncoder *encoder, FLAC__StreamEncoderWriteCallback write_callback, FLAC__StreamEncoderSeekCallback seek_callback, FLAC__StreamEncoderTellCallback tell_callback, FLAC__StreamEncoderMetadataCallback metadata_callback, void *client_data);
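
Putting the two declarations together, driving the FLAC encoder on its own looks roughly like the hedged sketch below (independent of the OMX component; the output file, sample rate and block size are made-up values):

/* Minimal sketch of using the FLAC stream encoder directly, outside Stagefright. */
#include <FLAC/stream_encoder.h>
#include <stdio.h>

static FLAC__StreamEncoderWriteStatus write_cb(
        const FLAC__StreamEncoder *enc, const FLAC__byte buffer[],
        size_t bytes, unsigned samples, unsigned current_frame, void *client_data)
{
    fwrite(buffer, 1, bytes, (FILE *)client_data); /* encoded output lands here */
    return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
}

int main(void)
{
    FILE *out = fopen("out.flac", "wb");              /* made-up sink */
    FLAC__StreamEncoder *enc = FLAC__stream_encoder_new();

    FLAC__stream_encoder_set_channels(enc, 2);
    FLAC__stream_encoder_set_sample_rate(enc, 44100);
    FLAC__stream_encoder_set_bits_per_sample(enc, 16); /* same setting SoftFlacEncoder uses */

    /* register the output callback; seek/tell/metadata callbacks are optional */
    FLAC__stream_encoder_init_stream(enc, write_cb, NULL, NULL, NULL, out);

    FLAC__int32 pcm[2 * 1152] = {0};                   /* interleaved L/R samples */
    FLAC__stream_encoder_process_interleaved(enc, pcm, 1152); /* 1152 samples per channel */

    FLAC__stream_encoder_finish(enc);
    FLAC__stream_encoder_delete(enc);
    fclose(out);
    return 0;
}

SoftFlacEncoder does essentially the same thing, except that the PCM input comes from the OMX input port and the write callback copies the encoded bytes into the OMX output buffer.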

Back to the component, a quick walkthrough of the code:
initPorts() initializes the two ports, 0 for input and 1 for output. One rule worth remembering, because it matters a lot when trying to understand this many-layered system: whoever allocates the memory frees it.

The most important method is the one below; the upper layer calls into it after filling a buffer, and also when it wants to fetch a processed buffer back.

void SoftFlacEncoder::onQueueFilled(OMX_U32 portIndex) {

    ALOGV("SoftFlacEncoder::onQueueFilled(portIndex=%ld)", portIndex);

    if (mSignalledError) { // an error happened earlier
        return;
    }

    List<BufferInfo *> &inQueue = getPortQueue(0); // the input port; I wonder whether there can be more than one buffer at a time here, in theory just one
    List<BufferInfo *> &outQueue = getPortQueue(1); // the output port

    while (!inQueue.empty() && !outQueue.empty()) {
        BufferInfo *inInfo = *inQueue.begin();
        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;

        BufferInfo *outInfo = *outQueue.begin();
        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;

        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { // end of encoding: clear the flags and return through notifyEmptyBufferDone and notifyFillBufferDone
            inQueue.erase(inQueue.begin()); // memory we allocated ourselves, so we release it ourselves
            inInfo->mOwnedByUs = false;
            notifyEmptyBufferDone(inHeader);

            outHeader->nFilledLen = 0;
            outHeader->nFlags = OMX_BUFFERFLAG_EOS;

            outQueue.erase(outQueue.begin()); // memory we allocated ourselves, so we release it ourselves
            outInfo->mOwnedByUs = false;
            notifyFillBufferDone(outHeader);

            return;
        }

        if (inHeader->nFilledLen > kMaxNumSamplesPerFrame * sizeof(FLAC__int32) * 2) {
            ALOGE("input buffer too large (%ld).", inHeader->nFilledLen);
            mSignalledError = true;
            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
            return;
        }

        assert(mNumChannels != 0);
        mEncoderWriteData = true;
        mEncoderReturnedEncodedData = false;
        mEncoderReturnedNbBytes = 0;
        mCurrentInputTimeStamp = inHeader->nTimeStamp;

        const unsigned nbInputFrames = inHeader->nFilledLen / (2 * mNumChannels);
        const unsigned nbInputSamples = inHeader->nFilledLen / 2;
        const OMX_S16 * const pcm16 = reinterpret_cast<OMX_S16 *>(inHeader->pBuffer); // pBuffer in OMX_BUFFERHEADERTYPE is 8-bit by default; here it is regrouped as 16-bit, so the number of samples in the buffer is nFilledLen/2
        // Why regroup as 16-bit? Because the codec was configured with FLAC__stream_encoder_set_bits_per_sample(mFlacStreamEncoder, 16);

        for (unsigned i=0 ; i < nbInputSamples ; i++) {
            mInputBufferPcm32[i] = (FLAC__int32) pcm16[i]; // widen to 32-bit, because that is what FLAC expects
        }
        ALOGV(" about to encode %u samples per channel", nbInputFrames);
        FLAC__bool ok = FLAC__stream_encoder_process_interleaved(
                        mFlacStreamEncoder,
                        mInputBufferPcm32,
                        nbInputFrames /*samples per channel*/ ); // the actual encoding happens here; because of FLAC's alignment requirements the input buffer needs the two conversions marked above

        if (ok) {
            // the callback registered earlier should be blocking, i.e. by the time FLAC__stream_encoder_process_interleaved returns, the callback has returned too
            if (mEncoderReturnedEncodedData && (mEncoderReturnedNbBytes != 0)) {
                ALOGV(" dequeueing buffer on output port after writing data");
                outInfo->mOwnedByUs = false;
                outQueue.erase(outQueue.begin());
                outInfo = NULL;
                notifyFillBufferDone(outHeader); // the encoded data travels back to the upper layer through here
                outHeader = NULL;
                mEncoderReturnedEncodedData = false;
            } else {
                ALOGV(" encoder process_interleaved returned without data to write");
            }
        } else {
            ALOGE(" error encountered during encoding");
            mSignalledError = true;
            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
            return;
        }

        inInfo->mOwnedByUs = false;
        inQueue.erase(inQueue.begin());
        inInfo = NULL;
        notifyEmptyBufferDone(inHeader); // tell the upper layer that the FLAC codec has consumed the data in the in buffer; so the cycle is: upper layer feeds data, codec encodes, codec returns data, and round two begins
        // I wonder whether any implementation lets the upper layer keep feeding data as soon as the codec says it has taken the previous buffer, even before the codec has finished encoding and handed the previous output back; that might cut down on the time spent waiting on each other to copy/prepare data in some cases. But this is a software encoder and eats CPU time anyway, so it would probably only help on multiple CPUs, at the price of extra complexity.
        inHeader = NULL;
    }
}

Next, inside the callback:

FLAC__StreamEncoderWriteStatus SoftFlacEncoder::onEncodedFlacAvailable(
            const FLAC__byte buffer[],
            size_t bytes, unsigned samples, unsigned current_frame) {
    ALOGV("SoftFlacEncoder::onEncodedFlacAvailable(bytes=%d, samples=%d, curr_frame=%d)",
            bytes, samples, current_frame);

#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
    if (samples == 0) {
        ALOGI(" saving %d bytes of header", bytes);
        memcpy(mHeader + mHeaderOffset, buffer, bytes);
        mHeaderOffset += bytes;// will contain header size when finished receiving header
        return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
    }

#endif

    if ((samples == 0) || !mEncoderWriteData) {
        // called by the encoder because there's header data to save, but it's not the role
        // of this component (unless WRITE_FLAC_HEADER_IN_FIRST_BUFFER is defined)
        ALOGV("ignoring %d bytes of header data (samples=%d)", bytes, samples);
        return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
    }

    List<BufferInfo *> &outQueue = getPortQueue(1);
    CHECK(!outQueue.empty());
    BufferInfo *outInfo = *outQueue.begin();
    OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;

#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
    if (!mWroteHeader) {
        ALOGI(" writing %d bytes of header on output port", mHeaderOffset);
        memcpy(outHeader->pBuffer + outHeader->nOffset + outHeader->nFilledLen,
                mHeader, mHeaderOffset);
        outHeader->nFilledLen += mHeaderOffset;
        outHeader->nOffset    += mHeaderOffset;
        mWroteHeader = true;
    }
#endif

    // write encoded data
    ALOGV(" writing %d bytes of encoded data on output port", bytes);
    if (bytes > outHeader->nAllocLen - outHeader->nOffset - outHeader->nFilledLen) {
        ALOGE(" not enough space left to write encoded data, dropping %u bytes", bytes);
        // a fatal error would stop the encoding
        return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
    }
    memcpy(outHeader->pBuffer + outHeader->nOffset, buffer, bytes); // copy the data into the OpenMAX IL buffer

    outHeader->nTimeStamp = mCurrentInputTimeStamp;
    outHeader->nOffset = 0;
    outHeader->nFilledLen += bytes;
    outHeader->nFlags = 0;

    mEncoderReturnedEncodedData = true;
    mEncoderReturnedNbBytes += bytes;

    return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
}

Things worth noting

// FLAC takes samples aligned on 32bit boundaries, use this buffer for the conversion
// before passing the input data to the encoder
FLAC__int32* mInputBufferPcm32; // this is the point analyzed above

Just a few hundred lines of code — this one counts as simple!

How the data coming back from the driver gets displayed

Want to know how the data coming back from the driver ends up on the screen?
Which layer does that work?

Tracing the code makes the flow clear; the code version here is Jelly Bean.

When CameraHardwareInterface is initialized, it sets up the window-related state:

struct camera_preview_window {
    struct preview_stream_ops nw;
    void *user;
};

struct camera_preview_window mHalPreviewWindow;

status_t initialize(hw_module_t *module)
{
    ALOGI("Opening camera %s", mName.string());
    int rc = module->methods->open(module, mName.string(),
                                   (hw_device_t **)&mDevice);
    if (rc != OK) {
        ALOGE("Could not open camera %s: %d", mName.string(), rc);
        return rc;
    }
    initHalPreviewWindow();
    return rc;
}

void initHalPreviewWindow()
{
    mHalPreviewWindow.nw.cancel_buffer = __cancel_buffer;
    mHalPreviewWindow.nw.lock_buffer = __lock_buffer;
    mHalPreviewWindow.nw.dequeue_buffer = __dequeue_buffer;
    mHalPreviewWindow.nw.enqueue_buffer = __enqueue_buffer;
    mHalPreviewWindow.nw.set_buffer_count = __set_buffer_count;
    mHalPreviewWindow.nw.set_buffers_geometry = __set_buffers_geometry;
    mHalPreviewWindow.nw.set_crop = __set_crop;
    mHalPreviewWindow.nw.set_timestamp = __set_timestamp;
    mHalPreviewWindow.nw.set_usage = __set_usage;
    mHalPreviewWindow.nw.set_swap_interval = __set_swap_interval;

    mHalPreviewWindow.nw.get_min_undequeued_buffer_count =
            __get_min_undequeued_buffer_count;
}

static int __dequeue_buffer(struct preview_stream_ops* w,
                            buffer_handle_t** buffer, int *stride)
{
    int rc;
    ANativeWindow *a = anw(w);
    ANativeWindowBuffer* anb;
    rc = native_window_dequeue_buffer_and_wait(a, &anb);
    if (!rc) {
        *buffer = &anb->handle;
        *stride = anb->stride;
    }
    return rc;
}

static int __enqueue_buffer(struct preview_stream_ops* w,
                  buffer_handle_t* buffer)
{
    ANativeWindow *a = anw(w);
    return a->queueBuffer(a,
              container_of(buffer, ANativeWindowBuffer, handle), -1);
}

Then, inside the vendor's HAL — taking TI as the example here:

/**
   @brief Sets ANativeWindow object.

   Preview buffers provided to CameraHal via this object. DisplayAdapter will be interfacing with it
   to render buffers to display.

   @param[in] window The ANativeWindow object created by Surface flinger
   @return NO_ERROR If the ANativeWindow object passes validation criteria
   @todo Define validation criteria for ANativeWindow object. Define error codes for scenarios

 */
status_t CameraHal::setPreviewWindow(struct preview_stream_ops *window)
{
    status_t ret = NO_ERROR;
    CameraAdapter::BuffersDescriptor desc;

    LOG_FUNCTION_NAME;
    mSetPreviewWindowCalled = true;

   ///If the Camera service passes a null window, we destroy existing window and free the DisplayAdapter
    if(!window)
    {
        if(mDisplayAdapter.get() != NULL)
        {
            ///NULL window passed, destroy the display adapter if present
            CAMHAL_LOGDA("NULL window passed, destroying display adapter");
            mDisplayAdapter.clear();
            ///@remarks If there was a window previously existing, we usually expect another valid window to be passed by the client
            ///@remarks so, we will wait until it passes a valid window to begin the preview again
            mSetPreviewWindowCalled = false;
        }
        CAMHAL_LOGDA("NULL ANativeWindow passed to setPreviewWindow");
        return NO_ERROR;
    }else if(mDisplayAdapter.get() == NULL)
    {
        // Need to create the display adapter since it has not been created
        // Create display adapter
        mDisplayAdapter = new ANativeWindowDisplayAdapter();

        ...

        // DisplayAdapter needs to know where to get the CameraFrames from inorder to display
        // Since CameraAdapter is the one that provides the frames, set it as the frame provider for DisplayAdapter
        mDisplayAdapter->setFrameProvider(mCameraAdapter);

        ...

        // Update the display adapter with the new window that is passed from CameraService
        // the window here (ANativeWindowDisplayAdapter::mANativeWindow) is exactly CameraHardwareInterface::mHalPreviewWindow::nw
        ret  = mDisplayAdapter->setPreviewWindow(window);
        if(ret!=NO_ERROR)
            {
            CAMHAL_LOGEB("DisplayAdapter setPreviewWindow returned error %d", ret);
            }

        if(mPreviewStartInProgress)
        {
            CAMHAL_LOGDA("setPreviewWindow called when preview running");
            // Start the preview since the window is now available
            ret = startPreview();
        }
    } else {
        // Update the display adapter with the new window that is passed from CameraService
        ret = mDisplayAdapter->setPreviewWindow(window);
        if ( (NO_ERROR == ret) && previewEnabled() ) {
            restartPreview();
        } else if (ret == ALREADY_EXISTS) {
            // ALREADY_EXISTS should be treated as a noop in this case
            ret = NO_ERROR;
        }
    }
    LOG_FUNCTION_NAME_EXIT;

    return ret;

}

Note the comment I added on the line just above that setPreviewWindow call.

TI's HAL wraps things into adapters: some fetch the data (e.g. V4LCameraAdapter, i.e. the CameraAdapter), and some hand the data elsewhere to be consumed (e.g. ANativeWindowDisplayAdapter, i.e. the DisplayAdapter).

On TI's hardware path there is a previewThread (inside V4LCameraAdapter); it keeps pulling frames out of V4L2 via IOCTL:

int V4LCameraAdapter::previewThread()
{
    status_t ret = NO_ERROR;
    int width, height;
    CameraFrame frame;

    if (mPreviewing)
        {
        int index = 0;
        char *fp = this->GetFrame(index);

        uint8_t* ptr = (uint8_t*) mPreviewBufs.keyAt(index);

        int width, height;
        uint16_t* dest = (uint16_t*)ptr;
        uint16_t* src = (uint16_t*) fp;
        mParams.getPreviewSize(&width, &height);

        ...

        mParams.getPreviewSize(&width, &height);
        frame.mFrameType = CameraFrame::PREVIEW_FRAME_SYNC;
        frame.mBuffer = ptr;

        ...

        ret = sendFrameToSubscribers(&frame);

        }

    return ret;
}

char * V4LCameraAdapter::GetFrame(int &index)
{
    int ret;

    mVideoInfo->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    mVideoInfo->buf.memory = V4L2_MEMORY_MMAP;

    /* DQ */
    ret = ioctl(mCameraHandle, VIDIOC_DQBUF, &mVideoInfo->buf);
    if (ret < 0) {
        CAMHAL_LOGEA("GetFrame: VIDIOC_DQBUF Failed");
        return NULL;
    }
    nDequeued++;

    index = mVideoInfo->buf.index;

    return (char *)mVideoInfo->mem[mVideoInfo->buf.index];
}
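
GetFrame above only shows the dequeue half. For reference, here is a stand-alone sketch of the usual V4L2 mmap-capture cycle (plain V4L2 against an opened /dev/videoX, not the TI adapter itself; buffer setup via VIDIOC_REQBUFS/VIDIOC_QUERYBUF/mmap is assumed to have happened already): dequeue a filled buffer with VIDIOC_DQBUF, consume it, then hand it back with VIDIOC_QBUF so the driver can fill it again.

#include <cstring>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

// One iteration of a typical V4L2 mmap-capture loop.
// 'mapped' holds the mmap'ed addresses of the driver buffers, indexed by buf.index.
static int capture_one_frame(int fd, void* mapped[],
                             void (*consume)(void* data, size_t len)) {
    v4l2_buffer buf;
    std::memset(&buf, 0, sizeof(buf));
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;

    if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)      // take a filled buffer from the driver
        return -1;

    consume(mapped[buf.index], buf.bytesused);  // hand the frame to whoever displays/encodes it

    return ioctl(fd, VIDIOC_QBUF, &buf);        // give the buffer back so the driver can refill it
}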



class CameraFrame
{
    public:

    enum FrameType
        {
            PREVIEW_FRAME_SYNC = 0x1, ///SYNC implies that the frame needs to be explicitly returned after consuming in order to be filled by camera again
            PREVIEW_FRAME = 0x2   , ///Preview frame includes viewfinder and snapshot frames
            IMAGE_FRAME_SYNC = 0x4, ///Image Frame is the image capture output frame
            IMAGE_FRAME = 0x8,
            VIDEO_FRAME_SYNC = 0x10, ///Timestamp will be updated for these frames
            VIDEO_FRAME = 0x20,
            FRAME_DATA_SYNC = 0x40, ///Any extra data assosicated with the frame. Always synced with the frame
            FRAME_DATA= 0x80,
            RAW_FRAME = 0x100,
            SNAPSHOT_FRAME = 0x200,
            ALL_FRAMES = 0xFFFF   ///Maximum of 16 frame types supported
        };

    enum FrameQuirks
    {
        ENCODE_RAW_YUV422I_TO_JPEG = 0x1 << 0,
        HAS_EXIF_DATA = 0x1 << 1,
    };

    //default contrustor
    CameraFrame():
        ...
        {
        ...
    }

    //copy constructor
    CameraFrame(const CameraFrame &frame) :
        ...
        {
        ...
    }

    void *mCookie;
    void *mCookie2;
    void *mBuffer;
    int mFrameType;
    nsecs_t mTimestamp;
    unsigned int mWidth, mHeight;
    uint32_t mOffset;
    unsigned int mAlignment;
    int mFd;
    size_t mLength;
    unsigned mFrameMask;
    unsigned int mQuirks;
    unsigned int mYuv[2];
    ///@todo add other member vars like  stride etc
};

As for ANativeWindowDisplayAdapter, it gets a FrameProvider configured on it; in effect this registers a callback that is notified whenever data comes back from the driver — although at this point the registration has not actually happened yet.

int ANativeWindowDisplayAdapter::setFrameProvider(FrameNotifier *frameProvider)
{
    LOG_FUNCTION_NAME;

    // Check for NULL pointer
    if ( !frameProvider ) {
        CAMHAL_LOGEA("NULL passed for frame provider");
        LOG_FUNCTION_NAME_EXIT;
        return BAD_VALUE;
    }

    //Release any previous frame providers
    if ( NULL != mFrameProvider ) {
        delete mFrameProvider;
    }

    /** Dont do anything here, Just save the pointer for use when display is
         actually enabled or disabled
    */
    mFrameProvider = new FrameProvider(frameProvider, this, frameCallbackRelay);

    LOG_FUNCTION_NAME_EXIT;

    return NO_ERROR;
}

And when we start the preview we register this callback; starting the preview also allocates buffers, so the data coming back from the driver has somewhere to go. The allocation path is:
CameraHal::allocateBuffer(…) __dequeue_buffer

Below, the live preview is started and the callback registered.

int ANativeWindowDisplayAdapter::enableDisplay(int width, int height, struct timeval *refTime, S3DParameters *s3dParams)
{
    Semaphore sem;
    TIUTILS::Message msg;

    LOG_FUNCTION_NAME;

    if ( mDisplayEnabled )
        {
        CAMHAL_LOGDA("Display is already enabled");
        LOG_FUNCTION_NAME_EXIT;

        return NO_ERROR;
    }

#if 0 //TODO: s3d is not part of bringup...will reenable
    if (s3dParams)
        mOverlay->set_s3d_params(s3dParams->mode, s3dParams->framePacking,
                                    s3dParams->order, s3dParams->subSampling);
#endif

#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS

    if ( NULL != refTime )
        {
        Mutex::Autolock lock(mLock);
        memcpy(&mStandbyToShot, refTime, sizeof(struct timeval));
        mMeasureStandby = true;
    }

#endif

    //Send START_DISPLAY COMMAND to display thread. Display thread will start and then wait for a message
    sem.Create();
    msg.command = DisplayThread::DISPLAY_START;

    // Send the semaphore to signal once the command is completed
    msg.arg1 = &sem;

    ///Post the message to display thread
    mDisplayThread->msgQ().put(&msg);

    ///Wait for the ACK - implies that the thread is now started and waiting for frames
    sem.Wait();

    // Register with the frame provider for frames
    mFrameProvider->enableFrameNotification(CameraFrame::PREVIEW_FRAME_SYNC);

    mDisplayEnabled = true;
    mPreviewWidth = width;
    mPreviewHeight = height;

    CAMHAL_LOGVB("mPreviewWidth = %d mPreviewHeight = %d", mPreviewWidth, mPreviewHeight);

    LOG_FUNCTION_NAME_EXIT;

    return NO_ERROR;
}



int FrameProvider::enableFrameNotification(int32_t frameTypes)
{
    LOG_FUNCTION_NAME;
    status_t ret = NO_ERROR;

    ///Enable the frame notification to CameraAdapter (which implements FrameNotifier interface)
    mFrameNotifier->enableMsgType(frameTypes<<MessageNotifier::FRAME_BIT_FIELD_POSITION
                                    , mFrameCallback
                                    , NULL
                                    , mCookie
                                    );

    // frameCallbackRelay gets added to BaseCameraAdapter::mFrameSubscribers
    // see preview_stream_ops_t* mANativeWindow; and ANativeWindowDisplayAdapter::PostFrame(...)
    // __enqueue_buffer
    // eventually the data lands in CameraHardwareInterface's sp<ANativeWindow> mPreviewWindow;
    // so every incoming frame updates the preview we see

    LOG_FUNCTION_NAME_EXIT;
    return ret;
}

See the comments embedded in the code above — those are mine.

There is also a displayThread (inside ANativeWindowDisplayAdapter); it listens for messages coming from the Camera HAL and acts on them. Nothing too complicated.

In other words, pasting the data onto the view is done inside the HAL layer, even though it is ultimately just a sequence of ANativeWindow calls!

This is only a rough walkthrough; we will not dig into the deeper details for now.

How Android's HAL and the vendor's HAL implementation are tied together

Taking TI's platform as the example, the question is how these two files
/path/to/aosp/frameworks/av/services/camera/libcameraservice/CameraHardwareInterface.h

/path/to/aosp/hardware/ti/omap4xxx/camera/CameraHal.cpp
are related.

Who loads camera.$platform$.so? That .so is the real HAL implementation, and it is what ultimately calls into the driver (through rpmsg-omx1 in between).
That may feel abrupt — you may not know yet what camera.$platform$.so is. That's fine; by the time you reach the end you will see that the vendor's HAL implementation is packaged into exactly this .so.

void CameraService::onFirstRef()
{
    BnCameraService::onFirstRef();

    if (hw_get_module(CAMERA_HARDWARE_MODULE_ID,
                (const hw_module_t **)&mModule) < 0) {
        ALOGE("Could not load camera HAL module");
        mNumberOfCameras = 0;
    }
    else {
        mNumberOfCameras = mModule->get_number_of_cameras();
        if (mNumberOfCameras > MAX_CAMERAS) {
            ALOGE("Number of cameras(%d) > MAX_CAMERAS(%d).",
                    mNumberOfCameras, MAX_CAMERAS);
            mNumberOfCameras = MAX_CAMERAS;
        }
        for (int i = 0; i < mNumberOfCameras; i++) {
            setCameraFree(i);
        }
    }
}

In /path/to/aosp/hardware/libhardware/hardware.c there are the following pieces

hw_get_module(const char *id, const struct hw_module_t **module) // note the type here is hw_module_t, while what the vendor HAL actually implements is camera_module_t
/* Loop through the configuration variants looking for a module */
for (i=0 ; i<HAL_VARIANT_KEYS_COUNT+1 ; i++) {
    if (i < HAL_VARIANT_KEYS_COUNT) {
        if (property_get(variant_keys[i], prop, NULL) == 0) {
            continue;
        }
        snprintf(path, sizeof(path), "%s/%s.%s.so",
                 HAL_LIBRARY_PATH2, name, prop);
        if (access(path, R_OK) == 0) break;

        snprintf(path, sizeof(path), "%s/%s.%s.so",
                 HAL_LIBRARY_PATH1, name, prop);
        if (access(path, R_OK) == 0) break;
    } else {
        snprintf(path, sizeof(path), "%s/%s.default.so",
                 HAL_LIBRARY_PATH1, name);
        if (access(path, R_OK) == 0) break;
    }
}
static const char *variant_keys[] = {
    "ro.hardware",  /* This goes first so that it can pick up a different
                       file on the emulator. */
    "ro.product.board",
    "ro.board.platform",
    "ro.arch"
};

static const int HAL_VARIANT_KEYS_COUNT =
    (sizeof(variant_keys)/sizeof(variant_keys[0]));

/path/to/aosp/out/target/product/panda/system/build.prop
This file contains the platform/arch related properties.
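
So if, say, ro.board.platform is set to omap4 (an assumed example value), the loop above ends up probing paths like the ones printed below; HAL_LIBRARY_PATH1 and HAL_LIBRARY_PATH2 are /system/lib/hw and /vendor/lib/hw. A tiny stand-alone illustration of the snprintf logic:

#include <cstdio>

int main() {
    const char* name = "camera";   // CAMERA_HARDWARE_MODULE_ID
    const char* prop = "omap4";    // assumed value of ro.board.platform

    char path[256];
    // Same pattern hw_get_module() uses: <dir>/<id>.<prop>.so, with <id>.default.so as fallback.
    std::snprintf(path, sizeof(path), "/vendor/lib/hw/%s.%s.so", name, prop);
    std::printf("%s\n", path);     // /vendor/lib/hw/camera.omap4.so
    std::snprintf(path, sizeof(path), "/system/lib/hw/%s.%s.so", name, prop);
    std::printf("%s\n", path);     // /system/lib/hw/camera.omap4.so
    std::snprintf(path, sizeof(path), "/system/lib/hw/%s.default.so", name);
    std::printf("%s\n", path);     // /system/lib/hw/camera.default.so
    return 0;
}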

/*
 * load the symbols resolving undefined symbols before
 * dlopen returns. Since RTLD_GLOBAL is not or'd in with
 * RTLD_NOW the external symbols will not be global
 */
handle = dlopen(path, RTLD_NOW);

/* Get the address of the struct hal_module_info. */
const char *sym = HAL_MODULE_INFO_SYM_AS_STR;
hmi = (struct hw_module_t *)dlsym(handle, sym);
if (hmi == NULL) {
    ALOGE("load: couldn't find symbol %s", sym);
    status = -EINVAL;
    goto done;
}

*pHmi = hmi; // after dynamically loading the vendor HAL, this hw_module_t struct is returned for the Android service/HAL layer to use

So what is this HAL_MODULE_INFO_SYM_AS_STR that gets loaded?

First, what it is; the definition lives in hardware.h:

/**
 * Name of the hal_module_info
 */
#define HAL_MODULE_INFO_SYM         HMI

/**
 * Name of the hal_module_info as a string
 */
#define HAL_MODULE_INFO_SYM_AS_STR  "HMI"

In other words, dlsym looks up a variable/function named "HMI", which means the vendor HAL must export something with exactly that name.
Now look at the definition of hw_module_t.

/**
 * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
 * and the fields of this data structure must begin with hw_module_t
 * followed by module specific information.
 */
typedef struct hw_module_t {
    /** tag must be initialized to HARDWARE_MODULE_TAG */
    uint32_t tag;

    /**
     * The API version of the implemented module. The module owner is
     * responsible for updating the version when a module interface has
     * changed.
     *
     * The derived modules such as gralloc and audio own and manage this field.
     * The module user must interpret the version field to decide whether or
     * not to inter-operate with the supplied module implementation.
     * For example, SurfaceFlinger is responsible for making sure that
     * it knows how to manage different versions of the gralloc-module API,
     * and AudioFlinger must know how to do the same for audio-module API.
     *
     * The module API version should include a major and a minor component.
     * For example, version 1.0 could be represented as 0x0100. This format
     * implies that versions 0x0100-0x01ff are all API-compatible.
     *
     * In the future, libhardware will expose a hw_get_module_version()
     * (or equivalent) function that will take minimum/maximum supported
     * versions as arguments and would be able to reject modules with
     * versions outside of the supplied range.
     */
    uint16_t module_api_version;
#define version_major module_api_version
    /**
     * version_major/version_minor defines are supplied here for temporary
     * source code compatibility. They will be removed in the next version.
     * ALL clients must convert to the new version format.
     */

    /**
     * The API version of the HAL module interface. This is meant to
     * version the hw_module_t, hw_module_methods_t, and hw_device_t
     * structures and definitions.
     *
     * The HAL interface owns this field. Module users/implementations
     * must NOT rely on this value for version information.
     *
     * Presently, 0 is the only valid value.
     */
    uint16_t hal_api_version;
#define version_minor hal_api_version

    /** Identifier of module */
    const char *id;

    /** Name of this module */
    const char *name;

    /** Author/owner/implementor of the module */
    const char *author;

    /** Modules methods */
    struct hw_module_methods_t* methods;

    /** module's dso */
    void* dso;

    /** padding to 128 bytes, reserved for future use */
    uint32_t reserved[32-7];

} hw_module_t;

(Defined in hardware.h.)

For TI's hardware, what gets loaded is the structure named HMI inside camera.omap4.so (the camera.$platform$.so mentioned earlier).
It takes a careful look to notice that HAL_MODULE_INFO_SYM is in fact HMI — the macro defined in hardware.h.
HMI lives in
/path/to/aosp/hardware/ti/omap4xxx/camera/CameraHal_Module.cpp

camera_module_t HAL_MODULE_INFO_SYM = {
    common: {
         tag: HARDWARE_MODULE_TAG,
         version_major: 1,
         version_minor: 0,
         id: CAMERA_HARDWARE_MODULE_ID,
         name: "TI OMAP CameraHal Module",
         author: "TI",
         methods: &camera_module_methods,
         dso: NULL, /* remove compilation warnings */
         reserved: {0}, /* remove compilation warnings */
    },
    get_number_of_cameras: camera_get_number_of_cameras,
    get_camera_info: camera_get_camera_info,
};

Running 'arm-eabi-objdump' -t camera.omap4.so | grep 'HMI'
also confirms this.

Looking more closely, the common field is exactly a hw_module_t, which confirms what the comment at the hw_module_t definition says:

/**
* Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
* and the fields of this data structure must begin with hw_module_t
* followed by module specific information.
*/

One thing to keep in mind: hardware.h only ever talks about hw_module_t, while every vendor HAL implements its own xxx_module_t — the declared type and the actual type differ. What is the trick that makes this work?
It comes down to the first address the pointer points at: the vendor HAL's structure is required to have hw_module_t as its very first field, so the addresses line up.
In any case, you need to understand this idiom Android uses, otherwise you cannot see how the pieces are wired together. Pointers are powerful, but handle them with care, or they will hurt you!
camera_device_t and hw_device_t follow exactly the same pattern (a small sketch follows below).
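
Here is a tiny stand-alone illustration of the trick (the names are made up; the layout simply mirrors hw_module_t / camera_module_t): because the vendor struct's first member is the base struct, a pointer to the whole thing can be handed out as a pointer to the base and safely cast back.

#include <cstdio>

struct base_module {            // plays the role of hw_module_t
    const char* name;
};

struct camera_like_module {     // plays the role of camera_module_t
    base_module common;         // MUST be the first field
    int (*get_number_of_cameras)();
};

static int two_cameras() { return 2; }

camera_like_module HMI_example = { { "example camera module" }, two_cameras };

int main() {
    // The loader only knows about the base type...
    base_module* hmi = reinterpret_cast<base_module*>(&HMI_example);
    std::printf("loaded module: %s\n", hmi->name);

    // ...the camera framework casts it back, which is safe only because
    // 'common' sits at offset 0 of camera_like_module.
    camera_like_module* cam = reinterpret_cast<camera_like_module*>(hmi);
    std::printf("number of cameras: %d\n", cam->get_number_of_cameras());
    return 0;
}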

#define CAMERA_HARDWARE_MODULE_ID "camera"

Defined in camera_common.h.

Some other macros and definitions that get used:

#ifndef _LINUX_LIMITS_H
#define _LINUX_LIMITS_H
#define NR_OPEN 1024
#define NGROUPS_MAX 65536
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ARG_MAX 131072
#define CHILD_MAX 999
#define OPEN_MAX 256
#define LINK_MAX 127
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define MAX_CANON 255
#define MAX_INPUT 255
#define NAME_MAX 255
#define PATH_MAX 4096
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define PIPE_BUF 4096
#define XATTR_NAME_MAX 255
#define XATTR_SIZE_MAX 65536
#define XATTR_LIST_MAX 65536
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define RTSIG_MAX 32
#endif

For the Camera HAL you need to find

typedef struct camera_device {
    /**
     * camera_device.common.version must be in the range
     * HARDWARE_DEVICE_API_VERSION(0,0)-(1,FF). CAMERA_DEVICE_API_VERSION_1_0 is
     * recommended.
     */
    hw_device_t common;
    camera_device_ops_t *ops;
    void *priv;
} camera_device_t;

Located in camera.h.

typedef struct camera_module {
    hw_module_t common;
    int (*get_number_of_cameras)(void);
    int (*get_camera_info)(int camera_id, struct camera_info *info);
} camera_module_t;

Located in camera_common.h.

With these obstacles cleared, the rest goes much more smoothly.
Take takePicture as an example: CameraHal_Module ties it to CameraHal, so an upper-layer call ends up in the corresponding CameraHal method.
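
The binding itself is just a table of C function pointers whose implementations forward to the C++ object. A minimal sketch of that pattern (the names here are illustrative, not the exact TI wrapper code):

// Illustrative only: how a C entry point in the module wrapper forwards to the C++ HAL object.
struct camera_device_stub { void* priv; };   // stand-in for camera_device_t

class CameraHalStub {
public:
    int takePicture() { /* start the capture sequence */ return 0; }
};

// The function a wrapper would bind into camera_device_ops_t::take_picture.
extern "C" int camera_take_picture(camera_device_stub* device) {
    CameraHalStub* hal = static_cast<CameraHalStub*>(device->priv); // recover the C++ object
    return hal->takePicture();                                      // forward the call
}

With that binding in mind, here is CameraHal::takePicture: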

status_t CameraHal::takePicture( )
{
    status_t ret = NO_ERROR;
    CameraFrame frame;
    CameraAdapter::BuffersDescriptor desc;
    int burst;
    const char *valstr = NULL;
    unsigned int bufferCount = 1;

    Mutex::Autolock lock(mLock);

#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS

    gettimeofday(&mStartCapture, NULL);

#endif

    LOG_FUNCTION_NAME;

    if(!previewEnabled() && !mDisplayPaused)
        {
        LOG_FUNCTION_NAME_EXIT;
        CAMHAL_LOGEA("Preview not started...");
        return NO_INIT;
        }

    // return error if we are already capturing
    if ( (mCameraAdapter->getState() == CameraAdapter::CAPTURE_STATE &&
          mCameraAdapter->getNextState() != CameraAdapter::PREVIEW_STATE) ||
         (mCameraAdapter->getState() == CameraAdapter::VIDEO_CAPTURE_STATE &&
          mCameraAdapter->getNextState() != CameraAdapter::VIDEO_STATE) ) {
        CAMHAL_LOGEA("Already capturing an image...");
        return NO_INIT;
    }

    // we only support video snapshot if we are in video mode (recording hint is set)
    valstr = mParameters.get(TICameraParameters::KEY_CAP_MODE);
    if ( (mCameraAdapter->getState() == CameraAdapter::VIDEO_STATE) &&
         (valstr && strcmp(valstr, TICameraParameters::VIDEO_MODE)) ) {
        CAMHAL_LOGEA("Trying to capture while recording without recording hint set...");
        return INVALID_OPERATION;
    }

    if ( !mBracketingRunning )
        {

         if ( NO_ERROR == ret )
            {
            burst = mParameters.getInt(TICameraParameters::KEY_BURST);
            }

         //Allocate all buffers only in burst capture case
         if ( burst > 1 )
             {
             bufferCount = CameraHal::NO_BUFFERS_IMAGE_CAPTURE;
             if ( NULL != mAppCallbackNotifier.get() )
                 {
                 mAppCallbackNotifier->setBurst(true);
                 }
             }
         else
             {
             if ( NULL != mAppCallbackNotifier.get() )
                 {
                 mAppCallbackNotifier->setBurst(false);
                 }
             }

        // this is why the preview stops during a normal still capture, and after taking one shot we have to call startPreview again
        // pause preview during normal image capture
        // do not pause preview if recording (video state)
        if (NO_ERROR == ret &&
                NULL != mDisplayAdapter.get() &&
                burst < 1) {
            if (mCameraAdapter->getState() != CameraAdapter::VIDEO_STATE) {
                mDisplayPaused = true;
                mPreviewEnabled = false;
                ret = mDisplayAdapter->pauseDisplay(mDisplayPaused);
                // since preview is paused we should stop sending preview frames too
                if(mMsgEnabled & CAMERA_MSG_PREVIEW_FRAME) {
                    mAppCallbackNotifier->disableMsgType (CAMERA_MSG_PREVIEW_FRAME);
                }
            }

#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS
            mDisplayAdapter->setSnapshotTimeRef(&mStartCapture);
#endif
        }

        // if we taking video snapshot...
        if ((NO_ERROR == ret) && (mCameraAdapter->getState() == CameraAdapter::VIDEO_STATE)) {
            // enable post view frames if not already enabled so we can internally
            // save snapshot frames for generating thumbnail
            if((mMsgEnabled & CAMERA_MSG_POSTVIEW_FRAME) == 0) {
                mAppCallbackNotifier->enableMsgType(CAMERA_MSG_POSTVIEW_FRAME);
            }
        }

        if ( (NO_ERROR == ret) && (NULL != mCameraAdapter) )
            {
            if ( NO_ERROR == ret )
                ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_QUERY_BUFFER_SIZE_IMAGE_CAPTURE,
                                                  ( int ) &frame,
                                                  bufferCount);

            if ( NO_ERROR != ret )
                {
                CAMHAL_LOGEB("CAMERA_QUERY_BUFFER_SIZE_IMAGE_CAPTURE returned error 0x%x", ret);
                }
            }

        if ( NO_ERROR == ret )
            {
            mParameters.getPictureSize(( int * ) &frame.mWidth,
                                       ( int * ) &frame.mHeight);

            // allocate the buffers
            ret = allocImageBufs(frame.mWidth,
                                 frame.mHeight,
                                 frame.mLength,
                                 mParameters.getPictureFormat(),
                                 bufferCount);
            if ( NO_ERROR != ret )
                {
                CAMHAL_LOGEB("allocImageBufs returned error 0x%x", ret);
                }
            }

        if (  (NO_ERROR == ret) && ( NULL != mCameraAdapter ) )
            {
            desc.mBuffers = mImageBufs;
            desc.mOffsets = mImageOffsets;
            desc.mFd = mImageFd;
            desc.mLength = mImageLength;
            desc.mCount = ( size_t ) bufferCount;
            desc.mMaxQueueable = ( size_t ) bufferCount;

            ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_USE_BUFFERS_IMAGE_CAPTURE,
                                              ( int ) &desc);
            }
        }

    if ( ( NO_ERROR == ret ) && ( NULL != mCameraAdapter ) )
        {

#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS

         //pass capture timestamp along with the camera adapter command
        ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_START_IMAGE_CAPTURE,  (int) &mStartCapture);

#else

        ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_START_IMAGE_CAPTURE);

#endif

        }

    return ret;
}

Passing through BaseCameraAdapter (whose takePicture is only a stub) we reach
status_t OMXCameraAdapter::takePicture(); the action is then queued onto the command queue for execution, and the real work ends up in OMXCapture::startImageCapture().
There, buffers are pushed onto the capture port via OMX_FillThisBuffer; once the capture completes, a FillBufferDone callback arrives and we enter the callback function.

OMX_ERRORTYPE OMXCameraAdapter::OMXCameraGetHandle(OMX_HANDLETYPE *handle, OMX_PTR pAppData )
{
    OMX_ERRORTYPE eError = OMX_ErrorUndefined;

    for ( int i = 0; i < 5; ++i ) {
        if ( i > 0 ) {
            // sleep for 100 ms before next attempt
            usleep(100000);
        }

        // setup key parameters to send to Ducati during init
        OMX_CALLBACKTYPE oCallbacks;

        // initialize the callback handles // register the callbacks
        oCallbacks.EventHandler    = android::OMXCameraAdapterEventHandler;
        oCallbacks.EmptyBufferDone = android::OMXCameraAdapterEmptyBufferDone;
        oCallbacks.FillBufferDone  = android::OMXCameraAdapterFillBufferDone;

        // get handle
        eError = OMX_GetHandle(handle, (OMX_STRING)"OMX.TI.DUCATI1.VIDEO.CAMERA", pAppData, &oCallbacks);
        // note which library this is: it is actually /path/to/aosp/hardware/ti/omap4xxx/domx/omx_proxy_component/omx_camera/src/omx_proxy_camera.c
        // which ultimately talks to the lower layers through pRPCCtx->fd_omx = open("/dev/rpmsg-omx1", O_RDWR);
        if ( eError == OMX_ErrorNone ) {
            return OMX_ErrorNone;
        }

        CAMHAL_LOGEB("OMX_GetHandle() failed, error: 0x%x", eError);
    }

    *handle = 0;
    return eError;
}

With the callback in place, the data keeps propagating upwards until the user finally gets the captured picture data.
CameraAdapter_Factory(size_t sensor_index) lives in OMXCameraAdapter.

TI OMAP also has an alternative path implemented on top of V4L2; it is just not enabled by default.
It goes through V4LCameraAdapter::UseBuffersPreview
and ultimately ends up in V4L2.

That variant's CameraAdapter_Factory() lives in V4LCameraAdapter.

How Stagefright and the codecs interact

Stagefright talks to the codecs through OpenMAX IL.

The OpenMAX IL specification headers live in
/path/to/aosp/frameworks/native/include/media/openmax

A quick overview of OpenMAX IL: it is built around clients, components, ports, and tunneling.
A client is whatever drives a component; ports and tunnels are how components talk to each other, and components may also have private communication channels of their own.
A component's role is closely tied to the kinds of ports it defines. Typically: only one output port — a source component; only one input port — a sink component; several inputs and one output — a mux component; one input and several outputs — a demux component; one input and one output — an intermediate processing stage, which is the most common kind.

In Android 2.2 and later, Stagefright sits on top of OpenMAX IL: it interfaces with OpenMAX IL, which in turn wraps the concrete codecs.

I analyzed StagefrightRecorder before at http://guoh.org/lifelog/2013/06/android-mediarecorder-architecture/, but did not dig into the codec integration at that time.

This part walks through that logic in detail.

The first key class we run into is OMX; its constructor creates an OMXMaster. Following that further, OMXMaster is, as its name says, the manager.
As the manager, every plugin that gets added is recorded in

KeyedVector<String8, OMXPluginBase *> mPluginByComponentName; // a codec name that already exists will not be added again; i.e. the master asks each plugin which codecs it supports and records them here
List<OMXPluginBase *> mPlugins; // mainly for releasing memory (not business logic — it simply tracks every plugin that was added)

How many OMXPlugins does OMXMaster end up adding?
And how many OMXMasters are there, anyway?
There are as many OMXMasters as there are OMX instances — so how many OMX instances are there?
Exactly one, see:

sp<IOMX> MediaPlayerService::getOMX() {
    Mutex::Autolock autoLock(mLock);

    if (mOMX.get() == NULL) {
        mOMX = new OMX;
    }

    return mOMX;
}

So how many OMXPlugins are there?

One SoftOMXPlugin, which in turn can load many codecs.
/path/to/aosp/frameworks/av/media/libstagefright/codecs/
All the soft codecs live under the directory above.

It is worth spelling out the class structure of an actual codec, taking FLAC as the example (the arrows point in the direction of inheritance):
SoftFlacEncoder -> SimpleSoftOMXComponent -> SoftOMXComponent. SoftOMXComponent wraps the component defined by OpenMAX IL into a C++ class and binds the component operations to methods, but every one of them is a stub — the real implementations live in the subclasses.
SimpleSoftOMXComponent provides an ALooper and an AHandlerReflector for message passing and implements sendCommand, emptyThisBuffer and fillThisBuffer as asynchronous methods (why only these three? because the OpenMAX IL spec requires them to be non-blocking, which here is achieved by posting and handling messages through the looper/handler), plus useBuffer, allocateBuffer, freeBuffer and a number of other methods.
So an operation OMXCodec performs on an OpenMAX IL component goes through IOMX, then OMXNodeInstance, and finally lands in our SoftOMXComponent — and since, as just mentioned, the concrete implementation lives in the subclass, you only have to find the corresponding place there.

The code path below is quite clear; take sendCommand as the example:

status_t OMXCodec::init() {
    // mLock is held.
    ...
    err = mOMX->sendCommand(mNode, OMX_CommandStateSet, OMX_StateIdle);
    ...
}
status_t OMX::sendCommand(
        node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) {
    return findInstance(node)->sendCommand(cmd, param);
}
status_t OMXNodeInstance::sendCommand(
        OMX_COMMANDTYPE cmd, OMX_S32 param) {
    Mutex::Autolock autoLock(mLock);

    OMX_ERRORTYPE err = OMX_SendCommand(mHandle, cmd, param, NULL); // note how this macro is defined: it simply calls the component's SendCommand method
    return StatusFromOMXError(err);
}
/** Send a command to the component.  This call is a non-blocking call.
    The component should check the parameters and then queue the command
    to the component thread to be executed.  The component thread shall 
    send the EventHandler() callback at the conclusion of the command. 
    This macro will go directly from the application to the component (via
    a core macro).  The component will return from this call within 5 msec.
    
    When the command is "OMX_CommandStateSet" the component will queue a
    state transition to the new state idenfied in nParam.
    
    When the command is "OMX_CommandFlush", to flush a port's buffer queues,
    the command will force the component to return all buffers NOT CURRENTLY 
    BEING PROCESSED to the application, in the order in which the buffers 
    were received.
    
    When the command is "OMX_CommandPortDisable" or 
    "OMX_CommandPortEnable", the component's port (given by the value of
    nParam) will be stopped or restarted. 
    
    When the command "OMX_CommandMarkBuffer" is used to mark a buffer, the
    pCmdData will point to a OMX_MARKTYPE structure containing the component
    handle of the component to examine the buffer chain for the mark.  nParam1
    contains the index of the port on which the buffer mark is applied.

    Specification text for more details. 
    
    @param [in] hComponent
        handle of component to execute the command
    @param [in] Cmd
        Command for the component to execute
    @param [in] nParam
        Parameter for the command to be executed.  When Cmd has the value 
        OMX_CommandStateSet, value is a member of OMX_STATETYPE.  When Cmd has 
        the value OMX_CommandFlush, value of nParam indicates which port(s) 
        to flush. -1 is used to flush all ports a single port index will 
        only flush that port.  When Cmd has the value "OMX_CommandPortDisable"
        or "OMX_CommandPortEnable", the component's port is given by 
        the value of nParam.  When Cmd has the value "OMX_CommandMarkBuffer"
        the components pot is given by the value of nParam.
    @param [in] pCmdData
        Parameter pointing to the OMX_MARKTYPE structure when Cmd has the value
        "OMX_CommandMarkBuffer".     
    @return OMX_ERRORTYPE
        If the command successfully executes, the return code will be
        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
    @ingroup comp
 */
#define OMX_SendCommand(                                    \
         hComponent,                                        \
         Cmd,                                               \
         nParam,                                            \
         pCmdData)                                          \
     ((OMX_COMPONENTTYPE*)hComponent)->SendCommand(         \
         hComponent,                                        \
         Cmd,                                               \
         nParam,                                            \
         pCmdData)                          /* Macro End */
OMX_ERRORTYPE SimpleSoftOMXComponent::sendCommand(
        OMX_COMMANDTYPE cmd, OMX_U32 param, OMX_PTR data) {
    CHECK(data == NULL);

    sp<AMessage> msg = new AMessage(kWhatSendCommand, mHandler->id());
    msg->setInt32("cmd", cmd);
    msg->setInt32("param", param);
    msg->post();

    return OMX_ErrorNone;
}

Since these calls are asynchronous, the corresponding handlers run later: onMessageReceived has cases for kWhatSendCommand, kWhatEmptyThisBuffer and kWhatFillThisBuffer, each of which calls its own handling method.
The synchronous methods follow a similar call path; we will not go through them one by one — reading the code alongside this should make them clear.
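
The shape of the pattern is simply "post a message, return immediately, do the real work on the component's own thread". Here is a stand-alone analogy built with std::thread (this is not the AOSP ALooper/AHandler code, just the same idea in miniature):

#include <condition_variable>
#include <cstdio>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class MiniComponent {
public:
    MiniComponent() : mDone(false), mThread([this] { loop(); }) {}
    ~MiniComponent() {
        { std::lock_guard<std::mutex> l(mLock); mDone = true; }
        mCond.notify_one();
        mThread.join();
    }

    // Non-blocking, like SimpleSoftOMXComponent::sendCommand: just queue the work and return.
    void sendCommand(int cmd, int param) {
        { std::lock_guard<std::mutex> l(mLock);
          mQueue.push([cmd, param] { std::printf("handling cmd %d param %d\n", cmd, param); }); }
        mCond.notify_one();
    }

private:
    void loop() {                                  // the "component thread"
        std::unique_lock<std::mutex> l(mLock);
        while (true) {
            mCond.wait(l, [this] { return mDone || !mQueue.empty(); });
            if (mDone && mQueue.empty()) return;
            auto work = mQueue.front(); mQueue.pop();
            l.unlock(); work(); l.lock();          // run the handler outside the lock
        }
    }

    std::mutex mLock;
    std::condition_variable mCond;
    std::queue<std::function<void()>> mQueue;
    bool mDone;
    std::thread mThread;
};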

One more thing worth mentioning: how and when the concrete codecs get created. A soft codec is created inside SoftOMXPlugin::makeComponentInstance by calling that codec's createSoftOMXComponent method — in other words, every codec implementation must expose a method with exactly that name. For FLAC it is:

android::SoftOMXComponent *createSoftOMXComponent(
        const char *name, const OMX_CALLBACKTYPE *callbacks,
        OMX_PTR appData, OMX_COMPONENTTYPE **component) {
    return new android::SoftFlacEncoder(name, callbacks, appData, component);
}

How many vendor plugins (i.e. libstagefrighthw) are there?
Also exactly one, and it too contains many codecs. Taking TI's hardware as the example:
TIOMXPlugin (the vendor plugin) lives in /path/to/aosp/hardware/ti/omap4xxx/libstagefrighthw/
but the real implementation is in /path/to/aosp/hardware/ti/omap4xxx/domx/omx_core/ (i.e. libOMX_Core.so).
To instantiate a codec, the call goes through the following steps:
TIOMXPlugin.cpp (makeComponentInstance) -> OMX_Core_Wrapper.c (TIOMX_GetHandle) -> OMX_Core.c (OMX_GetHandle) -> dynamically load the specified codec's .so and call its init method (OMX_ComponentInit). That method name appears to come from the spec (the spec mentions it in an error code description):

/** The component specified did not have a "OMX_ComponentInit" or
  "OMX_ComponentDeInit entry point */
OMX_ErrorInvalidComponent = (OMX_S32) 0x80001004,

The codecs it supports are under /path/to/aosp/hardware/ti/omap4xxx/domx/omx_proxy_component/
though the actual codec implementations may well live elsewhere!

The methods below are the standard OMX-core-level methods defined by OpenMAX IL (see OMX_Core.h); TI implements them in OMX_Core.c. They are the interface for managing the components that the OpenMAX IL layer wraps, including the core's own lifecycle and the communication between components.

OMX_API OMX_ERRORTYPE OMX_Init
OMX_API OMX_ERRORTYPE OMX_Deinit
OMX_API OMX_ERRORTYPE OMX_ComponentNameEnum
OMX_API OMX_ERRORTYPE OMX_GetHandle
OMX_API OMX_ERRORTYPE OMX_FreeHandle
OMX_API OMX_ERRORTYPE OMX_SetupTunnel
OMX_API OMX_ERRORTYPE OMX_GetContentPipe
OMX_API OMX_ERRORTYPE OMX_GetComponentsOfRole
OMX_API OMX_ERRORTYPE OMX_GetRolesOfComponent

In other words, OMX is the window onto all codecs: it manages the soft/vendor plugins through OMXMaster, and each plugin owns its own codecs.
A typical example: OMX has an allocateNode method, which is what you need when adding a codec; under the hood it goes OMXMaster -> OMXPlugin -> OMX_Core -> the specific codec's instantiation method.

status_t OMX::allocateNode(
        const char *name, const sp<IOMXObserver> &observer, node_id *node) {
    Mutex::Autolock autoLock(mLock);

    *node = 0;

    OMXNodeInstance *instance = new OMXNodeInstance(this, observer);

    OMX_COMPONENTTYPE *handle; // declare an OMX_COMPONENTTYPE pointer

    // makeComponentInstance then creates it, returning the value through the double pointer
    // following it down, the codec ends up calling something like this (for TI's hardware it lives in omx_proxy_common.c):
    // /*Calling Proxy Common Init() */
    // eError = OMX_ProxyCommonInit(hComponent);
    // which initializes the various fields and methods of OMX_COMPONENTTYPE, e.g. OMX_COMPONENTTYPE::SetCallbacks and so on

    OMX_ERRORTYPE err = mMaster->makeComponentInstance(
            name, &OMXNodeInstance::kCallbacks,
            instance, &handle);

    if (err != OMX_ErrorNone) {
        ALOGV("FAILED to allocate omx component '%s'", name);

        instance->onGetHandleFailed();

        return UNKNOWN_ERROR;
    }

    *node = makeNodeID(instance);
    mDispatchers.add(*node, new CallbackDispatcher(instance));

    instance->setHandle(*node, handle);

    mLiveNodes.add(observer->asBinder(), instance);
    observer->asBinder()->linkToDeath(this);

    return OK;
}

Two things worth noting:
a)

status_t OMXClient::connect() {
    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> binder = sm->getService(String16("media.player"));
    sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);

    // defaultServiceManager (in /path/to/aosp/frameworks/native/include/binder/IServiceManager.h)
    // calling it this way is just like calling getOMX on a local service pointer, since StagefrightRecorder itself is new'ed inside MediaPlayerService
    // we experimented with this before; for details see http://guoh.org/lifelog/2013/06/sample-of-customized-service-on-android-platform/ on adding a system-level service to Android

    CHECK(service.get() != NULL);

    mOMX = service->getOMX(); // BnOMX
    CHECK(mOMX.get() != NULL);

    // in what situation would it not live locally? OMX seems to be new'ed inside MediaPlayerService,
    // OMXClient is new'ed inside StagefrightRecorder, and StagefrightRecorder is in turn new'ed inside MediaPlayerService,
    // so when would we ever take the "using client-side OMX mux" branch below (MuxOMX lives in OMXClient.cpp)?
    if (!mOMX->livesLocally(NULL /* node */, getpid())) {
        ALOGI("Using client-side OMX mux.");
        mOMX = new MuxOMX(mOMX); // inherits from IOMX
    }

    return OK;
}

It is easy to see that OMXClient is the window for obtaining OMX.

b)

sp<MediaSource> encoder = OMXCodec::Create(
        client.interface(), enc_meta,
        true /* createEncoder */, cameraSource,
        NULL, encoder_flags);

So what are OMXCodec
/path/to/aosp/frameworks/av/include/media/stagefright
/path/to/aosp/frameworks/av/media/libstagefright
and OMXNodeInstance
/path/to/aosp/frameworks/av/media/libstagefright/include
/path/to/aosp/frameworks/av/media/libstagefright/omx
actually for?
OMXCodec, OMXCodecObserver and OMXNodeInstance correspond one-to-one;
roughly speaking, the three of them together make up one OpenMAX IL component, and each node is the identifier of one codec on the OMX service side.
There is also CallbackDispatcher, which handles messages coming from the codec: it receives them through its post/loop/dispatch, and they travel all the way up via IOMX::onMessage -> OMXNodeInstance::onMessage -> OMXCodecObserver::onMessage -> OMXCodec::on_message. The messages originate, of course, from the OMXNodeInstance::kCallbacks we registered with the codec — see:

status_t OMX::allocateNode(
        const char *name, const sp<IOMXObserver> &observer, node_id *node) {
    ...
    OMX_ERRORTYPE err = mMaster->makeComponentInstance(
            name, &OMXNodeInstance::kCallbacks,
            instance, &handle);
    ...
    return OK;
}

kCallbacks covers three kinds of events:

OMX_CALLBACKTYPE OMXNodeInstance::kCallbacks = {
    &OnEvent, &OnEmptyBufferDone, &OnFillBufferDone
};

Each of them calls the corresponding OnEvent/OnEmptyBufferDone/OnFillBufferDone on its owner.

// static
OMX_ERRORTYPE OMXNodeInstance::OnEvent(
        OMX_IN OMX_HANDLETYPE hComponent,
        OMX_IN OMX_PTR pAppData,
        OMX_IN OMX_EVENTTYPE eEvent,
        OMX_IN OMX_U32 nData1,
        OMX_IN OMX_U32 nData2,
        OMX_IN OMX_PTR pEventData) {
    OMXNodeInstance *instance = static_cast<OMXNodeInstance *>(pAppData);
    if (instance->mDying) {
        return OMX_ErrorNone;
    }
    return instance->owner()->OnEvent(
            instance->nodeID(), eEvent, nData1, nData2, pEventData);
}

And the owner in turn calls its dispatcher's post method, as follows:

OMX_ERRORTYPE OMX::OnEvent(
        node_id node,
        OMX_IN OMX_EVENTTYPE eEvent,
        OMX_IN OMX_U32 nData1,
        OMX_IN OMX_U32 nData2,
        OMX_IN OMX_PTR pEventData) {
    ALOGV("OnEvent(%d, %ld, %ld)", eEvent, nData1, nData2);

    omx_message msg;
    msg.type = omx_message::EVENT;
    msg.node = node;
    msg.u.event_data.event = eEvent;
    msg.u.event_data.data1 = nData1;
    msg.u.event_data.data2 = nData2;

    findDispatcher(node)->post(msg);

    return OMX_ErrorNone;
}

With that, all the events are wired up — every message has a source and a final destination!

Putting this together, we can say that what gets created here is, effectively, a component.

One more thing worth mentioning: as said earlier, components can also exchange data with each other. For example, while recording video, CameraSource acts as the data source held by OMXCodec, so the data read out of OMXCodec actually comes from CameraSource, and it is then written to the file by a MediaWriter such as MPEG4Writer.

OMX_GetComponentVersion
OMX_SendCommand
OMX_GetParameter
OMX_SetParameter
OMX_GetConfig
OMX_SetConfig
OMX_GetExtensionIndex
OMX_GetState
OMX_UseBuffer
OMX_AllocateBuffer
OMX_FreeBuffer
OMX_EmptyThisBuffer
OMX_FillThisBuffer
OMX_UseEGLImage

The macros above live in OMX_Core.h and are defined by OpenMAX.
They wrap each individual component and are responsible for talking to the concrete codec; for TI's hardware these methods are implemented in omx_proxy_common.c.

Other odds and ends:
1. HardwareAPI.h
extern android::OMXPluginBase *createOMXPlugin();
declares the entry point for instantiating a plugin; OMXMaster calls this method dynamically.

2. For Qualcomm hardware, the libstagefrighthw implementation lives at:
libomxcore.so /path/to/aosp/hardware/qcom/media/mm-core/omxcore

3. The remaining key piece is how OpenMAX IL itself works internally and how its parts talk to each other — that is the important part.
As everyone knows, the biggest job of OpenMAX IL is to act as an adaptation layer: it encapsulates the parts of the vendor side that change easily (different codecs, different vendors, or other variations) and presents a uniform interface to the layer above (here, Stagefright).

A component's state can be any of the following; these are defined in OMXCodec:

enum State {
    DEAD,
    LOADED,
    LOADED_TO_IDLE,
    IDLE_TO_EXECUTING,
    EXECUTING,
    EXECUTING_TO_IDLE,
    IDLE_TO_LOADED,
    RECONFIGURING,
    ERROR
};

Interaction between components — e.g. tunneled communication — is done through the component's ComponentTunnelRequest method (TI's hardware implements it in omx_proxy_common.c), but judging from the code, neither the Qualcomm nor the TI camera path actually uses tunneled communication; Qualcomm does not even implement ComponentTunnelRequest.

The buffer transfer process looks complicated at first, but the flow is actually fairly clear. In the camera recording scenario, MediaWriter keeps calling the MediaSource's read method to pull data out of it, while the MediaSource throttles this internally with a condition variable (Condition mFrameAvailableCondition;). The two most important OMXCodec methods are drainInputBuffer and fillOutputBuffer: draining the input buffers (data coming from the source) and filling the output buffers (data destined for the writer). drainInputBuffer sends data to the actual codec; once the codec has taken it, it sends back an EMPTY_BUFFER_DONE message, and on receiving it OMXCodec keeps feeding more data. At the same time OMXCodec also calls fillOutputBuffer, i.e. hands the codec an empty buffer; when the codec finishes encoding, it sends back FILL_BUFFER_DONE, a message that carries the encoded data — the filled buffer — and with that MediaWriter receives the encoded data and only has to save it. The finer details are easier to follow by tracing the code yourself. You may already be wondering whether there are two sets of buffers here — yes, you are right; see List<size_t> mFilledBuffers;

Here is the actual sequence once more, spelled out verbosely (MPEG-4 recording as the example):
MPEG4Writer:
Once reading starts, drainInputBuffers() is called. It takes buffers from kPortIndexInput, first reads data from CameraSource into such a buffer, then calls IOMX's emptyBuffer to hand the buffer to the actual encoder, and then waits in a loop for the encoder to fill an output buffer (taken from kPortIndexOutput): when the encoder's FILL_BUFFER_DONE comes back, the data carried by the message is placed into the output buffer, the buffer's index is recorded in mFilledBuffers, and mBufferFilled.signal() broadcasts it. In other words OMXCodec's read method is synchronous — it only returns once data has been read from CameraSource and the encoder has finished encoding and handed data back. MPEG4Writer then makes its own copy of that buffer and releases the original (the buffer is shared by everyone, so MPEG4Writer does not want to, or should not, hold it for long; how long a file write takes is not bounded, so this is essentially trading space for time).
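
To keep the shape of this handshake in mind, here is a toy model of just the waiting/signalling part (my own simplification — the real OMXCodec is far more involved than this):

#include <condition_variable>
#include <list>
#include <mutex>

// Toy model of the read()/FILL_BUFFER_DONE handshake described above.
class ToyCodec {
public:
    // Called on the OMX callback path when the encoder reports FILL_BUFFER_DONE.
    void onFillBufferDone(size_t bufferIndex) {
        std::lock_guard<std::mutex> l(mLock);
        mFilledBuffers.push_back(bufferIndex);   // remember which output buffer is ready
        mBufferFilled.notify_all();              // wake up the reader (the writer's track thread)
    }

    // Called by the writer; blocks until the encoder has produced something.
    size_t read() {
        std::unique_lock<std::mutex> l(mLock);
        mBufferFilled.wait(l, [this] { return !mFilledBuffers.empty(); });
        size_t index = mFilledBuffers.front();
        mFilledBuffers.pop_front();
        return index;                            // caller copies the data out, then releases the buffer
    }

private:
    std::mutex mLock;
    std::condition_variable mBufferFilled;
    std::list<size_t> mFilledBuffers;
};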

Buffer allocation:
the MediaBuffer for the input side is allocated in CameraSource and released in OMXCodec (when EMPTY_BUFFER_DONE comes back, if the buffer has not been released yet, the release happens there);
the MediaBuffer for the output side has several cases — it may be allocated at allocateBuffer time, or when FILL_BUFFER_DONE comes back — and it is released after MediaWriter has read the processed data and made its own copy of it.

End of stream:
normally the decoder or encoder tells us about EOS, but for the cases where it does not, OMXCodec has a workaround: it counts the buffers that are not held by the OMX component, and once no buffer is held by the component any more, it treats that as EOS.

Bitmap notes

These are notes from reading up on Bitmap-related material!
Environment the programs were run in:

Linux KNIGHT 3.0.0-28-generic #45-Ubuntu SMP Wed Nov 14 21:57:26 UTC 2012 x86_64 x86_64 x86_64 GNU/Linux

A BMP image file is divided into four parts: the bitmap file header (Bitmap File Header), the bitmap info header (Bitmap Info Header), the color map, and the bitmap data (i.e. the image data, Data Bits or Data Body).

Chapter 1 of this book (http://vipbase.net/ipbook/chap01.htm) covers these.

biBitCount: the number of bits per pixel; it must be 1 (black-and-white), 4 (16 colors), 8 (256 colors), 24 (true color), or 32 (with an alpha transparency channel in the leading 8 bits).
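
For reference, here is a minimal stand-alone set of header definitions for parsing such a file (field names follow the common Windows naming; note the packing — see the pitfalls links further down):

#include <cstdint>

#pragma pack(push, 1)            // BMP headers are laid out on disk without padding
struct BitmapFileHeader {
    uint16_t bfType;             // must be 'BM' (0x4D42)
    uint32_t bfSize;             // whole file size in bytes
    uint16_t bfReserved1;
    uint16_t bfReserved2;
    uint32_t bfOffBits;          // offset from the start of the file to the pixel data
};

struct BitmapInfoHeader {
    uint32_t biSize;             // size of this header (40)
    int32_t  biWidth;
    int32_t  biHeight;           // positive: rows stored bottom-up
    uint16_t biPlanes;           // always 1
    uint16_t biBitCount;         // 1 / 4 / 8 / 24 / 32 bits per pixel, as described above
    uint32_t biCompression;      // 0 = BI_RGB (uncompressed)
    uint32_t biSizeImage;
    int32_t  biXPelsPerMeter;
    int32_t  biYPelsPerMeter;
    uint32_t biClrUsed;          // number of color-table entries actually used
    uint32_t biClrImportant;
};
#pragma pack(pop)

static_assert(sizeof(BitmapFileHeader) == 14, "file header must be 14 bytes");
static_assert(sizeof(BitmapInfoHeader) == 40, "info header must be 40 bytes");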

The following blog posts cover some of the basics:
http://www.cnblogs.com/shengansong/archive/2011/09/23/2186409.html
http://blog.csdn.net/yutianzuijin/article/details/8243343
http://blog.csdn.net/scut1135/article/details/5573395

Using the material above I adapted a program so that it compiles and runs on x86_64 GNU/Linux; the comments in it point out the things to watch for.

Some pitfalls of __attribute__((packed)); please refer to:
http://stackoverflow.com/questions/8568432/is-gccs-attribute-packed-pragma-pack-unsafe
http://stackoverflow.com/questions/11770451/what-is-the-meaning-of-attribute-packed-aligned4
http://stackoverflow.com/questions/11667181/why-does-padding-have-to-be-a-power-of-two
http://stackoverflow.com/questions/119123/why-isnt-sizeof-for-a-struct-equal-to-the-sum-of-sizeof-of-each-member
http://en.wikipedia.org/wiki/Data_structure_alignment

Source code for a BMP-to-JPEG converter on Linux:
http://www.linuxidc.com/Linux/2011-03/33193.htm

If you want to do JPEG-related development yourself, make sure these packages are installed:

sudo apt-get install libjpeg62
sudo apt-get install libjpeg62-dev
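
If you do roll your own BMP-to-JPEG converter on top of those packages, the compression side boils down to the standard libjpeg sequence below (a minimal sketch that assumes you already have the pixels as packed 8-bit RGB rows, top row first — remember a BMP stores rows bottom-up and in BGR order, so convert them first):

#include <cstdio>
#include <jpeglib.h>   // from libjpeg62-dev

// Write interleaved 8-bit RGB pixels (width*height*3 bytes, top row first) to a JPEG file.
static bool write_jpeg(const char* path, unsigned char* rgb,
                       int width, int height, int quality) {
    FILE* fp = std::fopen(path, "wb");
    if (!fp) return false;

    jpeg_compress_struct cinfo;
    jpeg_error_mgr jerr;
    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_compress(&cinfo);
    jpeg_stdio_dest(&cinfo, fp);

    cinfo.image_width = width;
    cinfo.image_height = height;
    cinfo.input_components = 3;        // R, G, B
    cinfo.in_color_space = JCS_RGB;
    jpeg_set_defaults(&cinfo);
    jpeg_set_quality(&cinfo, quality, TRUE);

    jpeg_start_compress(&cinfo, TRUE);
    while (cinfo.next_scanline < cinfo.image_height) {
        JSAMPROW row = rgb + cinfo.next_scanline * width * 3;   // one scanline at a time
        jpeg_write_scanlines(&cinfo, &row, 1);
    }
    jpeg_finish_compress(&cinfo);
    jpeg_destroy_compress(&cinfo);
    std::fclose(fp);
    return true;
}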

Android MediaRecorder system structure

Camera was analyzed earlier; now let's look at how MediaRecorder is implemented. I will not pay much attention to its layering here — I care more about its logic!

APP layer /path/to/aosp/frameworks/base/media/java/android/media/MediaRecorder.java
JNI layer /path/to/aosp/frameworks/base/media/jni/android_media_MediaRecorder.cpp
which calls the native-layer MediaRecorder (a BnMediaRecorderClient here)
header /path/to/aosp/frameworks/av/include/media/mediarecorder.h
implementation /path/to/aosp/frameworks/av/media/libmedia/mediarecorder.cpp

MediaRecorder::MediaRecorder() : mSurfaceMediaSource(NULL)
{
    ALOGV("constructor");

    const sp<IMediaPlayerService>& service(getMediaPlayerService());
    if (service != NULL) {
        mMediaRecorder = service->createMediaRecorder(getpid());
    }
    if (mMediaRecorder != NULL) {
        mCurrentState = MEDIA_RECORDER_IDLE;
    }

    doCleanUp();
}

getMediaPlayerService() is declared in /path/to/aosp/frameworks/av/include/media/IMediaDeathNotifier.h

After getting hold of MediaPlayerService (a BpMediaPlayerService),
the next call is into IMediaPlayerService:

sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(pid_t pid)
{
    sp<MediaRecorderClient> recorder = new MediaRecorderClient(this, pid);
    wp<MediaRecorderClient> w = recorder;
    Mutex::Autolock lock(mLock);
    mMediaRecorderClients.add(w);
    ALOGV("Create new media recorder client from pid %d", pid);
    return recorder;
}

This creates a MediaRecorderClient (a BnMediaRecorder here).

What the client gets back across binder, though, is a BpMediaRecorder,
because of the interface_cast step below:

virtual sp<IMediaRecorder> createMediaRecorder(pid_t pid)
{
    Parcel data, reply;
    data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
    data.writeInt32(pid);
    remote()->transact(CREATE_MEDIA_RECORDER, data, &reply);
    return interface_cast<IMediaRecorder>(reply.readStrongBinder());
}

MediaRecorderClient in turn creates a StagefrightRecorder (a MediaRecorderBase), which lives at
/path/to/aosp/frameworks/av/media/libmediaplayerservice/StagefrightRecorder.cpp

For now we can say that the APP/JNI/native side runs in one process, while the MediaRecorderClient/StagefrightRecorder inside MediaPlayerService run in another; they talk over binder, and we have both the Bp and Bn ends in hand — from here on we will not carefully distinguish Bp from Bn.

On the client side:
BnMediaRecorderClient
BpMediaRecorder
BpMediaPlayerService

On the service side:
BpMediaRecorderClient (the service can obtain this Bp when it needs to notify the client)
BnMediaRecorder
BnMediaPlayerService

There is a diagram for this (click through to the original for full size):
Android MediaRecorder Diagram

Let's take starting a recording, i.e. start(), as the example.

Here the path forks in two: a CameraSource on one side and an MPEG4Writer (sp<MediaWriter> mWriter) on the other.
Both classes live under /path/to/aosp/frameworks/av/media/libstagefright/.

status_t StagefrightRecorder::startMPEG4Recording() {
    int32_t totalBitRate;
    status_t err = setupMPEG4Recording(
            mOutputFd, mVideoWidth, mVideoHeight,
            mVideoBitRate, &totalBitRate, &mWriter);
    if (err != OK) {
        return err;
    }

    int64_t startTimeUs = systemTime() / 1000;
    sp<MetaData> meta = new MetaData;
    setupMPEG4MetaData(startTimeUs, totalBitRate, &meta);

    err = mWriter->start(meta.get());
    if (err != OK) {
        return err;
    }

    return OK;
}
status_t StagefrightRecorder::setupMPEG4Recording(
        int outputFd,
        int32_t videoWidth, int32_t videoHeight,
        int32_t videoBitRate,
        int32_t *totalBitRate,
        sp<MediaWriter> *mediaWriter) {
    mediaWriter->clear();
    *totalBitRate = 0;
    status_t err = OK;
    sp<MediaWriter> writer = new MPEG4Writer(outputFd);

    if (mVideoSource < VIDEO_SOURCE_LIST_END) {

        sp<MediaSource> mediaSource;
        err = setupMediaSource(&mediaSource); // very important
        if (err != OK) {
            return err;
        }

        sp<MediaSource> encoder;
        err = setupVideoEncoder(mediaSource, videoBitRate, &encoder); // very important
        if (err != OK) {
            return err;
        }

        writer->addSource(encoder);
        *totalBitRate += videoBitRate;
    }

    // Audio source is added at the end if it exists.
    // This help make sure that the "recoding" sound is suppressed for
    // camcorder applications in the recorded files.
    if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
        err = setupAudioEncoder(writer); // very important
        if (err != OK) return err;
        *totalBitRate += mAudioBitRate;
    }

    ...

    writer->setListener(mListener);
    *mediaWriter = writer;
    return OK;
}
// Set up the appropriate MediaSource depending on the chosen option
status_t StagefrightRecorder::setupMediaSource(
                      sp<MediaSource> *mediaSource) {
    if (mVideoSource == VIDEO_SOURCE_DEFAULT
            || mVideoSource == VIDEO_SOURCE_CAMERA) {
        sp<CameraSource> cameraSource;
        status_t err = setupCameraSource(&cameraSource);
        if (err != OK) {
            return err;
        }
        *mediaSource = cameraSource;
    } else if (mVideoSource == VIDEO_SOURCE_GRALLOC_BUFFER) {
        // If using GRAlloc buffers, setup surfacemediasource.
        // Later a handle to that will be passed
        // to the client side when queried
        status_t err = setupSurfaceMediaSource();
        if (err != OK) {
            return err;
        }
        *mediaSource = mSurfaceMediaSource;
    } else {
        return INVALID_OPERATION;
    }
    return OK;
}
status_t StagefrightRecorder::setupCameraSource(
        sp<CameraSource> *cameraSource) {
    status_t err = OK;
    if ((err = checkVideoEncoderCapabilities()) != OK) {
        return err;
    }
    Size videoSize;
    videoSize.width = mVideoWidth;
    videoSize.height = mVideoHeight;
    if (mCaptureTimeLapse) {
        if (mTimeBetweenTimeLapseFrameCaptureUs < 0) {
            ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
                mTimeBetweenTimeLapseFrameCaptureUs);
            return BAD_VALUE;
        }

        mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
                mCamera, mCameraProxy, mCameraId,
                videoSize, mFrameRate, mPreviewSurface,
                mTimeBetweenTimeLapseFrameCaptureUs);
        *cameraSource = mCameraSourceTimeLapse;
    } else {
        *cameraSource = CameraSource::CreateFromCamera(
                mCamera, mCameraProxy, mCameraId, videoSize, mFrameRate,
                mPreviewSurface, true /*storeMetaDataInVideoBuffers*/);
    }
    mCamera.clear();
    mCameraProxy.clear();
    if (*cameraSource == NULL) {
        return UNKNOWN_ERROR;
    }

    if ((*cameraSource)->initCheck() != OK) {
        (*cameraSource).clear();
        *cameraSource = NULL;
        return NO_INIT;
    }

    // When frame rate is not set, the actual frame rate will be set to
    // the current frame rate being used.
    if (mFrameRate == -1) {
        int32_t frameRate = 0;
        CHECK ((*cameraSource)->getFormat()->findInt32(
                    kKeyFrameRate, &frameRate));
        ALOGI("Frame rate is not explicitly set. Use the current frame "
             "rate (%d fps)", frameRate);
        mFrameRate = frameRate;
    }

    CHECK(mFrameRate != -1);

    mIsMetaDataStoredInVideoBuffers =
        (*cameraSource)->isMetaDataStoredInVideoBuffers();

    return OK;
}
status_t StagefrightRecorder::setupVideoEncoder(
        sp<MediaSource> cameraSource,
        int32_t videoBitRate,
        sp<MediaSource> *source) {
    source->clear();

    sp<MetaData> enc_meta = new MetaData;
    enc_meta->setInt32(kKeyBitRate, videoBitRate);
    enc_meta->setInt32(kKeyFrameRate, mFrameRate);

    switch (mVideoEncoder) {
        case VIDEO_ENCODER_H263:
            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
            break;

        case VIDEO_ENCODER_MPEG_4_SP:
            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
            break;

        case VIDEO_ENCODER_H264:
            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
            break;

        default:
            CHECK(!"Should not be here, unsupported video encoding.");
            break;
    }

    sp<MetaData> meta = cameraSource->getFormat();

    int32_t width, height, stride, sliceHeight, colorFormat;
    CHECK(meta->findInt32(kKeyWidth, &width));
    CHECK(meta->findInt32(kKeyHeight, &height));
    CHECK(meta->findInt32(kKeyStride, &stride));
    CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
    CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));

    enc_meta->setInt32(kKeyWidth, width);
    enc_meta->setInt32(kKeyHeight, height);
    enc_meta->setInt32(kKeyIFramesInterval, mIFramesIntervalSec);
    enc_meta->setInt32(kKeyStride, stride);
    enc_meta->setInt32(kKeySliceHeight, sliceHeight);
    enc_meta->setInt32(kKeyColorFormat, colorFormat);
    if (mVideoTimeScale > 0) {
        enc_meta->setInt32(kKeyTimeScale, mVideoTimeScale);
    }
    if (mVideoEncoderProfile != -1) {
        enc_meta->setInt32(kKeyVideoProfile, mVideoEncoderProfile);
    }
    if (mVideoEncoderLevel != -1) {
        enc_meta->setInt32(kKeyVideoLevel, mVideoEncoderLevel);
    }

    OMXClient client;
    CHECK_EQ(client.connect(), (status_t)OK);

    uint32_t encoder_flags = 0;
    if (mIsMetaDataStoredInVideoBuffers) {
        encoder_flags |= OMXCodec::kStoreMetaDataInVideoBuffers;
    }

    // Do not wait for all the input buffers to become available.
    // This give timelapse video recording faster response in
    // receiving output from video encoder component.
    if (mCaptureTimeLapse) {
        encoder_flags |= OMXCodec::kOnlySubmitOneInputBufferAtOneTime;
    }

    sp<MediaSource> encoder = OMXCodec::Create(
            client.interface(), enc_meta,
            true /* createEncoder */, cameraSource,
            NULL, encoder_flags);
    if (encoder == NULL) {
        ALOGW("Failed to create the encoder");
        // When the encoder fails to be created, we need
        // release the camera source due to the camera's lock
        // and unlock mechanism.
        cameraSource->stop();
        return UNKNOWN_ERROR;
    }

    *source = encoder;

    return OK;
}

This is where things hook up with OMXCodec.
A configuration file called media_codecs.xml declares which codecs a device supports.

When we record MPEG-4 there is audio as well, so later on there is a setupAudioEncoder; I will not expand on it here — in short, it adds the audio as another track to the MPEG4Writer.
A side note: Google says setupAudioEncoder is placed last so that the "recording started" tone does not get recorded into the file, but in practice there is still a bug — on some devices that sound still ends up in the recording, and the apps I have seen work around it by playing the tone themselves.

MPEG4Writer also has a
start(MetaData*)
which kicks off two things:
a) startWriterThread

It starts a thread that does the writing:

    void MPEG4Writer::threadFunc() {
        ALOGV("threadFunc");

        prctl(PR_SET_NAME, (unsigned long)"MPEG4Writer", 0, 0, 0);

        Mutex::Autolock autoLock(mLock);
        while (!mDone) {
            Chunk chunk;
            bool chunkFound = false;

            while (!mDone && !(chunkFound = findChunkToWrite(&chunk))) {
                mChunkReadyCondition.wait(mLock);
            }

            // Actual write without holding the lock in order to
            // reduce the blocking time for media track threads.
            if (chunkFound) {
                mLock.unlock();
                writeChunkToFile(&chunk);
                mLock.lock();
            }
        }

        writeAllChunks();
    }
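
For completeness, startWriterThread itself looks roughly like this (simplified sketch, not verbatim AOSP; the per-track ChunkInfo bookkeeping is what findChunkToWrite() consults later, other ChunkInfo fields omitted):

    status_t MPEG4Writer::startWriterThread() {
        mDone = false;
        // one ChunkInfo per track, so the writer thread can pick pending chunks
        for (List<Track *>::iterator it = mTracks.begin();
             it != mTracks.end(); ++it) {
            ChunkInfo info;
            info.mTrack = *it;
            mChunkInfos.push_back(info);
        }

        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
        pthread_create(&mThread, &attr, ThreadWrapper, this); // MPEG4Writer::ThreadWrapper -> threadFunc()
        pthread_attr_destroy(&attr);
        mWriterThreadStarted = true;
        return OK;
    }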

b) startTracks

    status_t MPEG4Writer::startTracks(MetaData *params) {
        for (List<Track *>::iterator it = mTracks.begin();
             it != mTracks.end(); ++it) {
            status_t err = (*it)->start(params);

            if (err != OK) {
                for (List<Track *>::iterator it2 = mTracks.begin();
                     it2 != it; ++it2) {
                    (*it2)->stop();
                }

                return err;
            }
        }
        return OK;
    }

which in turn calls each Track's start method:

    status_t MPEG4Writer::Track::start(MetaData *params) {
        ...

        initTrackingProgressStatus(params);

        ...

        status_t err = mSource->start(meta.get()); // this ends up running CameraSource::start(); the two are tied together

        ...

        pthread_create(&mThread, &attr, ThreadWrapper, this);
        return OK;
    }

    void *MPEG4Writer::Track::ThreadWrapper(void *me) {
        Track *track = static_cast<Track *>(me);

        status_t err = track->threadEntry();
        return (void *) err;
    }

status_t MPEG4Writer::Track::threadEntry()
then runs on yet another newly started thread. It loops, continuously reading data from CameraSource (via read()); CameraSource's data naturally comes from the driver (see CameraSourceListener: CameraSource keeps the frames arriving from the driver in a List called mFramesReceived and signals mFrameAvailableCondition when data arrives; frames received before recording has started are simply dropped; note also that MediaWriter starts CameraSource's start() first and only then starts writing the Track), and then writes it out to the file.
Note: strictly speaking, what MPEG4Writer reads here is the data coming out of OMXCodec, because the data first reaches CameraSource, the codec encodes it, and only then does MPEG4Writer write it to the file! For how data travels between CameraSource/OMXCodec/MPEG4Writer, see the buffer-flow discussion in http://guoh.org/lifelog/2013/06/interaction-between-stagefright-and-codec/.
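
A condensed sketch of that per-track loop (not verbatim AOSP; copySampleIntoChunk() is a hypothetical stand-in for the real sample/chunk bookkeeping):

    status_t MPEG4Writer::Track::threadEntry() {
        MediaBuffer *buffer = NULL;
        status_t err = OK;
        while (!mDone && (err = mSource->read(&buffer)) == OK) { // mSource is the OMX encoder here
            if (buffer->range_length() == 0) {
                buffer->release();
                buffer = NULL;
                continue;
            }
            // Copy the encoded sample into the current chunk, record its size/offset/
            // timestamp for the moov tables, then release the buffer so the codec
            // (and eventually CameraSource) gets it back.
            copySampleIntoChunk(buffer); // hypothetical helper
            buffer->release();
            buffer = NULL;
        }
        return (err == ERROR_END_OF_STREAM) ? OK : err;
    }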

Looking back, what does Stagefright really do? To me it is mostly glue: it sits in MediaPlayerService and binds MediaSource, MediaWriter, the codec, and the upper-level MediaRecorder together. That is probably its biggest contribution, and using it to replace OpenCore fits Google's usual engineering-minded style (as opposed to a more academic one; plenty of Google's work is complex too, but it generally solves problems in the simplest way that works).
What takes a bit of getting used to is that MediaRecorder lives inside MediaPlayerService; the two sound like opposites. Maybe one day they get renamed, or split apart, who knows.

Of course this is only a rough overview; I will try to cover the codec side separately later.

Some details are left out here; below are a few points worth noting:

1. Time-lapse recording
The CameraSource counterpart is CameraSourceTimeLapse.

Concretely, inside
dataCallbackTimestamp
there is a skipCurrentFrame check.

It uses a couple of members to track and compute this:
mTimeBetweenTimeLapseVideoFramesUs (1E6/videoFrameRate) // spacing between two output frames
mLastTimeLapseFrameRealTimestampUs // real timestamp of the previous kept frame
From the frame rate it derives how far apart kept frames must be, and everything in between is dropped via releaseOneRecordingFrame.
In other words the driver keeps delivering the same stream; we simply discard frames at the software layer, as in the sketch below.
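
A minimal sketch of that decision, condensed into one hypothetical helper for illustration (the real logic is split across skipCurrentFrame/dataCallbackTimestamp in CameraSourceTimeLapse, and mTimeBetweenFrameCaptureUs here stands in for the configured capture interval):

bool shouldSkipFrame(int64_t timestampUs) {
    if (mLastTimeLapseFrameRealTimestampUs == 0) {
        mLastTimeLapseFrameRealTimestampUs = timestampUs; // always keep the very first frame
        return false;
    }
    if (timestampUs < mLastTimeLapseFrameRealTimestampUs + mTimeBetweenFrameCaptureUs) {
        return true; // too soon: the caller drops it via releaseOneRecordingFrame()
    }
    mLastTimeLapseFrameRealTimestampUs = timestampUs;
    return false;
}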

For background on time-lapse photography, see
https://en.wikipedia.org/wiki/Time-lapse_photography

2. When the Camera needs to be used during recording, this goes through ICameraRecordingProxy, i.e. the RecordingProxy inside Camera (a BnCameraRecordingProxy).
Once the ICameraRecordingProxy has been passed across binder into the server process it becomes a Bp, as in:

case SET_CAMERA: {
    ALOGV("SET_CAMERA");
    CHECK_INTERFACE(IMediaRecorder, data, reply);
    sp<ICamera> camera = interface_cast<ICamera>(data.readStrongBinder());
    sp<ICameraRecordingProxy> proxy =
        interface_cast<ICameraRecordingProxy>(data.readStrongBinder());
    reply->writeInt32(setCamera(camera, proxy));
    return NO_ERROR;
} break;

Inside CameraSource it is then used like this:

// We get the proxy from Camera, not ICamera. We need to get the proxy
// to the remote Camera owned by the application. Here mCamera is a
// local Camera object created by us. We cannot use the proxy from
// mCamera here.
mCamera = Camera::create(camera);
if (mCamera == 0) return -EBUSY;
mCameraRecordingProxy = proxy;
mCameraFlags |= FLAGS_HOT_CAMERA;
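
Later, when recording actually starts, the proxy is what drives the application-owned Camera; a simplified sketch of that start path (close to, but not verbatim, the AOSP CameraSource code):

if (mCameraFlags & FLAGS_HOT_CAMERA) {
    mCamera->unlock();
    mCamera.clear();
    // recording is driven through the app's remote Camera via the proxy;
    // frames come back through ICameraRecordingProxyListener
    mCameraRecordingProxy->startRecording(new ProxyListener(this));
} else {
    // we own the camera ourselves: listen via ICameraClient as usual
    mCamera->setListener(new CameraSourceListener(this));
    mCamera->startRecording();
}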

Open questions:

What is this member in CameraSource for?
List<sp<IMemory> > mFramesBeingEncoded;
Each time a frame has been handed off for encoding, CameraSource keeps it in this list, and only when the buffer is released are these frame(s) released in turn (see the sketch below). Is this for efficiency? Why not release each frame as soon as it has been encoded?
Also, I cannot help marveling once more at Google's habit of writing delete this; it is clever, but it looks unusual!
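
For reference, the companion path in CameraSource looks roughly like this (simplified from the AOSP code): a frame stays in mFramesBeingEncoded while the encoder still references its memory, and it is handed back to the driver only when the encoder returns the MediaBuffer:

void CameraSource::signalBufferReturned(MediaBuffer *buffer) {
    Mutex::Autolock autoLock(mLock);
    for (List<sp<IMemory> >::iterator it = mFramesBeingEncoded.begin();
         it != mFramesBeingEncoded.end(); ++it) {
        if ((*it)->pointer() == buffer->data()) {
            releaseOneRecordingFrame(*it); // give this frame back to the camera driver
            mFramesBeingEncoded.erase(it);
            mFrameCompleteCondition.signal(); // stop() waits on this before releasing the camera
            return;
        }
    }
}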

Android Camera System Structure

Words like "architecture" make me nervous; they sound too grand, so I will only dare to call this "structure".

If anything below is wrong, please point it out!

I touched on Camera-related material before in another post:
http://guoh.org/lifelog/2012/09/asop-camera-source-code-reading-video-snapshot/

For example:

$ANDROID_SRC_HOME/frameworks/base/core/java/android/hardware/Camera.java
$ANDROID_SRC_HOME/frameworks/base/core/jni/android_hardware_Camera.cpp

// Note: these two files appear to have been moved elsewhere in JB, which shows that Android is still a very active project

$ANDROID_SRC_HOME/frameworks/base/include/camera/Camera.h
$ANDROID_SRC_HOME/frameworks/base/libs/camera/Camera.cpp

This post is a set of notes written while reading http://source.android.com/devices/camera.html against the JB code. It may not be especially detailed or rigorously organized; it only covers the key points and things to watch out for. So if you happen to be reading this without much Android Camera background, I suggest first going through http://developer.android.com/guide/topics/media/camera.html and the API reference.

Let's get started.
Broadly speaking, Camera consists of these parts:
APP — JNI — NATIVE — BINDER — CAMERA SERVICE — HAL — DRIVER

Below is where each part lives in the AOSP tree:

APP($ANDROID_SRC_HOME/frameworks/base/core/java/android/hardware/Camera.java)

JNI($ANDROID_SRC_HOME/frameworks/base/core/jni/android_hardware_Camera.cpp)

NATIVE($ANDROID_SRC_HOME/frameworks/av/camera/Camera.cpp)

BINDER($ANDROID_SRC_HOME/frameworks/av/camera/ICameraService.cpp
       $ANDROID_SRC_HOME/frameworks/av/camera/ICameraClient.cpp)

CAMERA SERVICE($ANDROID_SRC_HOME/frameworks/av/services/camera/libcameraservice/CameraService.cpp)

HAL($ANDROID_SRC_HOME/hardware/libhardware/include/hardware/camera.h
    $ANDROID_SRC_HOME/hardware/libhardware/include/hardware/camera_common.h
    $ANDROID_SRC_HOME/frameworks/av/services/camera/libcameraservice/CameraHardwareInterface.h)

For the Galaxy Nexus, the concrete HAL implementation lives at

$ANDROID_SRC_HOME/hardware/ti/omap4xxx/camera/CameraHal.cpp
$ANDROID_SRC_HOME/hardware/ti/omap4xxx/camera/CameraHalCommon.cpp

with the headers at

$ANDROID_SRC_HOME/hardware/ti/omap4xxx/camera/inc/

And for the Galaxy Nexus:

DRIVER(https://developers.google.com/android/nexus/drivers)

Does this give you a rough picture of Camera?
Once you have the general idea, open the code and dig into whatever interests you.

You can pull all of the source locally with repo, or browse it online.
android/hardware/Camera.java

public static Camera open(int cameraId) {
    return new Camera(cameraId);
}
Camera(int cameraId) {
    mShutterCallback = null;
    mRawImageCallback = null;
    mJpegCallback = null;
    mPreviewCallback = null;
    mPostviewCallback = null;
    mZoomListener = null;

    Looper looper;
    if ((looper = Looper.myLooper()) != null) {
        mEventHandler = new EventHandler(this, looper);
    } else if ((looper = Looper.getMainLooper()) != null) {
        mEventHandler = new EventHandler(this, looper);
    } else {
        mEventHandler = null;
    }

    native_setup(new WeakReference<Camera>(this), cameraId);
}

The code above is the starting point of what we want to study: opening the camera. Like any piece of hardware, it has to be opened before it can be used.
One thing to note here is mEventHandler. What is this field for? The documentation explains it fairly clearly:

Callbacks from other methods are delivered to the event loop of the
thread which called open(). If this thread has no event loop, then
callbacks are delivered to the main application event loop. If there
is no main application event loop, callbacks are not delivered.

Unless you are writing a toy program, you should pay attention to this.

With native_setup(new WeakReference<Camera>(this), cameraId), the code moves on to
android_hardware_Camera.cpp

// connect to camera service
static void android_hardware_Camera_native_setup(JNIEnv *env, jobject thiz,
    jobject weak_this, jint cameraId)
{
    sp<Camera> camera = Camera::connect(cameraId);

    if (camera == NULL) {
        jniThrowRuntimeException(env, "Fail to connect to camera service");
        return;
    }

    // make sure camera hardware is alive
    if (camera->getStatus() != NO_ERROR) {
        jniThrowRuntimeException(env, "Camera initialization failed");
        return;
    }

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        jniThrowRuntimeException(env, "Can't find android/hardware/Camera");
        return;
    }

    // We use a weak reference so the Camera object can be garbage collected.
    // The reference is only used as a proxy for callbacks.
    sp<JNICameraContext> context = new JNICameraContext(env, weak_this, clazz, camera);
    context->incStrong(thiz);
    camera->setListener(context);

    // save context in opaque field
    env->SetIntField(thiz, fields.context, (int)context.get());
}

With sp<Camera> camera = Camera::connect(cameraId), the code moves on to
av/include/camera/Camera.h, with the implementation of course in av/camera/Camera.cpp.

sp<Camera> Camera::connect(int cameraId)
{
    ALOGV("connect");
    sp<Camera> c = new Camera();
    const sp<ICameraService>& cs = getCameraService();
    if (cs != 0) {
        c->mCamera = cs->connect(c, cameraId);
    }
    if (c->mCamera != 0) {
        c->mCamera->asBinder()->linkToDeath(c);
        c->mStatus = NO_ERROR;
    } else {
        c.clear();
    }
    return c;
}

getCameraService() is a key piece here and deserves a closer look.

// establish binder interface to camera service
const sp<ICameraService>& Camera::getCameraService()
{
    Mutex::Autolock _l(mLock);
    if (mCameraService.get() == 0) {
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        do {
            binder = sm->getService(String16("media.camera"));
            if (binder != 0)
                break;
            ALOGW("CameraService not published, waiting...");
            usleep(500000); // 0.5 s
        } while(true);
        if (mDeathNotifier == NULL) {
            mDeathNotifier = new DeathNotifier();
        }
        binder->linkToDeath(mDeathNotifier);
        mCameraService = interface_cast<ICameraService>(binder);
    }
    ALOGE_IF(mCameraService==0, "no CameraService!?");
    return mCameraService;
}
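
Note the linkToDeath above: if the media.camera service process dies, the cached binder becomes invalid. Roughly (a sketch, not verbatim AOSP), the DeathNotifier just drops the cached service so the next getCameraService() call re-queries the ServiceManager:

void Camera::DeathNotifier::binderDied(const wp<IBinder>& who) {
    ALOGW("ICameraService died");
    Mutex::Autolock _l(Camera::mLock);
    Camera::mCameraService.clear(); // next getCameraService() will re-fetch "media.camera"
}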

Then comes the proxy class BpCameraService in av/camera/ICameraService.cpp:

// connect to camera service
virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId)
{
    Parcel data, reply;
    data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
    data.writeStrongBinder(cameraClient->asBinder());
    data.writeInt32(cameraId);
    remote()->transact(BnCameraService::CONNECT, data, &reply);
    return interface_cast<ICamera>(reply.readStrongBinder());
}

This ultimately lands in BnCameraService's connect method, but that is a pure virtual function, and CameraService inherits from BnCameraService, so look at
av/services/camera/libcameraservice/CameraService.h; the concrete implementation is of course in av/services/camera/libcameraservice/CameraService.cpp. (The dispatch through BnCameraService::onTransact is sketched below.)
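
Roughly, the CONNECT transaction is unpacked like this on the service side (a sketch of BnCameraService::onTransact, abridged; other cases and NULL handling elided):

status_t BnCameraService::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case CONNECT: {
            CHECK_INTERFACE(ICameraService, data, reply);
            sp<ICameraClient> cameraClient =
                    interface_cast<ICameraClient>(data.readStrongBinder());
            sp<ICamera> camera = connect(cameraClient, data.readInt32()); // virtual: CameraService::connect
            reply->writeStrongBinder(camera->asBinder());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}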

sp<ICamera> CameraService::connect(
        const sp<ICameraClient>& cameraClient, int cameraId) {
    int callingPid = getCallingPid();

    LOG1("CameraService::connect E (pid %d, id %d)", callingPid, cameraId);

    if (!mModule) {
        ALOGE("Camera HAL module not loaded");
        return NULL;
    }

    sp<Client> client;
    if (cameraId < 0 || cameraId >= mNumberOfCameras) {
        ALOGE("CameraService::connect X (pid %d) rejected (invalid cameraId %d).",
            callingPid, cameraId);
        return NULL;
    }

    char value[PROPERTY_VALUE_MAX];
    property_get("sys.secpolicy.camera.disabled", value, "0");
    if (strcmp(value, "1") == 0) {
        // Camera is disabled by DevicePolicyManager.
        ALOGI("Camera is disabled. connect X (pid %d) rejected", callingPid);
        return NULL;
    }

    Mutex::Autolock lock(mServiceLock);
    if (mClient[cameraId] != 0) {
        client = mClient[cameraId].promote();
        if (client != 0) {
            if (cameraClient->asBinder() == client->getCameraClient()->asBinder()) {
                LOG1("CameraService::connect X (pid %d) (the same client)",
                     callingPid);
                return client;
            } else {
                ALOGW("CameraService::connect X (pid %d) rejected (existing client).",
                      callingPid);
                return NULL;
            }
        }
        mClient[cameraId].clear();
    }

    if (mBusy[cameraId]) {
        ALOGW("CameraService::connect X (pid %d) rejected"
                " (camera %d is still busy).", callingPid, cameraId);
        return NULL;
    }

    struct camera_info info;
    if (mModule->get_camera_info(cameraId, &info) != OK) {
        ALOGE("Invalid camera id %d", cameraId);
        return NULL;
    }

    int deviceVersion;
    if (mModule->common.module_api_version == CAMERA_MODULE_API_VERSION_2_0) {
        deviceVersion = info.device_version;
    } else {
        deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
    }

    switch(deviceVersion) {
      case CAMERA_DEVICE_API_VERSION_1_0:
        client = new CameraClient(this, cameraClient, cameraId,
                info.facing, callingPid, getpid());
        break;
      case CAMERA_DEVICE_API_VERSION_2_0:
        client = new Camera2Client(this, cameraClient, cameraId,
                info.facing, callingPid, getpid());
        break;
      default:
        ALOGE("Unknown camera device HAL version: %d", deviceVersion);
        return NULL;
    }

    if (client->initialize(mModule) != OK) {
        return NULL;
    }

    cameraClient->asBinder()->linkToDeath(this);

    mClient[cameraId] = client;
    LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId, getpid());
    return client;
}

For example, taking HAL 1.0 here, see
av/services/camera/libcameraservice/CameraClient.h

av/services/camera/libcameraservice/CameraClient.cpp

CameraClient::CameraClient(const sp<CameraService>& cameraService,
        const sp<ICameraClient>& cameraClient,
        int cameraId, int cameraFacing, int clientPid, int servicePid):
        Client(cameraService, cameraClient,
                cameraId, cameraFacing, clientPid, servicePid)
{
    int callingPid = getCallingPid();
    LOG1("CameraClient::CameraClient E (pid %d, id %d)", callingPid, cameraId);

    mHardware = NULL;
    mMsgEnabled = 0;
    mSurface = 0;
    mPreviewWindow = 0;
    mDestructionStarted = false;

    // Callback is disabled by default
    mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
    mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
    mPlayShutterSound = true;
    LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
}
status_t CameraClient::initialize(camera_module_t *module) {
    int callingPid = getCallingPid();
    LOG1("CameraClient::initialize E (pid %d, id %d)", callingPid, mCameraId);

    char camera_device_name[10];
    status_t res;
    snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);

    mHardware = new CameraHardwareInterface(camera_device_name);
    res = mHardware->initialize(&module->common);
    if (res != OK) {
        ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
                __FUNCTION__, mCameraId, strerror(-res), res);
        mHardware.clear();
        return NO_INIT;
    }

    mHardware->setCallbacks(notifyCallback,
            dataCallback,
            dataCallbackTimestamp,
            (void *)mCameraId);

    // Enable zoom, error, focus, and metadata messages by default
    enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS |
                  CAMERA_MSG_PREVIEW_METADATA | CAMERA_MSG_FOCUS_MOVE);

    LOG1("CameraClient::initialize X (pid %d, id %d)", callingPid, mCameraId);
    return OK;
}

Of course these are also involved:
libhardware/include/hardware/camera.h

libhardware/include/hardware/camera_common.h

av/services/camera/libcameraservice/CameraHardwareInterface.h

status_t initialize(hw_module_t *module)
{
    ALOGI("Opening camera %s", mName.string());
    int rc = module->methods->open(module, mName.string(),
                                   (hw_device_t **)&mDevice);
    if (rc != OK) {
        ALOGE("Could not open camera %s: %d", mName.string(), rc);
        return rc;
    }
    initHalPreviewWindow();
    return rc;
}

This is only an interface; the concrete implementation is supplied by the vendor.
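
Most of CameraHardwareInterface is a thin forwarding layer into the vendor's camera_device_ops_t obtained from that open() call; for example, startPreview is roughly:

status_t startPreview()
{
    ALOGV("%s(%s)", __FUNCTION__, mName.string());
    if (mDevice->ops->start_preview)
        return mDevice->ops->start_preview(mDevice); // straight into the vendor HAL
    return INVALID_OPERATION;
}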

That was a call flowing from the top down; next, let's look at a case where the call flows from the bottom back up.

From the code we can see that three callbacks are registered toward the driver:

mHardware->setCallbacks(notifyCallback,
        dataCallback,
        dataCallbackTimestamp,
        (void *)mCameraId);

So these three callbacks can get invoked from below.
Take dataCallback as an example; one of its paths is:

// picture callback - compressed picture ready
void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) {
    disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE);

    sp<ICameraClient> c = mCameraClient;
    mLock.unlock();
    if (c != 0) {
        c->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, mem, NULL);
    }
}

The call goes back to the app through ICameraClient; the ICameraClient here is a BpCameraClient (why it is a Bp is explained a bit further down).

// generic data callback from camera service to app with image data
void BpCameraClient::dataCallback(int32_t msgType, const sp<IMemory>& imageData,
                  camera_frame_metadata_t *metadata)
{
    ALOGV("dataCallback");
    Parcel data, reply;
    data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
    data.writeInt32(msgType);
    data.writeStrongBinder(imageData->asBinder());
    if (metadata) {
        data.writeInt32(metadata->number_of_faces);
        data.write(metadata->faces, sizeof(camera_face_t) * metadata->number_of_faces);
    }
    remote()->transact(DATA_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
}

And then:

status_t BnCameraClient::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case NOTIFY_CALLBACK: {
            ...
        } break;
        case DATA_CALLBACK: {
            ALOGV("DATA_CALLBACK");
            CHECK_INTERFACE(ICameraClient, data, reply);
            int32_t msgType = data.readInt32();
            sp<IMemory> imageData = interface_cast<IMemory>(data.readStrongBinder());
            camera_frame_metadata_t *metadata = NULL;
            if (data.dataAvail() > 0) {
                metadata = new camera_frame_metadata_t;
                metadata->number_of_faces = data.readInt32();
                metadata->faces = (camera_face_t *) data.readInplace(
                        sizeof(camera_face_t) * metadata->number_of_faces);
            }
            dataCallback(msgType, imageData, metadata); // since this is a pure virtual function, the call lands in Camera's implementation
            if (metadata) delete metadata;
            return NO_ERROR;
        } break;
        case DATA_CALLBACK_TIMESTAMP: {
            ...
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}

And then:

// callback from camera service when frame or image is ready
void Camera::dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
                          camera_frame_metadata_t *metadata)
{
    sp<CameraListener> listener;
    {
        Mutex::Autolock _l(mLock);
        listener = mListener;
    }
    if (listener != NULL) {
        listener->postData(msgType, dataPtr, metadata);
    }
}

Calling from the app into the service, what gets passed along is a BnCameraClient (the client side owns the real object; you can see this from where it is new'ed).
Calling from the service back into the app goes through a BpCameraClient (the service side holds only a proxy for the client's real object).
And this Bn-to-Bp conversion is accomplished by

sp<ICameraClient> cameraClient = interface_cast<ICameraClient>(data.readStrongBinder());

on the service side.
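
What interface_cast does is essentially call the asInterface generated by the IMPLEMENT_META_INTERFACE macro; simplified, it expands to roughly:

sp<ICameraClient> ICameraClient::asInterface(const sp<IBinder>& obj)
{
    sp<ICameraClient> intr;
    if (obj != NULL) {
        // same process: the IBinder is the BnCameraClient itself
        intr = static_cast<ICameraClient*>(
            obj->queryLocalInterface(ICameraClient::descriptor).get());
        if (intr == NULL) {
            // remote process: wrap the binder handle in a proxy
            intr = new BpCameraClient(obj);
        }
    }
    return intr;
}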

A few more points worth noting:

ICameraClient inherits from IInterface
BnCameraClient inherits from BnInterface<ICameraClient>, and BnInterface in turn inherits from ICameraClient and BBinder

Camera inherits from BnCameraClient

ICameraService inherits from IInterface
BnCameraService inherits from BnInterface<ICameraService>, and BnInterface in turn inherits from ICameraService and BBinder

ICamera inherits from IInterface
BnCamera inherits from BnInterface<ICamera>, and BnInterface in turn inherits from ICamera and BBinder

CameraService inherits from BinderService<CameraService>, BnCameraService, and IBinder::DeathRecipient
BinderService is mainly a uniform helper for instantiating and publishing a service

CameraService has a nested class Client that inherits from BnCamera
Most of the real implementation lives in CameraClient (Camera2Client is Google's way of driving the new HAL; some vendors, Qualcomm for example, implement their HAL against it)
This is the real client; it calls directly into the HW, or gets called back by it.

When sendCommand sends parameters down toward the driver, it first checks whether the command is something the CameraService layer itself can handle (some things never need to reach the driver):

cmd == CAMERA_CMD_SET_DISPLAY_ORIENTATION
cmd == CAMERA_CMD_ENABLE_SHUTTER_SOUND
cmd == CAMERA_CMD_PLAY_RECORDING_SOUND
cmd == CAMERA_CMD_SET_VIDEO_BUFFER_COUNT
cmd == CAMERA_CMD_PING
All of the above are handled by CameraService itself;
everything else is handed down to the driver (mHardware->sendCommand(cmd, arg1, arg2)), as sketched below.
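
An abridged sketch of that dispatch (based on CameraClient::sendCommand; the bodies of the locally handled commands are elided):

status_t CameraClient::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) {
    Mutex::Autolock lock(mLock);

    if (cmd == CAMERA_CMD_SET_DISPLAY_ORIENTATION) {
        // update mOrientation / the preview transform locally
        return OK;
    } else if (cmd == CAMERA_CMD_ENABLE_SHUTTER_SOUND) {
        // toggle mPlayShutterSound (subject to policy checks)
        return OK;
    } else if (cmd == CAMERA_CMD_PLAY_RECORDING_SOUND) {
        mCameraService->playSound(CameraService::SOUND_RECORDING);
        return OK;
    } else if (cmd == CAMERA_CMD_SET_VIDEO_BUFFER_COUNT) {
        // forwarded to the preview window, not to the HAL
        return OK;
    } else if (cmd == CAMERA_CMD_PING) {
        return OK; // nothing to do; reaching here proves the client is alive
    }

    // everything else goes straight down to the vendor HAL
    return mHardware->sendCommand(cmd, arg1, arg2);
}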

Finally, a few open questions:

// these are initialized in the constructor.
sp<CameraHardwareInterface> mHardware; // cleared after disconnect()
mHardware is cleared in disconnect().

// Make sure disconnect() is done once and once only, whether it is called
// from the user directly, or called by the destructor.
if (mHardware == 0) return;
This prevents disconnect() from doing its work twice, once when called by the user and once more from the destructor.