() {
+ private final Collator sCollator = Collator.getInstance();
+ @Override
+ public int compare(AppModel object1, AppModel object2) {
+ return sCollator.compare(object1.getLabel(), object2.getLabel());
+ }
+ };
+}
diff --git a/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/GridFragment.java b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/GridFragment.java
new file mode 100644
index 0000000..779aa46
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/GridFragment.java
@@ -0,0 +1,399 @@
+package org.anbox.appmgr;
+
+/*
+ * Created by Thomas Barrasso on 9/11/12.
+ * Copyright (c) 2012 Loupe Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import android.content.Context;
+import android.content.res.Resources;
+import android.os.Build;
+import android.os.Bundle;
+import android.os.Handler;
+import android.support.v4.app.Fragment;
+import android.util.TypedValue;
+import android.view.Gravity;
+import android.view.LayoutInflater;
+import android.view.View;
+import android.view.ViewGroup;
+import android.view.animation.AnimationUtils;
+import android.widget.AdapterView;
+import android.widget.FrameLayout;
+import android.widget.GridView;
+import android.widget.LinearLayout;
+import android.widget.ListAdapter;
+import android.widget.ListView;
+import android.widget.ProgressBar;
+import android.widget.TextView;
+
+/**
+ * Based on {@link android.app.ListFragment} but adapted for {@link GridView}.
+ */
+public class GridFragment extends Fragment {
+
+ // Ids for the internally created views (mirrors ListFragment's hidden ids).
+ static final int INTERNAL_EMPTY_ID = 0x00ff0001;
+ static final int INTERNAL_PROGRESS_CONTAINER_ID = 0x00ff0002;
+ static final int INTERNAL_LIST_CONTAINER_ID = 0x00ff0003;
+
+ final private Handler mHandler = new Handler();
+
+ // Posted once the grid is attached so it can take focus after layout.
+ final private Runnable mRequestFocus = new Runnable() {
+ public void run() {
+ mGrid.focusableViewAvailable(mGrid);
+ }
+ };
+
+ // Forwards item clicks to the overridable onGridItemClick() hook.
+ final private AdapterView.OnItemClickListener mOnClickListener
+ = new AdapterView.OnItemClickListener() {
+ public void onItemClick(AdapterView<?> parent, View v, int position, long id) {
+ onGridItemClick((GridView) parent, v, position, id);
+ }
+ };
+
+ ListAdapter mAdapter;
+ GridView mGrid;
+ View mEmptyView;
+ TextView mStandardEmptyView;
+ View mProgressContainer;
+ View mGridContainer;
+ CharSequence mEmptyText;
+ boolean mGridShown;
+
+ public GridFragment() { }
+
+ /**
+ * Provide default implementation to return a simple grid view. Subclasses
+ * can override to replace with their own layout. If doing so, the
+ * returned view hierarchy must have a GridView whose id
+ * is {@link android.R.id#list android.R.id.list} and can optionally
+ * have a sibling view id {@link android.R.id#empty android.R.id.empty}
+ * that is to be shown when the grid is empty.
+ *
+ * If you are overriding this method with your own custom content,
+ * consider including the standard layout {@link android.R.layout#list_content}
+ * in your layout file, so that you continue to retain all of the standard
+ * behavior of ListFragment. In particular, this is currently the only
+ * way to have the built-in indeterminate progress state be shown.
+ */
+ @Override
+ public View onCreateView(LayoutInflater inflater, ViewGroup container,
+ Bundle savedInstanceState) {
+ final Context context = getActivity();
+
+ FrameLayout root = new FrameLayout(context);
+
+ // ------------------------------------------------------------------
+
+ LinearLayout pframe = new LinearLayout(context);
+ pframe.setId(INTERNAL_PROGRESS_CONTAINER_ID);
+ pframe.setOrientation(LinearLayout.VERTICAL);
+ pframe.setVisibility(View.GONE);
+ pframe.setGravity(Gravity.CENTER);
+
+ ProgressBar progress = new ProgressBar(context, null,
+ android.R.attr.progressBarStyleLarge);
+ pframe.addView(progress, new FrameLayout.LayoutParams(
+ ViewGroup.LayoutParams.WRAP_CONTENT, ViewGroup.LayoutParams.WRAP_CONTENT));
+
+ root.addView(pframe, new FrameLayout.LayoutParams(
+ ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT));
+
+ // ------------------------------------------------------------------
+
+ FrameLayout lframe = new FrameLayout(context);
+ lframe.setId(INTERNAL_LIST_CONTAINER_ID);
+
+ TextView tv = new TextView(getActivity());
+ tv.setId(INTERNAL_EMPTY_ID);
+ tv.setGravity(Gravity.CENTER);
+ lframe.addView(tv, new FrameLayout.LayoutParams(
+ ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT));
+
+ GridView lv = new GridView(getActivity());
+ lv.setId(android.R.id.list);
+ lv.setDrawSelectorOnTop(false);
+ lv.setColumnWidth(convertDpToPixels(60, getActivity()));
+ lv.setStretchMode(GridView.STRETCH_COLUMN_WIDTH);
+ lv.setNumColumns(GridView.AUTO_FIT);
+ lv.setHorizontalSpacing(convertDpToPixels(20, getActivity()));
+ lv.setVerticalSpacing(convertDpToPixels(20, getActivity()));
+ lv.setSmoothScrollbarEnabled(true);
+
+ // disable overscroll
+ if(Build.VERSION.SDK_INT >= Build.VERSION_CODES.GINGERBREAD) {
+ lv.setOverScrollMode(ListView.OVER_SCROLL_NEVER);
+ }
+
+ lframe.addView(lv, new FrameLayout.LayoutParams(
+ ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT));
+
+ root.addView(lframe, new FrameLayout.LayoutParams(
+ ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT));
+
+ // ------------------------------------------------------------------
+
+ root.setLayoutParams(new FrameLayout.LayoutParams(
+ ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT));
+
+ return root;
+ }
+
+ /**
+ * Attach to grid view once the view hierarchy has been created.
+ */
+ @Override
+ public void onViewCreated(View view, Bundle savedInstanceState) {
+ super.onViewCreated(view, savedInstanceState);
+ ensureGrid();
+ }
+
+ /**
+ * Detach from {@link GridView}
+ */
+ @Override
+ public void onDestroyView() {
+ mHandler.removeCallbacks(mRequestFocus);
+ mGrid = null;
+ mGridShown = false;
+ mEmptyView = mProgressContainer = mGridContainer = null;
+ mStandardEmptyView = null;
+ super.onDestroyView();
+ }
+
+ /** Convert a density-independent-pixel value to raw pixels for this device. */
+ public static int convertDpToPixels(float dp, Context context){
+ Resources resources = context.getResources();
+ return (int) TypedValue.applyDimension(
+ TypedValue.COMPLEX_UNIT_DIP,
+ dp,
+ resources.getDisplayMetrics()
+ );
+ }
+
+ /**
+ * This method will be called when an item in the grid is selected.
+ * Subclasses should override. Subclasses can call
+ * getGridView().getItemAtPosition(position) if they need to access the
+ * data associated with the selected item.
+ *
+ * @param g The {@link GridView} where the click happened
+ * @param v The view that was clicked within the {@link GridView}
+ * @param position The position of the view in the grid
+ * @param id The row id of the item that was clicked
+ */
+ public void onGridItemClick(GridView g, View v, int position, long id) {
+
+ }
+
+ /**
+ * Provide the cursor for the {@link GridView}.
+ */
+ public void setGridAdapter(ListAdapter adapter) {
+ final boolean hadAdapter = (mAdapter != null);
+ mAdapter = adapter;
+ if (mGrid != null) {
+ mGrid.setAdapter(adapter);
+ if (!mGridShown && !hadAdapter) {
+ // The grid was hidden, and previously didn't have an
+ // adapter. It is now time to show it.
+ setGridShown(true, (getView().getWindowToken() != null));
+ }
+ }
+ }
+
+ /**
+ * Set the currently selected grid item to the specified
+ * position with the adapter's data
+ *
+ * @param position
+ */
+ public void setSelection(int position) {
+ ensureGrid();
+ mGrid.setSelection(position);
+ }
+
+ /**
+ * Get the position of the currently selected grid item.
+ */
+ public int getSelectedItemPosition() {
+ ensureGrid();
+ return mGrid.getSelectedItemPosition();
+ }
+
+ /**
+ * Get the cursor row ID of the currently selected grid item.
+ */
+ public long getSelectedItemId() {
+ ensureGrid();
+ return mGrid.getSelectedItemId();
+ }
+
+ /**
+ * Get the activity's {@link GridView} widget.
+ */
+ public GridView getGridView() {
+ ensureGrid();
+ return mGrid;
+ }
+
+ /**
+ * The default content for a ListFragment has a TextView that can
+ * be shown when the grid is empty. If you would like to have it
+ * shown, call this method to supply the text it should use.
+ */
+ public void setEmptyText(CharSequence text) {
+ ensureGrid();
+ if (mStandardEmptyView == null) {
+ throw new IllegalStateException("Can't be used with a custom content view");
+ }
+ mStandardEmptyView.setText(text);
+ if (mEmptyText == null) {
+ mGrid.setEmptyView(mStandardEmptyView);
+ }
+ mEmptyText = text;
+ }
+
+ /**
+ * Control whether the grid is being displayed. You can make it not
+ * displayed if you are waiting for the initial data to show in it. During
+ * this time an indeterminate progress indicator will be shown instead.
+ *
+ * <p>Applications do not normally need to use this themselves. The default
+ * behavior of ListFragment is to start with the grid not being shown, only
+ * showing it once an adapter is given with {@link #setGridAdapter(ListAdapter)}.
+ * If the grid at that point had not been shown, when it does get shown
+ * it will be done without the user ever seeing the hidden state.
+ *
+ * @param shown If true, the grid view is shown; if false, the progress
+ * indicator. The initial value is true.
+ */
+ public void setGridShown(boolean shown) {
+ setGridShown(shown, true);
+ }
+
+ /**
+ * Like {@link #setGridShown(boolean)}, but no animation is used when
+ * transitioning from the previous state.
+ */
+ public void setGridShownNoAnimation(boolean shown) {
+ setGridShown(shown, false);
+ }
+
+ /**
+ * Control whether the grid is being displayed. You can make it not
+ * displayed if you are waiting for the initial data to show in it. During
+ * this time an indeterminate progress indicator will be shown instead.
+ *
+ * @param shown If true, the grid view is shown; if false, the progress
+ * indicator. The initial value is true.
+ * @param animate If true, an animation will be used to transition to the
+ * new state.
+ */
+ private void setGridShown(boolean shown, boolean animate) {
+ ensureGrid();
+ if (mProgressContainer == null) {
+ throw new IllegalStateException("Can't be used with a custom content view");
+ }
+ if (mGridShown == shown) {
+ return;
+ }
+ mGridShown = shown;
+ if (shown) {
+ if (animate) {
+ mProgressContainer.startAnimation(AnimationUtils.loadAnimation(
+ getActivity(), android.R.anim.fade_out));
+ mGridContainer.startAnimation(AnimationUtils.loadAnimation(
+ getActivity(), android.R.anim.fade_in));
+ } else {
+ mProgressContainer.clearAnimation();
+ mGridContainer.clearAnimation();
+ }
+ mProgressContainer.setVisibility(View.GONE);
+ mGridContainer.setVisibility(View.VISIBLE);
+ } else {
+ if (animate) {
+ mProgressContainer.startAnimation(AnimationUtils.loadAnimation(
+ getActivity(), android.R.anim.fade_in));
+ mGridContainer.startAnimation(AnimationUtils.loadAnimation(
+ getActivity(), android.R.anim.fade_out));
+ } else {
+ mProgressContainer.clearAnimation();
+ mGridContainer.clearAnimation();
+ }
+ mProgressContainer.setVisibility(View.VISIBLE);
+ mGridContainer.setVisibility(View.GONE);
+ }
+ }
+
+ /**
+ * Get the ListAdapter associated with this activity's {@link GridView}.
+ */
+ public ListAdapter getGridAdapter() {
+ return mAdapter;
+ }
+
+ // Locate (or adopt) the GridView inside the fragment's view hierarchy and
+ // wire up the empty view, click listener and initial shown/hidden state.
+ private void ensureGrid() {
+ if (mGrid != null) {
+ return;
+ }
+ View root = getView();
+ if (root == null) {
+ throw new IllegalStateException("Content view not yet created");
+ }
+ if (root instanceof GridView) {
+ mGrid = (GridView) root;
+ } else {
+ mStandardEmptyView = (TextView)root.findViewById(INTERNAL_EMPTY_ID);
+ if (mStandardEmptyView == null) {
+ mEmptyView = root.findViewById(android.R.id.empty);
+ } else {
+ mStandardEmptyView.setVisibility(View.GONE);
+ }
+ mProgressContainer = root.findViewById(INTERNAL_PROGRESS_CONTAINER_ID);
+ mGridContainer = root.findViewById(INTERNAL_LIST_CONTAINER_ID);
+ View rawGridView = root.findViewById(android.R.id.list);
+ if (!(rawGridView instanceof GridView)) {
+ if (rawGridView == null) {
+ throw new RuntimeException(
+ "Your content must have a GridView whose id attribute is " +
+ "'android.R.id.list'");
+ }
+ throw new RuntimeException(
+ "Content has view with id attribute 'android.R.id.list' "
+ + "that is not a GridView class");
+ }
+ mGrid = (GridView) rawGridView;
+ if (mEmptyView != null) {
+ mGrid.setEmptyView(mEmptyView);
+ } else if (mEmptyText != null) {
+ mStandardEmptyView.setText(mEmptyText);
+ mGrid.setEmptyView(mStandardEmptyView);
+ }
+ }
+ mGridShown = true;
+ mGrid.setOnItemClickListener(mOnClickListener);
+ if (mAdapter != null) {
+ ListAdapter adapter = mAdapter;
+ mAdapter = null;
+ setGridAdapter(adapter);
+ } else {
+ // We are starting without an adapter, so assume we won't
+ // have our data right away and start with the progress indicator.
+ if (mProgressContainer != null) {
+ setGridShown(false, false);
+ }
+ }
+ mHandler.post(mRequestFocus);
+ }
+}
diff --git a/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/LauncherActivity.java b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/LauncherActivity.java
new file mode 100644
index 0000000..d89b03e
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/LauncherActivity.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2016 Simon Fels
+ *
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 3, as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranties of
+ * MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package org.anbox.appmgr;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.content.Intent;
+
+public final class LauncherActivity extends Activity {
+ private static final String TAG = "AnboxAppMgr";
+
+ /** Starts the background {@code LauncherService} when the activity is created. */
+ @Override
+ public void onCreate(Bundle info) {
+ super.onCreate(info);
+
+ // Spin up the launcher service as soon as the activity comes to life.
+ startService(new Intent(this, LauncherService.class));
+
+ Log.i(TAG, "Created launcher activity");
+ }
+
+ /** Stops the {@code LauncherService} again when the activity goes away. */
+ @Override
+ public void onDestroy() {
+ Log.i(TAG, "Destroyed launcher activity");
+
+ stopService(new Intent(this, LauncherService.class));
+
+ super.onDestroy();
+ }
+}
diff --git a/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/LauncherService.java b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/LauncherService.java
new file mode 100644
index 0000000..09a94ec
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/LauncherService.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2016 Simon Fels
+ *
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 3, as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranties of
+ * MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package org.anbox.appmgr;
+
+import android.app.Service;
+import android.util.Log;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.os.IBinder;
+
+public final class LauncherService extends Service {
+ public static final String TAG = "AnboxAppMgr";
+
+ // Bridge to the host-side platform binder service.
+ private PlatformService mPlatformService;
+ // Receives package add/change/remove broadcasts while the service runs.
+ private PackageEventReceiver mPkgEventReceiver;
+
+ public LauncherService() {
+ super();
+ Log.i(TAG, "Service created");
+ }
+
+ @Override
+ public void onCreate() {
+ super.onCreate();
+
+ mPlatformService = new PlatformService(getBaseContext());
+
+ // Send the current list of applications over to the host so
+ // it can rebuild its list of available applications.
+ mPlatformService.sendApplicationListUpdate();
+
+ IntentFilter filter = new IntentFilter();
+ filter.addAction(Intent.ACTION_PACKAGE_ADDED);
+ filter.addAction(Intent.ACTION_PACKAGE_CHANGED);
+ filter.addAction(Intent.ACTION_PACKAGE_REMOVED);
+ filter.addDataScheme("package");
+
+ mPkgEventReceiver = new PackageEventReceiver();
+ registerReceiver(mPkgEventReceiver, filter);
+
+ Log.i(TAG, "Service started");
+ }
+
+ @Override
+ public void onDestroy() {
+ // Unregister the receiver registered in onCreate() so it does not
+ // leak once the service is gone.
+ if (mPkgEventReceiver != null) {
+ unregisterReceiver(mPkgEventReceiver);
+ mPkgEventReceiver = null;
+ }
+
+ Log.i(TAG, "Service stopped");
+
+ super.onDestroy();
+ }
+
+ /** This service is start-only; binding is not supported. */
+ @Override
+ public IBinder onBind(Intent intent) {
+ return null;
+ }
+}
diff --git a/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/MainApplication.java b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/MainApplication.java
new file mode 100644
index 0000000..db74d49
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/MainApplication.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Simon Fels
+ *
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 3, as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranties of
+ * MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package org.anbox.appmgr;
+
+import android.app.Application;
+
+// Application entry point; currently carries no state of its own and only
+// exists so the manifest can reference a concrete Application class.
+public final class MainApplication extends Application {
+}
diff --git a/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/PackageEventReceiver.java b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/PackageEventReceiver.java
new file mode 100644
index 0000000..1f299ba
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/PackageEventReceiver.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Simon Fels
+ *
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 3, as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranties of
+ * MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package org.anbox.appmgr;
+
+import android.content.BroadcastReceiver;
+import android.content.Context;
+import android.content.Intent;
+import android.net.Uri;
+import android.util.Log;
+
+public class PackageEventReceiver extends BroadcastReceiver {
+ private static final String TAG = "AnboxAppMgr";
+
+ // Created lazily on the first broadcast; reused afterwards.
+ private PlatformService mPlatformService;
+
+ /** Extracts the package name from a package:// broadcast intent, or null. */
+ private String getPackageName(Intent intent) {
+ Uri uri = intent.getData();
+ String package_name = (uri != null ? uri.getSchemeSpecificPart() : null);
+ return package_name;
+ }
+
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ if (mPlatformService == null)
+ mPlatformService = new PlatformService(context);
+
+ // Compare actions with equals(): action strings arriving through a
+ // broadcast are not guaranteed to be the interned constant instances,
+ // so reference comparison (==) is unreliable here.
+ String action = intent.getAction();
+ if (Intent.ACTION_PACKAGE_ADDED.equals(action) ||
+ Intent.ACTION_PACKAGE_CHANGED.equals(action)) {
+ // Send updated list of applications to the host so that it
+ // can update the list of applications available for the user.
+ mPlatformService.sendApplicationListUpdate();
+ } else if (Intent.ACTION_PACKAGE_REMOVED.equals(action)) {
+ // Only send notification when package got removed and not replaced
+ if (!intent.getBooleanExtra(Intent.EXTRA_REPLACING, false)) {
+ mPlatformService.notifyPackageRemoved(getPackageName(intent));
+ }
+ }
+ }
+}
diff --git a/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/PackageIntentReceiver.java b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/PackageIntentReceiver.java
new file mode 100644
index 0000000..7d19ce7
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/PackageIntentReceiver.java
@@ -0,0 +1,62 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright 2016 Arnab Chakraborty. http://arnab.ch
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this
+ * software and associated documentation files (the "Software"), to deal in the Software
+ * without restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies
+ * or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+ * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
+ * THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+package org.anbox.appmgr;
+
+import android.content.BroadcastReceiver;
+import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+
+/**
+ * Helper class to look for interesting changes to the installed apps
+ * so that the loader can be updated.
+ *
+ * @Credit http://developer.android.com/reference/android/content/AsyncTaskLoader.html
+ */
+public class PackageIntentReceiver extends BroadcastReceiver {
+
+ final AppsLoader mLoader;
+
+ /**
+ * Registers this receiver (twice, with two filters) against the loader's
+ * context so the loader is notified whenever installed apps change.
+ */
+ public PackageIntentReceiver(AppsLoader loader) {
+ mLoader = loader;
+
+ // Package install / remove / change events.
+ IntentFilter pkgFilter = new IntentFilter();
+ pkgFilter.addAction(Intent.ACTION_PACKAGE_ADDED);
+ pkgFilter.addAction(Intent.ACTION_PACKAGE_REMOVED);
+ pkgFilter.addAction(Intent.ACTION_PACKAGE_CHANGED);
+ pkgFilter.addDataScheme("package");
+ mLoader.getContext().registerReceiver(this, pkgFilter);
+
+ // Events related to sdcard installation (apps moving on/off external storage).
+ IntentFilter sdFilter = new IntentFilter();
+ sdFilter.addAction(Intent.ACTION_EXTERNAL_APPLICATIONS_AVAILABLE);
+ sdFilter.addAction(Intent.ACTION_EXTERNAL_APPLICATIONS_UNAVAILABLE);
+ mLoader.getContext().registerReceiver(this, sdFilter);
+ }
+
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ // Any of the registered events invalidates the loader's data.
+ mLoader.onContentChanged();
+ }
+
+}
diff --git a/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/PlatformService.java b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/PlatformService.java
new file mode 100644
index 0000000..68581aa
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/appmgr/src/org/anbox/appmgr/PlatformService.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2016 Simon Fels
+ *
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 3, as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranties of
+ * MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package org.anbox.appmgr;
+
+import android.os.ServiceManager;
+import android.os.IBinder;
+import android.os.Parcel;
+import android.os.RemoteException;
+import android.util.Log;
+import android.content.Intent;
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.content.pm.ApplicationInfo;
+import android.net.Uri;
+import android.graphics.drawable.Drawable;
+import android.graphics.drawable.BitmapDrawable;
+import android.graphics.Bitmap;
+import android.graphics.Bitmap.Config;
+import android.graphics.Canvas;
+
+import java.util.List;
+import java.io.ByteArrayOutputStream;
+
+public final class PlatformService {
+ private static final String TAG = "AnboxAppMgr";
+ private static final String SERVICE_NAME = "org.anbox.PlatformService";
+ private static final String DESCRIPTOR = "org.anbox.IPlatformService";
+
+ private static final int TRANSACTION_updateApplicationList = (IBinder.FIRST_CALL_TRANSACTION + 2);
+
+ // Binder handle to the host-side service; resolved lazily in connectService().
+ private IBinder mService = null;
+ private PackageManager mPm = null;
+
+ // Look up the host service binder once; no-op if already connected.
+ private void connectService() {
+ if (mService != null)
+ return;
+ mService = ServiceManager.getService(SERVICE_NAME);
+ }
+
+ public PlatformService(Context context) {
+ if (context != null) {
+ mPm = context.getPackageManager();
+ } else {
+ Log.w(TAG, "No context available");
+ }
+
+ Log.i(TAG, "Connected to platform service");
+ }
+
+ /** Tell the host that a single package was removed from the system. */
+ public void notifyPackageRemoved(String packageName) {
+ connectService();
+
+ if (mService == null)
+ return;
+
+ Log.i(TAG, "Sending package removed notification to host service");
+
+ Parcel data = Parcel.obtain();
+ Parcel reply = Parcel.obtain();
+ try {
+ data.writeInterfaceToken(DESCRIPTOR);
+ // No added or updated applications to report
+ data.writeInt(0);
+ // .. but a single removed application
+ data.writeInt(1);
+ data.writeString(packageName);
+
+ mService.transact(TRANSACTION_updateApplicationList, data, reply, 0);
+ }
+ catch (RemoteException ex) {
+ Log.w(TAG, "Failed to send updatePackageList request to remote binder service: " + ex.getMessage());
+ }
+ finally {
+ // Parcels obtained via obtain() must be recycled back to the pool.
+ data.recycle();
+ reply.recycle();
+ }
+ }
+
+ /** Serialize the full list of launchable applications and send it to the host. */
+ public void sendApplicationListUpdate() {
+ connectService();
+
+ if (mPm == null || mService == null)
+ return;
+
+ Parcel data = Parcel.obtain();
+ Parcel reply = Parcel.obtain();
+ try {
+ data.writeInterfaceToken(DESCRIPTOR);
+
+ List<ApplicationInfo> apps = mPm.getInstalledApplications(0);
+ data.writeInt(apps.size());
+ for (int n = 0; n < apps.size(); n++) {
+ ApplicationInfo appInfo = apps.get(n);
+
+ // Skip anything without a launch intent (not user-launchable).
+ Intent launchIntent = mPm.getLaunchIntentForPackage(appInfo.packageName);
+ if (launchIntent == null || launchIntent.getComponent() == null)
+ continue;
+
+ Drawable icon = null;
+ try {
+ icon = mPm.getApplicationIcon(appInfo.packageName);
+ }
+ catch (PackageManager.NameNotFoundException ex) {
+ continue;
+ }
+
+ if (icon == null)
+ continue;
+
+ String name = appInfo.name;
+ CharSequence label = appInfo.loadLabel(mPm);
+ if (label != null)
+ name = label.toString();
+
+ data.writeString(name);
+ data.writeString(appInfo.packageName);
+
+ data.writeString(launchIntent.getAction());
+ if (launchIntent.getData() != null)
+ data.writeString(launchIntent.getData().toString());
+ else
+ data.writeString("");
+ data.writeString(launchIntent.getType());
+ data.writeString(launchIntent.getComponent().getPackageName());
+ data.writeString(launchIntent.getComponent().getClassName());
+ // getCategories() may be null when the intent carries no categories.
+ if (launchIntent.getCategories() != null) {
+ data.writeInt(launchIntent.getCategories().size());
+ for (String category : launchIntent.getCategories())
+ data.writeString(category);
+ } else {
+ data.writeInt(0);
+ }
+
+ Bitmap iconBitmap = drawableToBitmap(icon);
+ ByteArrayOutputStream outStream = new ByteArrayOutputStream();
+ iconBitmap.compress(Bitmap.CompressFormat.PNG, 90, outStream);
+ data.writeByteArray(outStream.toByteArray());
+ }
+
+ // We don't have any removed applications to include in the update
+ data.writeInt(0);
+
+ mService.transact(TRANSACTION_updateApplicationList, data, reply, 0);
+ }
+ catch (RemoteException ex) {
+ Log.w(TAG, "Failed to send updatePackageList request to remote binder service: " + ex.getMessage());
+ }
+ finally {
+ // Parcels obtained via obtain() must be recycled back to the pool.
+ data.recycle();
+ reply.recycle();
+ }
+ }
+
+ // Render any Drawable into an ARGB_8888 bitmap (fast path for BitmapDrawable).
+ private Bitmap drawableToBitmap(Drawable drawable) {
+ if (drawable instanceof BitmapDrawable)
+ return ((BitmapDrawable)drawable).getBitmap();
+
+ Bitmap bitmap = Bitmap.createBitmap(drawable.getIntrinsicWidth(), drawable.getIntrinsicHeight(), Config.ARGB_8888);
+ Canvas canvas = new Canvas(bitmap);
+ drawable.setBounds(0, 0, canvas.getWidth(), canvas.getHeight());
+ drawable.draw(canvas);
+
+ return bitmap;
+ }
+}
diff --git a/src/type3_AndroidCloud/anbox-master/android/audio/MODULE_LICENSE_APACHE2 b/src/type3_AndroidCloud/anbox-master/android/audio/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
diff --git a/src/type3_AndroidCloud/anbox-master/android/audio/NOTICE b/src/type3_AndroidCloud/anbox-master/android/audio/NOTICE
new file mode 100644
index 0000000..3237da6
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/audio/NOTICE
@@ -0,0 +1,190 @@
+
+ Copyright (c) 2008-2009, The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
diff --git a/src/type3_AndroidCloud/anbox-master/android/audio/audio_hw.cpp b/src/type3_AndroidCloud/anbox-master/android/audio/audio_hw.cpp
new file mode 100644
index 0000000..f44e738
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/audio/audio_hw.cpp
@@ -0,0 +1,676 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "audio_hw_generic"
+#define LOG_NDEBUG 0
+
+/* NOTE(review): the header names below were stripped by a text-extraction
+ * step; reconstructed from the symbols this file uses (errno/strtol,
+ * pthread_mutex_*, socket/sockaddr_un, read/write/close, dprintf,
+ * str_parms_*, ALOGE, audio HAL types) — confirm against upstream. */
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#include <log/log.h>
+#include <cutils/str_parms.h>
+
+#include <hardware/hardware.h>
+#include <system/audio.h>
+#include <hardware/audio.h>
+
+#include "anbox/audio/client_info.h"
+
+#define AUDIO_DEVICE_NAME "/dev/anbox_audio"
+#define OUT_SAMPLING_RATE 44100
+#define OUT_BUFFER_SIZE 4096
+#define OUT_LATENCY_MS 20
+#define IN_SAMPLING_RATE 8000
+#define IN_BUFFER_SIZE 320
+
+// Device-level state for this HAL instance. One output and one input
+// stream at most are supported (see adev_open_*_stream).
+struct generic_audio_device {
+ struct audio_hw_device device;
+ pthread_mutex_t lock;
+ struct audio_stream_out *output;
+ struct audio_stream_in *input;
+ bool mic_mute;
+};
+
+// Playback stream: wraps the HAL stream plus the socket fd connected to
+// the Anbox audio server (fd < 0 means "not connected").
+struct generic_stream_out {
+ struct audio_stream_out stream;
+ struct generic_audio_device *dev;
+ audio_devices_t device;
+ int fd;
+};
+
+// Capture stream; mirror of generic_stream_out for the recording path.
+struct generic_stream_in {
+ struct audio_stream_in stream;
+ struct generic_audio_device *dev;
+ audio_devices_t device;
+ int fd;
+};
+
+// Playback rate is fixed at 44.1 kHz; changing it is not supported.
+static uint32_t out_get_sample_rate(const struct audio_stream *stream) {
+ return OUT_SAMPLING_RATE;
+}
+
+static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate) {
+ return -ENOSYS;
+}
+
+// Fixed 4 KiB buffer; see OUT_BUFFER_SIZE.
+static size_t out_get_buffer_size(const struct audio_stream *stream) {
+ return OUT_BUFFER_SIZE;
+}
+
+// Output is always stereo 16-bit PCM; format/channel changes are rejected
+// up front in adev_open_output_stream.
+static audio_channel_mask_t out_get_channels(const struct audio_stream *stream) {
+ return AUDIO_CHANNEL_OUT_STEREO;
+}
+
+static audio_format_t out_get_format(const struct audio_stream *stream) {
+ return AUDIO_FORMAT_PCM_16_BIT;
+}
+
+static int out_set_format(struct audio_stream *stream, audio_format_t format) {
+ return -ENOSYS;
+}
+
+// No hardware to quiesce; standby is a no-op.
+static int out_standby(struct audio_stream *stream) {
+ return 0;
+}
+
+// Dump human-readable output-stream state to fd (used by dumpsys).
+static int out_dump(const struct audio_stream *stream, int fd) {
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+
+    dprintf(fd,
+            "\tout_dump:\n"
+            "\t\tsample rate: %u\n"
+            // out_get_buffer_size() returns size_t; %u is undefined
+            // behavior on 64-bit builds, so use %zu.
+            "\t\tbuffer size: %zu\n"
+            "\t\tchannel mask: %08x\n"
+            "\t\tformat: %d\n"
+            "\t\tdevice: %08x\n"
+            "\t\taudio dev: %p\n\n",
+            out_get_sample_rate(stream),
+            out_get_buffer_size(stream),
+            out_get_channels(stream),
+            out_get_format(stream),
+            out->device,
+            out->dev);
+
+    return 0;
+}
+
+// Handle key/value parameter updates for the output stream. Only the
+// routing key is recognized; its value is parsed as a decimal device id.
+// Returns the (non-negative) str_parms_get_str result on success, a
+// negative errno-style value otherwise.
+static int out_set_parameters(struct audio_stream *stream, const char *kvpairs) {
+ struct generic_stream_out *out = (struct generic_stream_out *)stream;
+ struct str_parms *parms;
+ char value[32];
+ int ret;
+ long val;
+ char *end;
+
+ parms = str_parms_create_str(kvpairs);
+
+ ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
+ value, sizeof(value));
+ if (ret >= 0) {
+ // errno must be cleared before strtol so overflow can be detected;
+ // the (int)val == val check rejects values that don't fit in int.
+ errno = 0;
+ val = strtol(value, &end, 10);
+ if (errno == 0 && (end != NULL) && (*end == '\0') && ((int)val == val)) {
+ out->device = (int)val;
+ } else {
+ ret = -EINVAL;
+ }
+ }
+
+ str_parms_destroy(parms);
+ return ret;
+}
+
+// Answer a parameter query for the output stream. Only the routing key is
+// supported; anything else is echoed back. The caller owns (and frees)
+// the returned heap string.
+static char *out_get_parameters(const struct audio_stream *stream, const char *keys) {
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    struct str_parms *query = str_parms_create_str(keys);
+    char *str;
+    char value[256];
+    struct str_parms *reply = str_parms_create();
+    int ret;
+
+    ret = str_parms_get_str(query, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value));
+    if (ret >= 0) {
+        str_parms_add_int(reply, AUDIO_PARAMETER_STREAM_ROUTING, out->device);
+        // str_parms_to_str() already returns a malloc'd string; return it
+        // directly instead of strdup'ing it and leaking the original.
+        str = str_parms_to_str(reply);
+    } else {
+        str = strdup(keys);
+    }
+
+    str_parms_destroy(query);
+    str_parms_destroy(reply);
+    return str;
+}
+
+// Report a fixed nominal latency; the real socket latency is not measured.
+static uint32_t out_get_latency(const struct audio_stream_out *stream) {
+ return OUT_LATENCY_MS;
+}
+
+// Volume is handled by the framework mixer, not this HAL.
+static int out_set_volume(struct audio_stream_out *stream, float left,
+ float right) {
+ return -ENOSYS;
+}
+
+// Forward PCM data to the Anbox audio server. Returns the number of bytes
+// written, `bytes` unchanged if no connection exists, or -1 on write error.
+// NOTE(review): the device lock is held across a potentially blocking
+// write(), serializing with all other HAL calls — confirm this is intended.
+static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
+                         size_t bytes) {
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    struct generic_audio_device *adev = out->dev;
+    // Keep the write() result in a signed variable: the old code assigned
+    // a possible -1 into the size_t parameter, wrapping it to SIZE_MAX.
+    ssize_t result = (ssize_t) bytes;
+
+    pthread_mutex_lock(&adev->lock);
+    if (out->fd >= 0)
+        result = write(out->fd, buffer, bytes);
+    pthread_mutex_unlock(&adev->lock);
+
+    return result;
+}
+
+// DSP frame position is not tracked for the socket transport.
+static int out_get_render_position(const struct audio_stream_out *stream,
+ uint32_t *dsp_frames) {
+ return -ENOSYS;
+}
+
+// Audio effects are accepted but ignored.
+static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect) {
+ return 0;
+}
+
+static int out_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect) {
+ return 0;
+}
+
+static int out_get_next_write_timestamp(const struct audio_stream_out *stream,
+ int64_t *timestamp) {
+ return -ENOSYS;
+}
+
+// Capture rate is fixed at 8 kHz mono 16-bit PCM; see the matching
+// validation in adev_open_input_stream.
+static uint32_t in_get_sample_rate(const struct audio_stream *stream) {
+ return IN_SAMPLING_RATE;
+}
+
+static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate) {
+ return -ENOSYS;
+}
+
+static size_t in_get_buffer_size(const struct audio_stream *stream) {
+ return IN_BUFFER_SIZE;
+}
+
+static audio_channel_mask_t in_get_channels(const struct audio_stream *stream) {
+ return AUDIO_CHANNEL_IN_MONO;
+}
+
+static audio_format_t in_get_format(const struct audio_stream *stream) {
+ return AUDIO_FORMAT_PCM_16_BIT;
+}
+
+static int in_set_format(struct audio_stream *stream, audio_format_t format) {
+ return -ENOSYS;
+}
+
+// No hardware to quiesce; standby is a no-op.
+static int in_standby(struct audio_stream *stream) {
+ return 0;
+}
+
+// Dump human-readable input-stream state to fd (used by dumpsys).
+static int in_dump(const struct audio_stream *stream, int fd) {
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+
+    dprintf(fd,
+            "\tin_dump:\n"
+            "\t\tsample rate: %u\n"
+            // in_get_buffer_size() returns size_t; %u is undefined
+            // behavior on 64-bit builds, so use %zu.
+            "\t\tbuffer size: %zu\n"
+            "\t\tchannel mask: %08x\n"
+            "\t\tformat: %d\n"
+            "\t\tdevice: %08x\n"
+            "\t\taudio dev: %p\n\n",
+            in_get_sample_rate(stream),
+            in_get_buffer_size(stream),
+            in_get_channels(stream),
+            in_get_format(stream),
+            in->device,
+            in->dev);
+
+    return 0;
+}
+
+// Handle key/value parameter updates for the input stream; mirrors
+// out_set_parameters. Only the routing key is recognized.
+static int in_set_parameters(struct audio_stream *stream, const char *kvpairs) {
+ struct generic_stream_in *in = (struct generic_stream_in *)stream;
+ struct str_parms *parms;
+ char value[32];
+ int ret;
+ long val;
+ char *end;
+
+ parms = str_parms_create_str(kvpairs);
+
+ ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
+ value, sizeof(value));
+ if (ret >= 0) {
+ // errno must be cleared before strtol so overflow can be detected;
+ // the (int)val == val check rejects values that don't fit in int.
+ errno = 0;
+ val = strtol(value, &end, 10);
+ if ((errno == 0) && (end != NULL) && (*end == '\0') && ((int)val == val)) {
+ in->device = (int)val;
+ } else {
+ ret = -EINVAL;
+ }
+ }
+
+ str_parms_destroy(parms);
+ return ret;
+}
+
+// Answer a parameter query for the input stream; mirrors
+// out_get_parameters. The caller owns (and frees) the returned string.
+static char *in_get_parameters(const struct audio_stream *stream,
+                               const char *keys) {
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    struct str_parms *query = str_parms_create_str(keys);
+    char *str;
+    char value[256];
+    struct str_parms *reply = str_parms_create();
+    int ret;
+
+    ret = str_parms_get_str(query, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value));
+    if (ret >= 0) {
+        str_parms_add_int(reply, AUDIO_PARAMETER_STREAM_ROUTING, in->device);
+        // str_parms_to_str() already returns a malloc'd string; return it
+        // directly instead of strdup'ing it and leaking the original.
+        str = str_parms_to_str(reply);
+    } else {
+        str = strdup(keys);
+    }
+
+    str_parms_destroy(query);
+    str_parms_destroy(reply);
+    return str;
+}
+
+// Input gain is ignored; report success so the framework keeps going.
+static int in_set_gain(struct audio_stream_in *stream, float gain) {
+ return 0;
+}
+
+// Read captured PCM from the Anbox audio server. Returns the number of
+// bytes read, `bytes` unchanged if no connection exists, or -1 on error.
+// When the mic is muted the buffer is zeroed so callers see silence.
+static ssize_t in_read(struct audio_stream_in *stream, void *buffer,
+                       size_t bytes) {
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    struct generic_audio_device *adev = in->dev;
+    // BUG FIX: the old code assigned read()'s ssize_t result into the
+    // size_t parameter, so a -1 error became SIZE_MAX, made `bytes > 0`
+    // true, and triggered memset(buffer, 0, SIZE_MAX) when muted.
+    ssize_t result = (ssize_t) bytes;
+
+    pthread_mutex_lock(&adev->lock);
+    if (in->fd >= 0)
+        result = read(in->fd, buffer, bytes);
+    if (adev->mic_mute && result > 0) {
+        memset(buffer, 0, (size_t) result);
+    }
+    pthread_mutex_unlock(&adev->lock);
+
+    return result;
+}
+
+// Overrun accounting is not implemented; always report zero lost frames.
+static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream) {
+ return 0;
+}
+
+// Audio effects are accepted but ignored.
+static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect) {
+ return 0;
+}
+
+static int in_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect) {
+ return 0;
+}
+
+// Open a connection of the given type (Playback/Recording) to the Anbox
+// audio server at AUDIO_DEVICE_NAME. Returns the connected socket fd, or
+// a negative errno-style value on failure.
+static int connect_audio_server(const anbox::audio::ClientInfo::Type &type) {
+    int fd = socket(AF_LOCAL, SOCK_STREAM, 0);
+    if (fd < 0)
+        return -errno;
+
+    struct sockaddr_un addr;
+    memset(&addr, 0, sizeof(addr));
+    addr.sun_family = AF_UNIX;
+    // Copy at most sizeof(sun_path)-1 bytes: strncpy does not guarantee a
+    // terminating NUL when the source fills the buffer.
+    strncpy(addr.sun_path, AUDIO_DEVICE_NAME, sizeof(addr.sun_path) - 1);
+
+    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+        close(fd);
+        return -errno;
+    }
+
+    // We will send out client type information to the server and the
+    // server will either deny the request by closing the connection
+    // or by sending us the approved client details back.
+    anbox::audio::ClientInfo client_info{type};
+    if (::write(fd, &client_info, sizeof(client_info)) < 0) {
+        close(fd);
+        return -EIO;
+    }
+
+    auto bytes_read = ::read(fd, &client_info, sizeof(client_info));
+    if (bytes_read < 0) {
+        close(fd);
+        return -EIO;
+    }
+
+    // FIXME once we have real client details we need to check if we
+    // got everything we need or if anything is missing. A short read
+    // (bytes_read < sizeof(client_info)) is also accepted for now.
+
+    // This is a success path; log at INFO, not ERROR.
+    ALOGI("Successfully connected Anbox audio server");
+
+    return fd;
+}
+
+// Open the single supported playback stream. Validates the requested
+// config (stereo / 16-bit PCM / 44.1 kHz only), connects to the Anbox
+// audio server, and publishes the stream in adev->output.
+// Returns 0 on success; -ENOSYS if a stream is already open; -EINVAL for
+// an unsupported config (the supported one is written back into *config);
+// a negative errno from the server connection otherwise.
+static int adev_open_output_stream(struct audio_hw_device *dev,
+                                   audio_io_handle_t handle,
+                                   audio_devices_t devices,
+                                   audio_output_flags_t flags,
+                                   struct audio_config *config,
+                                   struct audio_stream_out **stream_out,
+                                   const char *address __unused) {
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+    struct generic_stream_out *out;
+    int ret = 0;
+    int fd = -1;
+
+    pthread_mutex_lock(&adev->lock);
+    if (adev->output != NULL) {
+        ret = -ENOSYS;  // only one output stream is supported
+        goto error;
+    }
+
+    // Validate the config BEFORE connecting: the old code connected first
+    // and leaked the socket fd whenever the config was rejected.
+    if ((config->format != AUDIO_FORMAT_PCM_16_BIT) ||
+        (config->channel_mask != AUDIO_CHANNEL_OUT_STEREO) ||
+        (config->sample_rate != OUT_SAMPLING_RATE)) {
+        ALOGE("Error opening output stream format %d, channel_mask %04x, sample_rate %u",
+              config->format, config->channel_mask, config->sample_rate);
+        config->format = AUDIO_FORMAT_PCM_16_BIT;
+        config->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+        config->sample_rate = OUT_SAMPLING_RATE;
+        ret = -EINVAL;
+        goto error;
+    }
+
+    fd = connect_audio_server(anbox::audio::ClientInfo::Type::Playback);
+    if (fd < 0) {
+        ret = fd;
+        ALOGE("Failed to connect with Anbox audio servers (err %d)", ret);
+        goto error;
+    }
+
+    out = (struct generic_stream_out *)calloc(1, sizeof(struct generic_stream_out));
+    if (out == NULL) {
+        close(fd);  // don't leak the server connection on OOM
+        ret = -ENOMEM;
+        goto error;
+    }
+    out->fd = fd;
+
+    out->stream.common.get_sample_rate = out_get_sample_rate;
+    out->stream.common.set_sample_rate = out_set_sample_rate;
+    out->stream.common.get_buffer_size = out_get_buffer_size;
+    out->stream.common.get_channels = out_get_channels;
+    out->stream.common.get_format = out_get_format;
+    out->stream.common.set_format = out_set_format;
+    out->stream.common.standby = out_standby;
+    out->stream.common.dump = out_dump;
+    out->stream.common.set_parameters = out_set_parameters;
+    out->stream.common.get_parameters = out_get_parameters;
+    out->stream.common.add_audio_effect = out_add_audio_effect;
+    out->stream.common.remove_audio_effect = out_remove_audio_effect;
+    out->stream.get_latency = out_get_latency;
+    out->stream.set_volume = out_set_volume;
+    out->stream.write = out_write;
+    out->stream.get_render_position = out_get_render_position;
+    out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
+
+    out->dev = adev;
+    out->device = devices;
+    adev->output = (struct audio_stream_out *)out;
+    *stream_out = &out->stream;
+
+error:
+    pthread_mutex_unlock(&adev->lock);
+
+    return ret;
+}
+
+// Close and free the playback stream, if it is the one we published.
+static void adev_close_output_stream(struct audio_hw_device *dev,
+                                     struct audio_stream_out *stream) {
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+    pthread_mutex_lock(&adev->lock);
+    if (stream != NULL && stream == adev->output) {
+        struct generic_stream_out *out = (struct generic_stream_out *)stream;
+        // BUG FIX: the server socket was never closed, leaking one fd per
+        // open/close cycle.
+        if (out->fd >= 0)
+            close(out->fd);
+        free(out);
+        adev->output = NULL;
+    }
+    pthread_mutex_unlock(&adev->lock);
+}
+
+// Device-level parameters are accepted and ignored.
+static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs) {
+ return 0;
+}
+
+// No device-level parameters are exposed; caller frees the empty string.
+static char *adev_get_parameters(const struct audio_hw_device *dev,
+ const char *keys) {
+ return strdup("");
+}
+
+// Nothing to initialize beyond adev_open; always healthy.
+static int adev_init_check(const struct audio_hw_device *dev) {
+ return 0;
+}
+
+static int adev_set_voice_volume(struct audio_hw_device *dev, float volume) {
+ return 0;
+}
+
+// Master volume/mute are not supported; the framework falls back to
+// software handling when it sees -ENOSYS.
+static int adev_set_master_volume(struct audio_hw_device *dev, float volume) {
+ return -ENOSYS;
+}
+
+static int adev_get_master_volume(struct audio_hw_device *dev, float *volume) {
+ return -ENOSYS;
+}
+
+static int adev_set_master_mute(struct audio_hw_device *dev, bool muted) {
+ return -ENOSYS;
+}
+
+static int adev_get_master_mute(struct audio_hw_device *dev, bool *muted) {
+ return -ENOSYS;
+}
+
+// Audio mode (normal/ringtone/in-call) changes require no action here.
+static int adev_set_mode(struct audio_hw_device *dev, audio_mode_t mode) {
+ return 0;
+}
+
+// Record the mic-mute flag under the device lock; in_read() consults it
+// to zero out captured data.
+static int adev_set_mic_mute(struct audio_hw_device *dev, bool state) {
+ struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+ pthread_mutex_lock(&adev->lock);
+ adev->mic_mute = state;
+ pthread_mutex_unlock(&adev->lock);
+ return 0;
+}
+
+// Read back the mic-mute flag set by adev_set_mic_mute.
+static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state) {
+ struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+ pthread_mutex_lock(&adev->lock);
+ *state = adev->mic_mute;
+ pthread_mutex_unlock(&adev->lock);
+
+ return 0;
+}
+
+// Input buffer size is fixed regardless of the requested config.
+static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
+ const struct audio_config *config) {
+ return IN_BUFFER_SIZE;
+}
+
+// Open the single supported capture stream. Validates the requested
+// config (mono / 16-bit PCM / 8 kHz only), connects to the Anbox audio
+// server, and publishes the stream in adev->input.
+// Returns 0 on success; -ENOSYS if a stream is already open; -EINVAL for
+// an unsupported config (the supported one is written back into *config);
+// -ENOMEM on allocation failure; a negative errno from the connection.
+static int adev_open_input_stream(struct audio_hw_device *dev,
+                                  audio_io_handle_t handle,
+                                  audio_devices_t devices,
+                                  struct audio_config *config,
+                                  struct audio_stream_in **stream_in,
+                                  audio_input_flags_t flags __unused,
+                                  const char *address __unused,
+                                  audio_source_t source __unused) {
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+    struct generic_stream_in *in;
+    int ret = 0;
+    int fd = -1;
+
+    pthread_mutex_lock(&adev->lock);
+    if (adev->input != NULL) {
+        ret = -ENOSYS;  // only one input stream is supported
+        goto error;
+    }
+
+    if ((config->format != AUDIO_FORMAT_PCM_16_BIT) ||
+        (config->channel_mask != AUDIO_CHANNEL_IN_MONO) ||
+        (config->sample_rate != IN_SAMPLING_RATE)) {
+        ALOGE("Error opening input stream format %d, channel_mask %04x, sample_rate %u",
+              config->format, config->channel_mask, config->sample_rate);
+        config->format = AUDIO_FORMAT_PCM_16_BIT;
+        config->channel_mask = AUDIO_CHANNEL_IN_MONO;
+        config->sample_rate = IN_SAMPLING_RATE;
+        ret = -EINVAL;
+        goto error;
+    }
+
+    fd = connect_audio_server(anbox::audio::ClientInfo::Type::Recording);
+    if (fd < 0) {
+        ret = fd;
+        ALOGE("Failed to connect with Anbox audio servers (err %d)", ret);
+        goto error;
+    }
+
+    in = (struct generic_stream_in *)calloc(1, sizeof(struct generic_stream_in));
+    if (in == NULL) {
+        // BUG FIX: calloc was unchecked; on OOM the old code dereferenced
+        // NULL and leaked the server connection.
+        close(fd);
+        ret = -ENOMEM;
+        goto error;
+    }
+    in->fd = fd;
+
+    in->stream.common.get_sample_rate = in_get_sample_rate;
+    in->stream.common.set_sample_rate = in_set_sample_rate;
+    in->stream.common.get_buffer_size = in_get_buffer_size;
+    in->stream.common.get_channels = in_get_channels;
+    in->stream.common.get_format = in_get_format;
+    in->stream.common.set_format = in_set_format;
+    in->stream.common.standby = in_standby;
+    in->stream.common.dump = in_dump;
+    in->stream.common.set_parameters = in_set_parameters;
+    in->stream.common.get_parameters = in_get_parameters;
+    in->stream.common.add_audio_effect = in_add_audio_effect;
+    in->stream.common.remove_audio_effect = in_remove_audio_effect;
+    in->stream.set_gain = in_set_gain;
+    in->stream.read = in_read;
+    in->stream.get_input_frames_lost = in_get_input_frames_lost;
+
+    in->dev = adev;
+    in->device = devices;
+    adev->input = (struct audio_stream_in *)in;
+    *stream_in = &in->stream;
+
+error:
+    pthread_mutex_unlock(&adev->lock);
+
+    return ret;
+}
+
+// Close and free the capture stream, if it is the one we published.
+static void adev_close_input_stream(struct audio_hw_device *dev,
+                                    struct audio_stream_in *stream) {
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+    pthread_mutex_lock(&adev->lock);
+    if (stream != NULL && stream == adev->input) {
+        struct generic_stream_in *in = (struct generic_stream_in *)stream;
+        // BUG FIX: the server socket was never closed, leaking one fd per
+        // open/close cycle.
+        if (in->fd >= 0)
+            close(in->fd);
+        free(in);
+        adev->input = NULL;
+    }
+    pthread_mutex_unlock(&adev->lock);
+}
+
+// Dump device state, then delegate to the per-stream dump callbacks.
+static int adev_dump(const audio_hw_device_t *dev, int fd) {
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+    // (removed: an unused 256-byte scratch buffer that was never written)
+    dprintf(fd,
+            "\nadev_dump:\n"
+            "\tmic_mute: %s\n"
+            "\toutput: %p\n"
+            "\tinput: %p\n\n",
+            adev->mic_mute ? "true" : "false",
+            adev->output,
+            adev->input);
+
+    if (adev->output != NULL)
+        out_dump((const struct audio_stream *)adev->output, fd);
+    if (adev->input != NULL)
+        in_dump((const struct audio_stream *)adev->input, fd);
+
+    return 0;
+}
+
+// hw_device_t close hook: tear down any open streams, then free the
+// device. The close helpers ignore streams that are NULL or not ours.
+static int adev_close(hw_device_t *dev) {
+ struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+ adev_close_output_stream((struct audio_hw_device *)dev, adev->output);
+ adev_close_input_stream((struct audio_hw_device *)dev, adev->input);
+
+ free(dev);
+ return 0;
+}
+
+// hw_module_methods_t open hook: allocate the device and wire up every
+// audio_hw_device callback. Returns 0 on success, -EINVAL for an unknown
+// interface name, -ENOMEM on allocation failure.
+static int adev_open(const hw_module_t *module, const char *name,
+                     hw_device_t **device) {
+    struct generic_audio_device *adev;
+
+    if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
+        return -EINVAL;
+
+    adev = (struct generic_audio_device*) calloc(1, sizeof(struct generic_audio_device));
+    if (adev == NULL)
+        return -ENOMEM;  // calloc was previously unchecked
+
+    // Initialize the lock explicitly rather than relying on zeroed memory
+    // happening to be a valid unlocked mutex.
+    pthread_mutex_init(&adev->lock, NULL);
+
+    adev->device.common.tag = HARDWARE_DEVICE_TAG;
+    adev->device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
+    adev->device.common.module = (struct hw_module_t *)module;
+    adev->device.common.close = adev_close;
+
+    adev->device.init_check = adev_init_check;
+    adev->device.set_voice_volume = adev_set_voice_volume;
+    adev->device.set_master_volume = adev_set_master_volume;
+    adev->device.get_master_volume = adev_get_master_volume;
+    adev->device.set_master_mute = adev_set_master_mute;
+    adev->device.get_master_mute = adev_get_master_mute;
+    adev->device.set_mode = adev_set_mode;
+    adev->device.set_mic_mute = adev_set_mic_mute;
+    adev->device.get_mic_mute = adev_get_mic_mute;
+    adev->device.set_parameters = adev_set_parameters;
+    adev->device.get_parameters = adev_get_parameters;
+    adev->device.get_input_buffer_size = adev_get_input_buffer_size;
+    adev->device.open_output_stream = adev_open_output_stream;
+    adev->device.close_output_stream = adev_close_output_stream;
+    adev->device.open_input_stream = adev_open_input_stream;
+    adev->device.close_input_stream = adev_close_input_stream;
+    adev->device.dump = adev_dump;
+
+    *device = &adev->device.common;
+
+    return 0;
+}
+
+// Entry points the Android HAL loader uses to instantiate this module.
+static struct hw_module_methods_t hal_module_methods = {
+ .open = adev_open,
+};
+
+// Module descriptor; HAL_MODULE_INFO_SYM is the well-known symbol the
+// loader resolves via dlsym().
+struct audio_module HAL_MODULE_INFO_SYM = {
+ .common = {
+ .tag = HARDWARE_MODULE_TAG,
+ .module_api_version = AUDIO_MODULE_API_VERSION_0_1,
+ .hal_api_version = HARDWARE_HAL_API_VERSION,
+ .id = AUDIO_HARDWARE_MODULE_ID,
+ .name = "Anbox audio HW HAL",
+ .author = "The Android Open Source Project",
+ .methods = &hal_module_methods,
+ },
+};
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/Android.mk b/src/type3_AndroidCloud/anbox-master/android/camera/Android.mk
new file mode 100644
index 0000000..26d4936
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/Android.mk
@@ -0,0 +1,152 @@
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+# Emulator camera module########################################################
+
+# Variables shared by all camera HAL module variants (goldfish/vbox and ranchu).
+emulator_camera_module_relative_path := hw
+emulator_camera_cflags := -fno-short-enums -DQEMU_HARDWARE
+emulator_camera_cflags += -Wno-unused-parameter -Wno-missing-field-initializers
+emulator_camera_clang_flags := -Wno-c++11-narrowing
+emulator_camera_shared_libraries := \
+ libbinder \
+ liblog \
+ libutils \
+ libcutils \
+ libcamera_client \
+ libui \
+ libdl \
+ libjpeg \
+ libcamera_metadata
+
+# Header search paths: libjpeg, media hardware headers and the host-side
+# OpenGL transport shared with the rest of the emulator.
+emulator_camera_c_includes := external/jpeg \
+ frameworks/native/include/media/hardware \
+ $(LOCAL_PATH)/../opengl/system/OpenglSystemCommon \
+ $(call include-path-for, camera)
+
+# Sources cover the emulated camera HAL v1, v2 and v3 implementations.
+emulator_camera_src := \
+ EmulatedCameraHal.cpp \
+ EmulatedCameraFactory.cpp \
+ EmulatedCameraHotplugThread.cpp \
+ EmulatedBaseCamera.cpp \
+ EmulatedCamera.cpp \
+ EmulatedCameraDevice.cpp \
+ EmulatedQemuCamera.cpp \
+ EmulatedQemuCameraDevice.cpp \
+ EmulatedFakeCamera.cpp \
+ EmulatedFakeCameraDevice.cpp \
+ Converters.cpp \
+ PreviewWindow.cpp \
+ CallbackNotifier.cpp \
+ QemuClient.cpp \
+ JpegCompressor.cpp \
+ EmulatedCamera2.cpp \
+ EmulatedFakeCamera2.cpp \
+ EmulatedQemuCamera2.cpp \
+ fake-pipeline2/Scene.cpp \
+ fake-pipeline2/Sensor.cpp \
+ fake-pipeline2/JpegCompressor.cpp \
+ EmulatedCamera3.cpp \
+ EmulatedFakeCamera3.cpp
+
+# Emulated camera - goldfish / vbox_x86 build###################################
+
+LOCAL_MODULE_RELATIVE_PATH := ${emulator_camera_module_relative_path}
+LOCAL_CFLAGS := ${emulator_camera_cflags}
+LOCAL_CLANG_CFLAGS += ${emulator_camera_clang_flags}
+
+LOCAL_SHARED_LIBRARIES := ${emulator_camera_shared_libraries}
+LOCAL_C_INCLUDES += ${emulator_camera_c_includes}
+LOCAL_SRC_FILES := ${emulator_camera_src}
+
+# Module name depends on the target product so the right variant is picked up.
+ifeq ($(TARGET_PRODUCT),vbox_x86)
+LOCAL_MODULE := camera.vbox_x86
+else
+LOCAL_MODULE := camera.goldfish
+endif
+
+include $(BUILD_SHARED_LIBRARY)
+
+# Emulator camera - ranchu build################################################
+
+include ${CLEAR_VARS}
+
+LOCAL_MODULE_RELATIVE_PATH := ${emulator_camera_module_relative_path}
+LOCAL_CFLAGS := ${emulator_camera_cflags}
+LOCAL_CLANG_CFLAGS += ${emulator_camera_clang_flags}
+
+LOCAL_SHARED_LIBRARIES := ${emulator_camera_shared_libraries}
+LOCAL_C_INCLUDES += ${emulator_camera_c_includes}
+LOCAL_SRC_FILES := ${emulator_camera_src}
+
+LOCAL_MODULE := camera.ranchu
+
+include $(BUILD_SHARED_LIBRARY)
+
+# JPEG stub#####################################################################
+
+# The JPEG stub depends on libskia/libandroid_runtime, unavailable in PDK builds.
+ifneq ($(TARGET_BUILD_PDK),true)
+
+include $(CLEAR_VARS)
+
+jpeg_module_relative_path := hw
+jpeg_cflags := -fno-short-enums -DQEMU_HARDWARE
+jpeg_cflags += -Wno-unused-parameter
+jpeg_clang_flags += -Wno-c++11-narrowing
+jpeg_shared_libraries := \
+ libcutils \
+ liblog \
+ libskia \
+ libandroid_runtime
+jpeg_c_includes := external/libjpeg-turbo \
+ external/skia/include/core/ \
+ frameworks/base/core/jni/android/graphics \
+ frameworks/native/include
+jpeg_src := JpegStub.cpp
+
+# JPEG stub - goldfish build####################################################
+
+LOCAL_MODULE_RELATIVE_PATH := ${jpeg_module_relative_path}
+LOCAL_CFLAGS += ${jpeg_cflags}
+# Fixed: previously referenced ${jpeg_clangflags}, an undefined variable (the
+# flags above are assigned to jpeg_clang_flags), so -Wno-c++11-narrowing was
+# silently dropped from the clang build.
+LOCAL_CLANG_CFLAGS += ${jpeg_clang_flags}
+
+LOCAL_SHARED_LIBRARIES := ${jpeg_shared_libraries}
+LOCAL_C_INCLUDES += ${jpeg_c_includes}
+LOCAL_SRC_FILES := ${jpeg_src}
+
+LOCAL_MODULE := camera.goldfish.jpeg
+
+include $(BUILD_SHARED_LIBRARY)
+
+# JPEG stub - ranchu build######################################################
+
+include ${CLEAR_VARS}
+
+LOCAL_MODULE := camera.ranchu.jpeg
+
+LOCAL_MODULE_RELATIVE_PATH := ${jpeg_module_relative_path}
+LOCAL_CFLAGS += ${jpeg_cflags}
+# Fixed: previously referenced ${jpeg_clangflags}, an undefined variable (the
+# flags are assigned to jpeg_clang_flags above).
+LOCAL_CLANG_CFLAGS += ${jpeg_clang_flags}
+
+LOCAL_SHARED_LIBRARIES := ${jpeg_shared_libraries}
+LOCAL_C_INCLUDES += ${jpeg_c_includes}
+LOCAL_SRC_FILES := ${jpeg_src}
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif # !PDK
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/CallbackNotifier.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/CallbackNotifier.cpp
new file mode 100644
index 0000000..0dbd50d
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/CallbackNotifier.cpp
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class CallbackNotifier that manages callbacks set
+ * via set_callbacks, enable_msg_type, and disable_msg_type camera HAL API.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_CallbackNotifier"
+/* NOTE(review): the include targets below were lost during extraction (the
+ * angle-bracket payloads were stripped); restored from the upstream goldfish
+ * camera HAL — verify against the original patch. */
+#include <cutils/log.h>
+#include <MetadataBufferType.h>
+#include "EmulatedCameraDevice.h"
+#include "CallbackNotifier.h"
+#include "JpegCompressor.h"
+
+namespace android {
+
+/* String representation of camera messages. The order of entries must match
+ * the bit positions of the corresponding CAMERA_MSG_XXX flags, because
+ * GetMessageStrings() below indexes this array by bit number. */
+static const char* lCameraMessages[] =
+{
+ "CAMERA_MSG_ERROR",
+ "CAMERA_MSG_SHUTTER",
+ "CAMERA_MSG_FOCUS",
+ "CAMERA_MSG_ZOOM",
+ "CAMERA_MSG_PREVIEW_FRAME",
+ "CAMERA_MSG_VIDEO_FRAME",
+ "CAMERA_MSG_POSTVIEW_FRAME",
+ "CAMERA_MSG_RAW_IMAGE",
+ "CAMERA_MSG_COMPRESSED_IMAGE",
+ "CAMERA_MSG_RAW_IMAGE_NOTIFY",
+ "CAMERA_MSG_PREVIEW_METADATA"
+};
+/* Number of known message names. */
+static const int lCameraMessagesNum = sizeof(lCameraMessages) / sizeof(char*);
+
+/* Collects the string names of every message bit set in 'msg'.
+ * Param:
+ *  msg - Bitmask of CAMERA_MSG_XXX values to translate,
+ *  strings - Destination array for the selected names,
+ *  max - Capacity of the destination array.
+ * Return:
+ *  Number of entries stored into 'strings'.
+ */
+static int GetMessageStrings(uint32_t msg, const char** strings, int max)
+{
+    int saved = 0;
+    /* Scan bits low-to-high; stop early once no bits remain or the
+     * destination is full. */
+    for (int bit = 0; msg != 0 && saved < max && bit < lCameraMessagesNum;
+         bit++, msg >>= 1) {
+        if (msg & 0x1) {
+            strings[saved] = lCameraMessages[bit];
+            saved++;
+        }
+    }
+    return saved;
+}
+
+/* Logs (via ALOGV) the names of all messages enabled in the given mask. */
+static void PrintMessages(uint32_t msg)
+{
+    const char* names[lCameraMessagesNum];
+    const int count = GetMessageStrings(msg, names, lCameraMessagesNum);
+    for (int i = 0; i < count; i++) {
+        ALOGV(" %s", names[i]);
+    }
+}
+
+/* Constructs the notifier with all callbacks cleared and defaults applied:
+ * JPEG quality 90, video recording and picture taking disabled. */
+CallbackNotifier::CallbackNotifier()
+ : mNotifyCB(NULL),
+ mDataCB(NULL),
+ mDataCBTimestamp(NULL),
+ mGetMemoryCB(NULL),
+ mCBOpaque(NULL),
+ mLastFrameTimestamp(0),
+ mFrameRefreshFreq(0),
+ mMessageEnabler(0),
+ mJpegQuality(90),
+ mVideoRecEnabled(false),
+ mTakingPicture(false)
+{
+}
+
+/* Destructor: nothing to release — callback memory is owned by the framework. */
+CallbackNotifier::~CallbackNotifier()
+{
+}
+
+/****************************************************************************
+ * Camera API
+ ***************************************************************************/
+
+/* Registers the framework callbacks delivered via
+ * camera_device_ops_t::set_callbacks; replaces any previously installed set. */
+void CallbackNotifier::setCallbacks(camera_notify_callback notify_cb,
+                                    camera_data_callback data_cb,
+                                    camera_data_timestamp_callback data_cb_timestamp,
+                                    camera_request_memory get_memory,
+                                    void* user)
+{
+    ALOGV("%s: %p, %p, %p, %p (%p)",
+          __FUNCTION__, notify_cb, data_cb, data_cb_timestamp, get_memory, user);
+
+    Mutex::Autolock lock(&mObjectLock);
+    mCBOpaque = user;
+    mGetMemoryCB = get_memory;
+    mDataCBTimestamp = data_cb_timestamp;
+    mDataCB = data_cb;
+    mNotifyCB = notify_cb;
+}
+
+/* Turns on the messages selected in the CAMERA_MSG_XXX bitmask 'msg_type'. */
+void CallbackNotifier::enableMessage(uint msg_type)
+{
+    ALOGV("%s: msg_type = 0x%x", __FUNCTION__, msg_type);
+    PrintMessages(msg_type);
+
+    Mutex::Autolock lock(&mObjectLock);
+    mMessageEnabler |= msg_type;
+    ALOGV("**** Currently enabled messages:");
+    PrintMessages(mMessageEnabler);
+}
+
+/* Turns off the messages selected in the CAMERA_MSG_XXX bitmask 'msg_type'. */
+void CallbackNotifier::disableMessage(uint msg_type)
+{
+    ALOGV("%s: msg_type = 0x%x", __FUNCTION__, msg_type);
+    PrintMessages(msg_type);
+
+    Mutex::Autolock lock(&mObjectLock);
+    mMessageEnabler &= ~msg_type;
+    ALOGV("**** Currently enabled messages:");
+    PrintMessages(mMessageEnabler);
+}
+
+/* Enables video recording at the requested frame rate.
+ * Param:
+ *  fps - Frames per second; used to derive the minimum interval between
+ *        frames pushed through the data-timestamp callback.
+ * Return:
+ *  NO_ERROR on success, or BAD_VALUE if fps is not positive (the original
+ *  code divided by fps unchecked, crashing on fps == 0).
+ */
+status_t CallbackNotifier::enableVideoRecording(int fps)
+{
+    ALOGV("%s: FPS = %d", __FUNCTION__, fps);
+
+    /* Guard against division by zero / nonsensical frame rates. */
+    if (fps <= 0) {
+        ALOGE("%s: Invalid FPS %d", __FUNCTION__, fps);
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock locker(&mObjectLock);
+    mVideoRecEnabled = true;
+    mLastFrameTimestamp = 0;
+    mFrameRefreshFreq = 1000000000LL / fps;
+
+    return NO_ERROR;
+}
+
+/* Stops video recording and resets the frame-pacing state. */
+void CallbackNotifier::disableVideoRecording()
+{
+    ALOGV("%s:", __FUNCTION__);
+
+    Mutex::Autolock lock(&mObjectLock);
+    mFrameRefreshFreq = 0;
+    mLastFrameTimestamp = 0;
+    mVideoRecEnabled = false;
+}
+
+/* Returns a previously delivered video frame buffer to the HAL: finds the
+ * camera_memory_t whose data pointer matches 'opaque', releases it and drops
+ * it from the bookkeeping list. No-op if the pointer is unknown.
+ * NOTE(review): extraction stripped the template argument from List; restored
+ * as List<camera_memory_t*> to match the mCameraMemoryTs element type used
+ * here ((*it)->data / (*it)->release) — confirm against the header.
+ */
+void CallbackNotifier::releaseRecordingFrame(const void* opaque)
+{
+    List<camera_memory_t*>::iterator it = mCameraMemoryTs.begin();
+    for ( ; it != mCameraMemoryTs.end(); ++it ) {
+        if ( (*it)->data == opaque ) {
+            (*it)->release( *it );
+            mCameraMemoryTs.erase(it);
+            break;
+        }
+    }
+}
+
+/* Handler for camera_device_ops_t::store_meta_data_in_buffers.
+ * Metadata-in-buffers mode is not supported: requests to enable it are
+ * rejected, while disabling it is silently accepted. */
+status_t CallbackNotifier::storeMetaDataInBuffers(bool enable)
+{
+    if (enable) {
+        return INVALID_OPERATION;
+    }
+    return NO_ERROR;
+}
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+/* Resets the notifier to its pristine state: clears the message mask and all
+ * registered callbacks, and restores the construction-time defaults. */
+void CallbackNotifier::cleanupCBNotifier()
+{
+    Mutex::Autolock lock(&mObjectLock);
+    /* Drop the registered callbacks. */
+    mNotifyCB = NULL;
+    mDataCB = NULL;
+    mDataCBTimestamp = NULL;
+    mGetMemoryCB = NULL;
+    mCBOpaque = NULL;
+    /* Restore defaults. */
+    mMessageEnabler = 0;
+    mLastFrameTimestamp = 0;
+    mFrameRefreshFreq = 0;
+    mJpegQuality = 90;
+    mVideoRecEnabled = false;
+    mTakingPicture = false;
+}
+
+/* Delivers a captured frame to the framework via the registered callbacks.
+ * Depending on enabled messages and current state this handles:
+ *  - CAMERA_MSG_VIDEO_FRAME while recording (rate-limited by mFrameRefreshFreq),
+ *  - CAMERA_MSG_PREVIEW_FRAME,
+ *  - the one-shot picture sequence SHUTTER -> RAW_IMAGE_NOTIFY -> COMPRESSED_IMAGE.
+ * Param:
+ *  frame - Captured frame data (camera_dev->getFrameBufferSize() bytes).
+ *  timestamp - Frame timestamp.
+ *  camera_dev - Camera device that delivered the frame.
+ * NOTE(review): assumes mGetMemoryCB/mDataCB/mDataCBTimestamp are installed
+ * before the corresponding messages are enabled — confirm the containing
+ * EmulatedCamera guarantees this ordering.
+ */
+void CallbackNotifier::onNextFrameAvailable(const void* frame,
+                                            nsecs_t timestamp,
+                                            EmulatedCameraDevice* camera_dev)
+{
+    if (isMessageEnabled(CAMERA_MSG_VIDEO_FRAME) && isVideoRecordingEnabled() &&
+        isNewVideoFrameTime(timestamp)) {
+        camera_memory_t* cam_buff =
+            mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, NULL);
+        if (NULL != cam_buff && NULL != cam_buff->data) {
+            memcpy(cam_buff->data, frame, camera_dev->getFrameBufferSize());
+            mDataCBTimestamp(timestamp, CAMERA_MSG_VIDEO_FRAME,
+                             cam_buff, 0, mCBOpaque);
+            /* Kept alive until the framework returns it through
+             * releaseRecordingFrame(). */
+            mCameraMemoryTs.push_back( cam_buff );
+        } else {
+            ALOGE("%s: Memory failure in CAMERA_MSG_VIDEO_FRAME", __FUNCTION__);
+        }
+    }
+
+    if (isMessageEnabled(CAMERA_MSG_PREVIEW_FRAME)) {
+        camera_memory_t* cam_buff =
+            mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, NULL);
+        if (NULL != cam_buff && NULL != cam_buff->data) {
+            memcpy(cam_buff->data, frame, camera_dev->getFrameBufferSize());
+            mDataCB(CAMERA_MSG_PREVIEW_FRAME, cam_buff, 0, NULL, mCBOpaque);
+            cam_buff->release(cam_buff);
+        } else {
+            ALOGE("%s: Memory failure in CAMERA_MSG_PREVIEW_FRAME", __FUNCTION__);
+        }
+    }
+
+    if (mTakingPicture) {
+        /* This happens just once per takePicture request. */
+        mTakingPicture = false;
+        /* The sequence of callbacks during picture taking is:
+         * - CAMERA_MSG_SHUTTER
+         * - CAMERA_MSG_RAW_IMAGE_NOTIFY
+         * - CAMERA_MSG_COMPRESSED_IMAGE
+         */
+        if (isMessageEnabled(CAMERA_MSG_SHUTTER)) {
+            mNotifyCB(CAMERA_MSG_SHUTTER, 0, 0, mCBOpaque);
+        }
+        if (isMessageEnabled(CAMERA_MSG_RAW_IMAGE_NOTIFY)) {
+            mNotifyCB(CAMERA_MSG_RAW_IMAGE_NOTIFY, 0, 0, mCBOpaque);
+        }
+        if (isMessageEnabled(CAMERA_MSG_COMPRESSED_IMAGE)) {
+            /* Compress the frame to JPEG. Note that when taking pictures, we
+             * have requested camera device to provide us with NV21 frames. */
+            NV21JpegCompressor compressor;
+            status_t res =
+                compressor.compressRawImage(frame, camera_dev->getFrameWidth(),
+                                            camera_dev->getFrameHeight(),
+                                            mJpegQuality);
+            if (res == NO_ERROR) {
+                camera_memory_t* jpeg_buff =
+                    mGetMemoryCB(-1, compressor.getCompressedSize(), 1, NULL);
+                if (NULL != jpeg_buff && NULL != jpeg_buff->data) {
+                    compressor.getCompressedImage(jpeg_buff->data);
+                    mDataCB(CAMERA_MSG_COMPRESSED_IMAGE, jpeg_buff, 0, NULL, mCBOpaque);
+                    jpeg_buff->release(jpeg_buff);
+                } else {
+                    /* Fixed log label: these are the COMPRESSED_IMAGE paths;
+                     * the original messages said CAMERA_MSG_VIDEO_FRAME. */
+                    ALOGE("%s: Memory failure in CAMERA_MSG_COMPRESSED_IMAGE", __FUNCTION__);
+                }
+            } else {
+                ALOGE("%s: Compression failure in CAMERA_MSG_COMPRESSED_IMAGE", __FUNCTION__);
+            }
+        }
+    }
+}
+
+void CallbackNotifier::onCameraDeviceError(int err)
+{
+ if (isMessageEnabled(CAMERA_MSG_ERROR) && mNotifyCB != NULL) {
+ mNotifyCB(CAMERA_MSG_ERROR, err, 0, mCBOpaque);
+ }
+}
+
+/****************************************************************************
+ * Private API
+ ***************************************************************************/
+
+/* Decides whether enough time has elapsed since the last delivered video
+ * frame to push a new one; updates the last-frame timestamp when it says yes. */
+bool CallbackNotifier::isNewVideoFrameTime(nsecs_t timestamp)
+{
+    Mutex::Autolock lock(&mObjectLock);
+    const nsecs_t elapsed = timestamp - mLastFrameTimestamp;
+    if (elapsed < mFrameRefreshFreq) {
+        return false;
+    }
+    mLastFrameTimestamp = timestamp;
+    return true;
+}
+
+}; /* namespace android */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/CallbackNotifier.h b/src/type3_AndroidCloud/anbox-master/android/camera/CallbackNotifier.h
new file mode 100644
index 0000000..24784b5
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/CallbackNotifier.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_CALLBACK_NOTIFIER_H
+#define HW_EMULATOR_CAMERA_CALLBACK_NOTIFIER_H
+
+/*
+ * Contains declaration of a class CallbackNotifier that manages callbacks set
+ * via set_callbacks, enable_msg_type, and disable_msg_type camera HAL API.
+ */
+
+/* NOTE(review): the include target was lost during extraction; restored from
+ * the upstream goldfish camera HAL header — verify against the original. */
+#include <utils/List.h>
+
+namespace android {
+
+class EmulatedCameraDevice;
+
+/* Manages callbacks set via set_callbacks, enable_msg_type, and disable_msg_type
+ * camera HAL API.
+ *
+ * Objects of this class are contained in EmulatedCamera objects, and handle
+ * relevant camera API callbacks.
+ * Locking considerations. Apparently, it's not allowed to call callbacks
+ * registered in this class, while holding a lock: recursion is quite possible,
+ * which will cause a deadlock.
+ */
+class CallbackNotifier {
+public:
+    /* Constructs CallbackNotifier instance. */
+    CallbackNotifier();
+
+    /* Destructs CallbackNotifier instance. */
+    ~CallbackNotifier();
+
+    /****************************************************************************
+     * Camera API
+     ***************************************************************************/
+
+public:
+    /* Actual handler for camera_device_ops_t::set_callbacks callback.
+     * This method is called by the containing emulated camera object when it is
+     * handing the camera_device_ops_t::set_callbacks callback.
+     */
+    void setCallbacks(camera_notify_callback notify_cb,
+                      camera_data_callback data_cb,
+                      camera_data_timestamp_callback data_cb_timestamp,
+                      camera_request_memory get_memory,
+                      void* user);
+
+    /* Actual handler for camera_device_ops_t::enable_msg_type callback.
+     * This method is called by the containing emulated camera object when it is
+     * handing the camera_device_ops_t::enable_msg_type callback.
+     */
+    void enableMessage(uint msg_type);
+
+    /* Actual handler for camera_device_ops_t::disable_msg_type callback.
+     * This method is called by the containing emulated camera object when it is
+     * handing the camera_device_ops_t::disable_msg_type callback.
+     */
+    void disableMessage(uint msg_type);
+
+    /* Actual handler for camera_device_ops_t::store_meta_data_in_buffers
+     * callback. This method is called by the containing emulated camera object
+     * when it is handing the camera_device_ops_t::store_meta_data_in_buffers
+     * callback.
+     * Return:
+     *  NO_ERROR on success, or an appropriate error status.
+     */
+    status_t storeMetaDataInBuffers(bool enable);
+
+    /* Enables video recording.
+     * This method is called by the containing emulated camera object when it is
+     * handing the camera_device_ops_t::start_recording callback.
+     * Param:
+     *  fps - Video frame frequency. This parameter determines when a frame
+     *      received via onNextFrameAvailable call will be pushed through the
+     *      callback.
+     * Return:
+     *  NO_ERROR on success, or an appropriate error status.
+     */
+    status_t enableVideoRecording(int fps);
+
+    /* Disables video recording.
+     * This method is called by the containing emulated camera object when it is
+     * handing the camera_device_ops_t::stop_recording callback.
+     */
+    void disableVideoRecording();
+
+    /* Releases video frame, sent to the framework.
+     * This method is called by the containing emulated camera object when it is
+     * handing the camera_device_ops_t::release_recording_frame callback.
+     */
+    void releaseRecordingFrame(const void* opaque);
+
+    /* Actual handler for camera_device_ops_t::msg_type_enabled callback.
+     * This method is called by the containing emulated camera object when it is
+     * handing the camera_device_ops_t::msg_type_enabled callback.
+     * Note: this method doesn't grab a lock while checking message status, since
+     * upon exit the status would be undefined anyway. So, grab a lock before
+     * calling this method if you care about persisting a defined message status.
+     * Return:
+     *  0 if message is disabled, or non-zero value, if message is enabled.
+     */
+    inline int isMessageEnabled(uint msg_type)
+    {
+        return mMessageEnabler & msg_type;
+    }
+
+    /* Checks if video recording is enabled.
+     * This method is called by the containing emulated camera object when it is
+     * handing the camera_device_ops_t::recording_enabled callback.
+     * Note: this method doesn't grab a lock while checking video recording
+     * status, since upon exit the status would be undefined anyway. So, grab a
+     * lock before calling this method if you care about persisting of a defined
+     * video recording status.
+     * Return:
+     *  true if video recording is enabled, or false if it is disabled.
+     */
+    inline bool isVideoRecordingEnabled()
+    {
+        return mVideoRecEnabled;
+    }
+
+    /****************************************************************************
+     * Public API
+     ***************************************************************************/
+
+public:
+    /* Resets the callback notifier. */
+    void cleanupCBNotifier();
+
+    /* Next frame is available in the camera device.
+     * This is a notification callback that is invoked by the camera device when
+     * a new frame is available.
+     * Note that most likely this method is called in context of a worker thread
+     * that camera device has created for frame capturing.
+     * Param:
+     *  frame - Captured frame, or NULL if camera device didn't pull the frame
+     *      yet. If NULL is passed in this parameter use GetCurrentFrame method
+     *      of the camera device class to obtain the next frame. Also note that
+     *      the size of the frame that is passed here (as well as the frame
+     *      returned from the GetCurrentFrame method) is defined by the current
+     *      frame settings (width + height + pixel format) for the camera device.
+     *  timestamp - Frame's timestamp.
+     *  camera_dev - Camera device instance that delivered the frame.
+     */
+    void onNextFrameAvailable(const void* frame,
+                              nsecs_t timestamp,
+                              EmulatedCameraDevice* camera_dev);
+
+    /* Entry point for notifications that occur in camera device.
+     * Param:
+     *  err - CAMERA_ERROR_XXX error code.
+     */
+    void onCameraDeviceError(int err);
+
+    /* Sets, or resets taking picture state.
+     * This state controls whether or not to notify the framework about
+     * compressed image, shutter, and other picture related events.
+     */
+    void setTakingPicture(bool taking)
+    {
+        mTakingPicture = taking;
+    }
+
+    /* Sets JPEG quality used to compress frame during picture taking. */
+    void setJpegQuality(int jpeg_quality)
+    {
+        mJpegQuality = jpeg_quality;
+    }
+
+    /****************************************************************************
+     * Private API
+     ***************************************************************************/
+
+protected:
+    /* Checks if it's time to push new video frame.
+     * Note that this method must be called while object is locked.
+     * Param:
+     *  timestamp - Timestamp for the new frame. */
+    bool isNewVideoFrameTime(nsecs_t timestamp);
+
+    /****************************************************************************
+     * Data members
+     ***************************************************************************/
+
+protected:
+    /* Locks this instance for data change. */
+    Mutex mObjectLock;
+
+    /*
+     * Callbacks, registered in set_callbacks.
+     */
+
+    camera_notify_callback mNotifyCB;
+    camera_data_callback mDataCB;
+    camera_data_timestamp_callback mDataCBTimestamp;
+    camera_request_memory mGetMemoryCB;
+    void* mCBOpaque;
+
+    /* Video frame queue for the CameraHeapMemory destruction.
+     * NOTE(review): the template argument was lost in extraction; restored as
+     * camera_memory_t* to match releaseRecordingFrame() — confirm. */
+    List<camera_memory_t*> mCameraMemoryTs;
+
+    /* Timestamp when last frame has been delivered to the framework. */
+    nsecs_t mLastFrameTimestamp;
+
+    /* Video frequency in nanosec. */
+    nsecs_t mFrameRefreshFreq;
+
+    /* Message enabler. */
+    uint32_t mMessageEnabler;
+
+    /* JPEG quality used to compress frame during picture taking. */
+    int mJpegQuality;
+
+    /* Video recording status. */
+    bool mVideoRecEnabled;
+
+    /* Picture taking status. */
+    bool mTakingPicture;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_CALLBACK_NOTIFIER_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/Converters.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/Converters.cpp
new file mode 100644
index 0000000..f63f67f
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/Converters.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of framebuffer conversion routines.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Converter"
+/* NOTE(review): include target lost during extraction; restored (ALOGV/ALOGE
+ * are used below) — verify against the original patch. */
+#include <cutils/log.h>
+#include "Converters.h"
+
+namespace android {
+
+/* Converts a YUV 4:2:0 frame into RGB565.
+ * Param:
+ *  Y, U, V - Pointers to the start of each plane,
+ *  dUV - Step between consecutive chroma samples (1 = planar, 2 = interleaved),
+ *  rgb - Destination RGB565 buffer,
+ *  width, height - Frame dimensions (width assumed even — TODO confirm callers).
+ */
+static void _YUV420SToRGB565(const uint8_t* Y,
+ const uint8_t* U,
+ const uint8_t* V,
+ int dUV,
+ uint16_t* rgb,
+ int width,
+ int height)
+{
+ const uint8_t* U_pos = U;
+ const uint8_t* V_pos = V;
+
+ for (int y = 0; y < height; y++) {
+ /* Each horizontal pair of pixels shares one U/V sample. */
+ for (int x = 0; x < width; x += 2, U += dUV, V += dUV) {
+ const uint8_t nU = *U;
+ const uint8_t nV = *V;
+ *rgb = YUVToRGB565(*Y, nU, nV);
+ Y++; rgb++;
+ *rgb = YUVToRGB565(*Y, nU, nV);
+ Y++; rgb++;
+ }
+ /* 4:2:0 chroma rows are shared by two luma rows: after an even row,
+ * rewind U/V to the saved row start so the next row reuses it; after
+ * an odd row, save the new chroma row start for the next pair. */
+ if (y & 0x1) {
+ U_pos = U;
+ V_pos = V;
+ } else {
+ U = U_pos;
+ V = V_pos;
+ }
+ }
+}
+
+/* Converts a YUV 4:2:0 frame into RGB32.
+ * Param:
+ *  Y, U, V - Pointers to the start of each plane,
+ *  dUV - Step between consecutive chroma samples (1 = planar, 2 = interleaved),
+ *  rgb - Destination RGB32 buffer,
+ *  width, height - Frame dimensions (width assumed even — TODO confirm callers).
+ */
+static void _YUV420SToRGB32(const uint8_t* Y,
+ const uint8_t* U,
+ const uint8_t* V,
+ int dUV,
+ uint32_t* rgb,
+ int width,
+ int height)
+{
+ const uint8_t* U_pos = U;
+ const uint8_t* V_pos = V;
+
+ for (int y = 0; y < height; y++) {
+ /* Each horizontal pair of pixels shares one U/V sample. */
+ for (int x = 0; x < width; x += 2, U += dUV, V += dUV) {
+ const uint8_t nU = *U;
+ const uint8_t nV = *V;
+ *rgb = YUVToRGB32(*Y, nU, nV);
+ Y++; rgb++;
+ *rgb = YUVToRGB32(*Y, nU, nV);
+ Y++; rgb++;
+ }
+ /* 4:2:0 chroma rows are shared by two luma rows: rewind after even
+ * rows, save the new chroma row start after odd rows. */
+ if (y & 0x1) {
+ U_pos = U;
+ V_pos = V;
+ } else {
+ U = U_pos;
+ V = V_pos;
+ }
+ }
+}
+
+/* Converts a YV12 frame into RGB565.
+ * The reinterpret_cast template arguments were lost in extraction; restored.
+ * NOTE(review): this routine treats the plane at Y + pix_total as U, while
+ * YV12ToRGB32 below treats it as V — behavior kept as-is; confirm the
+ * intended plane order for YV12 against the original patch.
+ */
+void YV12ToRGB565(const void* yv12, void* rgb, int width, int height)
+{
+    const int pix_total = width * height;
+    const uint8_t* Y = reinterpret_cast<const uint8_t*>(yv12);
+    const uint8_t* U = Y + pix_total;
+    const uint8_t* V = U + pix_total / 4;
+    _YUV420SToRGB565(Y, U, V, 1, reinterpret_cast<uint16_t*>(rgb), width, height);
+}
+
+/* Converts a YV12 frame (V plane precedes U) into RGB32.
+ * The reinterpret_cast template arguments were lost in extraction; restored. */
+void YV12ToRGB32(const void* yv12, void* rgb, int width, int height)
+{
+    const int pix_total = width * height;
+    const uint8_t* Y = reinterpret_cast<const uint8_t*>(yv12);
+    const uint8_t* V = Y + pix_total;
+    const uint8_t* U = V + pix_total / 4;
+    _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height);
+}
+
+/* Converts a YU12 frame (U plane precedes V) into RGB32.
+ * The reinterpret_cast template arguments were lost in extraction; restored. */
+void YU12ToRGB32(const void* yu12, void* rgb, int width, int height)
+{
+    const int pix_total = width * height;
+    const uint8_t* Y = reinterpret_cast<const uint8_t*>(yu12);
+    const uint8_t* U = Y + pix_total;
+    const uint8_t* V = U + pix_total / 4;
+    _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height);
+}
+
+/* Common converter for YUV 4:2:0 interleaved to RGB565.
+ * Y, U, and V point to the Y plane and to the first U / first V byte of the
+ * interleaved chroma plane; dUV = 2 skips the interleaved partner sample.
+ */
+static void _NVXXToRGB565(const uint8_t* Y,
+ const uint8_t* U,
+ const uint8_t* V,
+ uint16_t* rgb,
+ int width,
+ int height)
+{
+ _YUV420SToRGB565(Y, U, V, 2, rgb, width, height);
+}
+
+/* Common converter for YUV 4:2:0 interleaved to RGB32.
+ * Y, U, and V point to the Y plane and to the first U / first V byte of the
+ * interleaved chroma plane; dUV = 2 skips the interleaved partner sample.
+ */
+static void _NVXXToRGB32(const uint8_t* Y,
+ const uint8_t* U,
+ const uint8_t* V,
+ uint32_t* rgb,
+ int width,
+ int height)
+{
+ _YUV420SToRGB32(Y, U, V, 2, rgb, width, height);
+}
+
+/* Converts an NV12 frame (Y plane, then interleaved U/V) into RGB565.
+ * The reinterpret_cast template arguments were lost in extraction; restored. */
+void NV12ToRGB565(const void* nv12, void* rgb, int width, int height)
+{
+    const int pix_total = width * height;
+    const uint8_t* y = reinterpret_cast<const uint8_t*>(nv12);
+    _NVXXToRGB565(y, y + pix_total, y + pix_total + 1,
+                  reinterpret_cast<uint16_t*>(rgb), width, height);
+}
+
+/* Converts an NV12 frame (Y plane, then interleaved U/V) into RGB32.
+ * The reinterpret_cast template arguments were lost in extraction; restored. */
+void NV12ToRGB32(const void* nv12, void* rgb, int width, int height)
+{
+    const int pix_total = width * height;
+    const uint8_t* y = reinterpret_cast<const uint8_t*>(nv12);
+    _NVXXToRGB32(y, y + pix_total, y + pix_total + 1,
+                 reinterpret_cast<uint32_t*>(rgb), width, height);
+}
+
+/* Converts an NV21 frame (Y plane, then interleaved V/U) into RGB565.
+ * The reinterpret_cast template arguments were lost in extraction; restored. */
+void NV21ToRGB565(const void* nv21, void* rgb, int width, int height)
+{
+    const int pix_total = width * height;
+    const uint8_t* y = reinterpret_cast<const uint8_t*>(nv21);
+    _NVXXToRGB565(y, y + pix_total + 1, y + pix_total,
+                  reinterpret_cast<uint16_t*>(rgb), width, height);
+}
+
+/* Converts an NV21 frame (Y plane, then interleaved V/U) into RGB32.
+ * The reinterpret_cast template arguments were lost in extraction; restored. */
+void NV21ToRGB32(const void* nv21, void* rgb, int width, int height)
+{
+    const int pix_total = width * height;
+    const uint8_t* y = reinterpret_cast<const uint8_t*>(nv21);
+    _NVXXToRGB32(y, y + pix_total + 1, y + pix_total,
+                 reinterpret_cast<uint32_t*>(rgb), width, height);
+}
+
+}; /* namespace android */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/Converters.h b/src/type3_AndroidCloud/anbox-master/android/camera/Converters.h
new file mode 100644
index 0000000..13e2a85
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/Converters.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_CONVERTERS_H
+#define HW_EMULATOR_CAMERA_CONVERTERS_H
+
+#include <stdint.h>
+
+#ifndef __BYTE_ORDER
+#error "could not determine byte order"
+#endif
+
+/*
+ * Contains declaration of framebuffer conversion routines.
+ *
+ * NOTE: RGB and big/little endian considerations. Wherever in this code RGB
+ * pixels are represented as WORD, or DWORD, the color order inside the
+ * WORD / DWORD matches the one that would occur if that WORD / DWORD would have
+ * been read from the typecasted framebuffer:
+ *
+ * const uint32_t rgb = *reinterpret_cast<const uint32_t*>(framebuffer);
+ *
+ * So, if this code runs on the little endian CPU, red color in 'rgb' would be
+ * masked as 0x000000ff, and blue color would be masked as 0x00ff0000, while if
+ * the code runs on a big endian CPU, the red color in 'rgb' would be masked as
+ * 0xff000000, and blue color would be masked as 0x0000ff00,
+ */
+
+namespace android {
+
+/*
+ * RGB565 color masks
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+static const uint16_t kRed5 = 0x001f;
+static const uint16_t kGreen6 = 0x07e0;
+static const uint16_t kBlue5 = 0xf800;
+#else // __BYTE_ORDER
+static const uint16_t kRed5 = 0xf800;
+static const uint16_t kGreen6 = 0x07e0;
+static const uint16_t kBlue5 = 0x001f;
+#endif // __BYTE_ORDER
+static const uint32_t kBlack16 = 0x0000;
+static const uint32_t kWhite16 = kRed5 | kGreen6 | kBlue5;
+
+/*
+ * RGB32 color masks
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+static const uint32_t kRed8 = 0x000000ff;
+static const uint32_t kGreen8 = 0x0000ff00;
+static const uint32_t kBlue8 = 0x00ff0000;
+#else // __BYTE_ORDER
+static const uint32_t kRed8 = 0x00ff0000;
+static const uint32_t kGreen8 = 0x0000ff00;
+static const uint32_t kBlue8 = 0x000000ff;
+#endif // __BYTE_ORDER
+static const uint32_t kBlack32 = 0x00000000;
+static const uint32_t kWhite32 = kRed8 | kGreen8 | kBlue8;
+
+/*
+ * Extracting, and saving color bytes from / to WORD / DWORD RGB.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+/* Extract red, green, and blue bytes from RGB565 word. */
+#define R16(rgb) static_cast<uint8_t>(rgb & kRed5)
+#define G16(rgb) static_cast<uint8_t>((rgb & kGreen6) >> 5)
+#define B16(rgb) static_cast<uint8_t>((rgb & kBlue5) >> 11)
+/* Make 8 bits red, green, and blue, extracted from RGB565 word. The second
+ * OR term replicates the top color bits into the low bits so full-scale 5/6
+ * bit values map to full-scale 8 bit values. */
+#define R16_32(rgb) static_cast<uint8_t>(((rgb & kRed5) << 3) | ((rgb & kRed5) >> 2))
+#define G16_32(rgb) static_cast<uint8_t>(((rgb & kGreen6) >> 3) | ((rgb & kGreen6) >> 9))
+#define B16_32(rgb) static_cast<uint8_t>(((rgb & kBlue5) >> 8) | ((rgb & kBlue5) >> 14))
+/* Extract red, green, and blue bytes from RGB32 dword. */
+#define R32(rgb) static_cast<uint8_t>(rgb & kRed8)
+#define G32(rgb) static_cast<uint8_t>(((rgb & kGreen8) >> 8) & 0xff)
+#define B32(rgb) static_cast<uint8_t>(((rgb & kBlue8) >> 16) & 0xff)
+/* Build RGB565 word from red, green, and blue bytes. */
+#define RGB565(r, g, b) static_cast<uint16_t>((((static_cast<uint16_t>(b) << 6) | g) << 5) | r)
+/* Build RGB32 dword from red, green, and blue bytes. */
+#define RGB32(r, g, b) static_cast<uint32_t>((((static_cast<uint32_t>(b) << 8) | g) << 8) | r)
+#else // __BYTE_ORDER
+/* Extract red, green, and blue bytes from RGB565 word. */
+#define R16(rgb) static_cast<uint8_t>((rgb & kRed5) >> 11)
+#define G16(rgb) static_cast<uint8_t>((rgb & kGreen6) >> 5)
+#define B16(rgb) static_cast<uint8_t>(rgb & kBlue5)
+/* Make 8 bits red, green, and blue, extracted from RGB565 word. */
+#define R16_32(rgb) static_cast<uint8_t>(((rgb & kRed5) >> 8) | ((rgb & kRed5) >> 14))
+#define G16_32(rgb) static_cast<uint8_t>(((rgb & kGreen6) >> 3) | ((rgb & kGreen6) >> 9))
+#define B16_32(rgb) static_cast<uint8_t>(((rgb & kBlue5) << 3) | ((rgb & kBlue5) >> 2))
+/* Extract red, green, and blue bytes from RGB32 dword. */
+#define R32(rgb) static_cast<uint8_t>((rgb & kRed8) >> 16)
+#define G32(rgb) static_cast<uint8_t>((rgb & kGreen8) >> 8)
+#define B32(rgb) static_cast<uint8_t>(rgb & kBlue8)
+/* Build RGB565 word from red, green, and blue bytes. */
+#define RGB565(r, g, b) static_cast<uint16_t>((((static_cast<uint16_t>(r) << 6) | g) << 5) | b)
+/* Build RGB32 dword from red, green, and blue bytes. */
+#define RGB32(r, g, b) static_cast<uint32_t>((((static_cast<uint32_t>(r) << 8) | g) << 8) | b)
+#endif // __BYTE_ORDER
+
+/* An union that simplifies breaking 32 bit RGB into separate R, G, and B colors.
+ * Writing the byte fields and reading 'color' (or vice versa) relies on union
+ * type punning; the field order mirrors the RGB32 masks declared above so the
+ * dword form matches a typecast framebuffer read.
+ */
+typedef union RGB32_t {
+ uint32_t color;
+ struct {
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ uint8_t r; uint8_t g; uint8_t b; uint8_t a;
+#else // __BYTE_ORDER
+ uint8_t a; uint8_t b; uint8_t g; uint8_t r;
+#endif // __BYTE_ORDER
+ };
+} RGB32_t;
+
+
+/* Clips a value to the unsigned 0-255 range, treating negative values as zero.
+ * Used to saturate the fixed-point YUV -> RGB arithmetic below.
+ */
+static __inline__ int
+clamp(int x)
+{
+ if (x > 255) return 255;
+ if (x < 0) return 0;
+ return x;
+}
+
+/********************************************************************************
+ * Basics of RGB -> YUV conversion
+ *******************************************************************************/
+
+/*
+ * RGB -> YUV conversion macros
+ */
+#define RGB2Y(r, g, b) (uint8_t)(((66 * (r) + 129 * (g) + 25 * (b) + 128) >> 8) + 16)
+#define RGB2U(r, g, b) (uint8_t)(((-38 * (r) - 74 * (g) + 112 * (b) + 128) >> 8) + 128)
+#define RGB2V(r, g, b) (uint8_t)(((112 * (r) - 94 * (g) - 18 * (b) + 128) >> 8) + 128)
+
+/* Converts R8 G8 B8 color to YUV using the fixed-point RGB2Y/U/V macros above.
+ * Output Y/U/V values are stored through the given pointers (must be non-NULL).
+ */
+static __inline__ void
+R8G8B8ToYUV(uint8_t r, uint8_t g, uint8_t b, uint8_t* y, uint8_t* u, uint8_t* v)
+{
+ *y = RGB2Y((int)r, (int)g, (int)b);
+ *u = RGB2U((int)r, (int)g, (int)b);
+ *v = RGB2V((int)r, (int)g, (int)b);
+}
+
+/* Converts RGB565 color to YUV.
+ * The R16_32/G16_32/B16_32 macros expand the 5/6 bit channels to 8 bits
+ * before the conversion. */
+static __inline__ void
+RGB565ToYUV(uint16_t rgb, uint8_t* y, uint8_t* u, uint8_t* v)
+{
+ R8G8B8ToYUV(R16_32(rgb), G16_32(rgb), B16_32(rgb), y, u, v);
+}
+
+/* Converts RGB32 color to YUV.
+ * The RGB32_t union splits the dword into its byte channels; the alpha byte
+ * is ignored. */
+static __inline__ void
+RGB32ToYUV(uint32_t rgb, uint8_t* y, uint8_t* u, uint8_t* v)
+{
+ RGB32_t rgb_c;
+ rgb_c.color = rgb;
+ R8G8B8ToYUV(rgb_c.r, rgb_c.g, rgb_c.b, y, u, v);
+}
+
+/********************************************************************************
+ * Basics of YUV -> RGB conversion.
+ * Note that due to the fact that guest uses RGB only on preview window, and the
+ * RGB format that is used is RGB565, we can limit YUV -> RGB conversions to
+ * RGB565 only.
+ *******************************************************************************/
+
+/*
+ * YUV -> RGB conversion macros
+ */
+
+/* "Optimized" macros that take specialy prepared Y, U, and V values:
+ * C = Y - 16
+ * D = U - 128
+ * E = V - 128
+ */
+#define YUV2RO(C, D, E) clamp((298 * (C) + 409 * (E) + 128) >> 8)
+#define YUV2GO(C, D, E) clamp((298 * (C) - 100 * (D) - 208 * (E) + 128) >> 8)
+#define YUV2BO(C, D, E) clamp((298 * (C) + 516 * (D) + 128) >> 8)
+
+/*
+ * Main macros that take the original Y, U, and V values
+ */
+#define YUV2R(y, u, v) clamp((298 * ((y)-16) + 409 * ((v)-128) + 128) >> 8)
+#define YUV2G(y, u, v) clamp((298 * ((y)-16) - 100 * ((u)-128) - 208 * ((v)-128) + 128) >> 8)
+#define YUV2B(y, u, v) clamp((298 * ((y)-16) + 516 * ((u)-128) + 128) >> 8)
+
+
+/* Converts YUV color to RGB565.
+ * The clamped 8-bit channels are truncated to 5/6/5 bits by dropping the low
+ * bits before packing. */
+static __inline__ uint16_t
+YUVToRGB565(int y, int u, int v)
+{
+ /* Calculate C, D, and E values for the optimized macro. */
+ y -= 16; u -= 128; v -= 128;
+ const uint16_t r = (YUV2RO(y,u,v) >> 3) & 0x1f;
+ const uint16_t g = (YUV2GO(y,u,v) >> 2) & 0x3f;
+ const uint16_t b = (YUV2BO(y,u,v) >> 3) & 0x1f;
+ return RGB565(r, g, b);
+}
+
+/* Converts YUV color to RGB32.
+ * Fix: the RGB32_t union was used with its alpha byte left uninitialized, so
+ * the returned dword carried garbage in the 'a' position. The whole dword is
+ * now zeroed first, which matches the kBlack32/kWhite32 constants above (they
+ * carry no alpha bits either). */
+static __inline__ uint32_t
+YUVToRGB32(int y, int u, int v)
+{
+ /* Calculate C, D, and E values for the optimized macro. */
+ y -= 16; u -= 128; v -= 128;
+ RGB32_t rgb;
+ rgb.color = 0; /* deterministic alpha byte */
+ rgb.r = YUV2RO(y,u,v) & 0xff;
+ rgb.g = YUV2GO(y,u,v) & 0xff;
+ rgb.b = YUV2BO(y,u,v) & 0xff;
+ return rgb.color;
+}
+
+/* YUV pixel descriptor.
+ * A small value type holding one pixel's Y/U/V bytes, convertible from the
+ * RGB565 and RGB32 pixel representations. */
+struct YUVPixel {
+ uint8_t Y;
+ uint8_t U;
+ uint8_t V;
+
+ /* Default: black pixel (all channels zero). */
+ inline YUVPixel()
+ : Y(0), U(0), V(0)
+ {
+ }
+
+ /* Builds the pixel from an RGB565 word. */
+ inline explicit YUVPixel(uint16_t rgb565)
+ {
+ RGB565ToYUV(rgb565, &Y, &U, &V);
+ }
+
+ /* Builds the pixel from an RGB32 dword (alpha ignored). */
+ inline explicit YUVPixel(uint32_t rgb32)
+ {
+ RGB32ToYUV(rgb32, &Y, &U, &V);
+ }
+
+ /* Copies the channels out through the given (non-NULL) pointers. */
+ inline void get(uint8_t* pY, uint8_t* pU, uint8_t* pV) const
+ {
+ *pY = Y; *pU = U; *pV = V;
+ }
+};
+
+/* Converts an YV12 framebuffer to RGB565 framebuffer.
+ * Param:
+ * yv12 - YV12 framebuffer.
+ * rgb - RGB565 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void YV12ToRGB565(const void* yv12, void* rgb, int width, int height);
+
+/* Converts an YV12 framebuffer to RGB32 framebuffer.
+ * Param:
+ * yv12 - YV12 framebuffer.
+ * rgb - RGB32 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void YV12ToRGB32(const void* yv12, void* rgb, int width, int height);
+
+/* Converts an YU12 framebuffer to RGB32 framebuffer.
+ * Param:
+ * yu12 - YU12 framebuffer.
+ * rgb - RGB32 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void YU12ToRGB32(const void* yu12, void* rgb, int width, int height);
+
+/* Converts an NV12 framebuffer to RGB565 framebuffer.
+ * Param:
+ * nv12 - NV12 framebuffer.
+ * rgb - RGB565 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void NV12ToRGB565(const void* nv12, void* rgb, int width, int height);
+
+/* Converts an NV12 framebuffer to RGB32 framebuffer.
+ * Param:
+ * nv12 - NV12 framebuffer.
+ * rgb - RGB32 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void NV12ToRGB32(const void* nv12, void* rgb, int width, int height);
+
+/* Converts an NV21 framebuffer to RGB565 framebuffer.
+ * Param:
+ * nv21 - NV21 framebuffer.
+ * rgb - RGB565 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void NV21ToRGB565(const void* nv21, void* rgb, int width, int height);
+
+/* Converts an NV21 framebuffer to RGB32 framebuffer.
+ * Param:
+ * nv21 - NV21 framebuffer.
+ * rgb - RGB32 framebuffer.
+ * width, height - Dimensions for both framebuffers.
+ */
+void NV21ToRGB32(const void* nv21, void* rgb, int width, int height);
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_CONVERTERS_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedBaseCamera.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedBaseCamera.cpp
new file mode 100644
index 0000000..5fe7d73
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedBaseCamera.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedBaseCamera that encapsulates
+ * functionality common to all emulated camera device versions ("fake",
+ * "webcam", "video file", "cam2.0" etc.). Instances of this class (for each
+ * emulated camera) are created during the construction of the
+ * EmulatedCameraFactory instance. This class serves as an entry point for all
+ * camera API calls that are common across all versions of the
+ * camera_device_t/camera_module_t structures.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_BaseCamera"
+#include
+
+#include "EmulatedBaseCamera.h"
+
+namespace android {
+
+/* Fills in the common hw_device_t header for the camera being constructed.
+ * Param:
+ *  cameraId - zero-based ID assigned to this camera.
+ *  cameraVersion - camera device HAL version implemented by the subclass.
+ *  device - hw_device_t header embedded in the subclass's device descriptor.
+ *  module - HAL module this camera belongs to.
+ */
+EmulatedBaseCamera::EmulatedBaseCamera(int cameraId,
+ uint32_t cameraVersion,
+ struct hw_device_t* device,
+ struct hw_module_t* module)
+ : mCameraInfo(NULL),
+ mCameraID(cameraId),
+ mCameraDeviceVersion(cameraVersion)
+{
+ /*
+ * Initialize camera_device descriptor for this object.
+ */
+
+ /* Common header */
+ device->tag = HARDWARE_DEVICE_TAG;
+ device->version = cameraVersion;
+ device->module = module;
+ device->close = NULL; // Must be filled in by child implementation
+}
+
+/* Intentionally empty: this base class does not free mCameraInfo here
+ * (presumably released by whichever subclass allocated it — confirm). */
+EmulatedBaseCamera::~EmulatedBaseCamera()
+{
+}
+
+/* Fills in the version-dependent part of camera_info.
+ * Subclass overrides are expected to fill in facing/orientation and then
+ * delegate here. Always returns NO_ERROR. */
+status_t EmulatedBaseCamera::getCameraInfo(struct camera_info* info)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ info->device_version = mCameraDeviceVersion;
+ if (mCameraDeviceVersion >= HARDWARE_DEVICE_API_VERSION(2, 0)) {
+ info->static_camera_characteristics = mCameraInfo;
+ } else {
+ /* Pre-2.0 clients must not touch static characteristics; a poison
+ * pointer is planted so an accidental dereference fails fast. */
+ info->static_camera_characteristics = (camera_metadata_t*)0xcafef00d;
+ }
+
+ return NO_ERROR;
+}
+
+/* Base implementation: hotplug is not supported; always INVALID_OPERATION. */
+status_t EmulatedBaseCamera::plugCamera() {
+ ALOGE("%s: not supported", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+/* Base implementation: hotplug is not supported; always INVALID_OPERATION. */
+status_t EmulatedBaseCamera::unplugCamera() {
+ ALOGE("%s: not supported", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+/* Base implementation: the camera is always reported as present. */
+camera_device_status_t EmulatedBaseCamera::getHotplugStatus() {
+ return CAMERA_DEVICE_STATUS_PRESENT;
+}
+
+
+
+
+} /* namespace android */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedBaseCamera.h b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedBaseCamera.h
new file mode 100644
index 0000000..3b8a6a0
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedBaseCamera.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_BASE_CAMERA_H
+#define HW_EMULATOR_CAMERA_EMULATED_BASE_CAMERA_H
+
+#include
+#include
+
+namespace android {
+
+/*
+ * Contains declaration of a class EmulatedBaseCamera that encapsulates
+ * functionality common to all emulated camera device versions ("fake",
+ * "webcam", "video file", etc.). Instances of this class (for each emulated
+ * camera) are created during the construction of the EmulatedCameraFactory
+ * instance. This class serves as an entry point for all camera API calls that
+ * are common across all versions of the camera_device_t/camera_module_t
+ * structures.
+ */
+
+class EmulatedBaseCamera {
+ public:
+ EmulatedBaseCamera(int cameraId,
+ uint32_t cameraVersion,
+ struct hw_device_t* device,
+ struct hw_module_t* module);
+
+ virtual ~EmulatedBaseCamera();
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+ public:
+ /* Initializes EmulatedCamera instance.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ virtual status_t Initialize() = 0;
+
+ /****************************************************************************
+ * Camera API implementation
+ ***************************************************************************/
+
+ public:
+ /* Creates connection to the emulated camera device.
+ * This method is called in response to hw_module_methods_t::open callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t connectCamera(hw_device_t** device) = 0;
+
+
+ /* Plug the connection for the emulated camera. Until it's plugged in
+ * calls to connectCamera should fail with -ENODEV.
+ */
+ virtual status_t plugCamera();
+
+ /* Unplug the connection from underneath the emulated camera.
+ * This is similar to closing the camera, except that
+ * all function calls into the camera device will return
+ * -EPIPE errors until the camera is reopened.
+ */
+ virtual status_t unplugCamera();
+
+ /* Reports the current hotplug state of this camera. */
+ virtual camera_device_status_t getHotplugStatus();
+
+ /* Closes connection to the emulated camera.
+ * This method is called in response to camera_device::close callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t closeCamera() = 0;
+
+ /* Gets camera information.
+ * This method is called in response to camera_module_t::get_camera_info
+ * callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t getCameraInfo(struct camera_info* info) = 0;
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+
+ protected:
+ /* Fixed camera information for camera2 devices. Must be valid to access if
+ * mCameraDeviceVersion is >= HARDWARE_DEVICE_API_VERSION(2,0) */
+ camera_metadata_t *mCameraInfo;
+
+ /* Zero-based ID assigned to this camera. */
+ int mCameraID;
+
+ private:
+
+ /* Version of the camera device HAL implemented by this camera.
+ * Declared uint32_t to match the constructor argument and the
+ * camera_info::device_version field it is copied into (was 'int',
+ * a signed/unsigned inconsistency). */
+ uint32_t mCameraDeviceVersion;
+};
+
+} /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_BASE_CAMERA_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera.cpp
new file mode 100644
index 0000000..096c5b2
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera.cpp
@@ -0,0 +1,1041 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedCamera that encapsulates
+ * functionality common to all emulated cameras ("fake", "webcam", "video file",
+ * etc.). Instances of this class (for each emulated camera) are created during
+ * the construction of the EmulatedCameraFactory instance. This class serves as
+ * an entry point for all camera API calls that defined by camera_device_ops_t
+ * API.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Camera"
+#include
+#include
+#include "EmulatedCamera.h"
+//#include "EmulatedFakeCameraDevice.h"
+#include "Converters.h"
+
+/* Defines whether we should trace parameter changes. */
+#define DEBUG_PARAM 1
+
+namespace android {
+
+#if DEBUG_PARAM
+/* Calculates and logs parameter changes.
+ * Param:
+ * current - Current set of camera parameters.
+ * new_par - String representation of new parameters.
+ */
+static void PrintParamDiff(const CameraParameters& current, const char* new_par);
+#else
+#define PrintParamDiff(current, new_par) (void(0))
+#endif /* DEBUG_PARAM */
+
+/* A helper routine that adds a value to the camera parameter.
+ * Param:
+ * param - Camera parameter to add a value to.
+ * val - Value to add.
+ * Return:
+ * A new string containing parameter with the added value on success, or NULL on
+ * a failure. If non-NULL string is returned, the caller is responsible for
+ * freeing it with 'free'.
+ */
+static char* AddValue(const char* param, const char* val);
+
+/* Builds the camera_device v1 descriptor on top of the common base.
+ * 'common', 'ops' and 'priv' are bare members here, so this class evidently
+ * derives from the camera_device struct (declared in the header, outside this
+ * chunk). 'priv' points back to this object for the static ops trampolines.
+ */
+EmulatedCamera::EmulatedCamera(int cameraId,
+ struct hw_module_t* module)
+ : EmulatedBaseCamera(cameraId,
+ HARDWARE_DEVICE_API_VERSION(1, 0),
+ &common,
+ module),
+ mPreviewWindow(),
+ mCallbackNotifier()
+{
+ /* camera_device v1 fields. */
+ common.close = EmulatedCamera::close;
+ ops = &mDeviceOps;
+ priv = this;
+}
+
+/* Nothing to tear down explicitly; members clean up in their own destructors. */
+EmulatedCamera::~EmulatedCamera()
+{
+}
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+/* Populates mParameters with the fake, device-independent defaults and seeds
+ * the white balance tables on the camera device.
+ * Return:
+ *  NO_ERROR (this parameter setup has no failure paths).
+ */
+status_t EmulatedCamera::Initialize()
+{
+ /* Preview formats supported by this HAL. */
+ char preview_formats[1024];
+ snprintf(preview_formats, sizeof(preview_formats), "%s,%s,%s",
+ CameraParameters::PIXEL_FORMAT_YUV420SP,
+ CameraParameters::PIXEL_FORMAT_YUV420P,
+ CameraParameters::PIXEL_FORMAT_RGBA8888);
+
+ /*
+ * Fake required parameters.
+ */
+
+ mParameters.set(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES, "320x240,0x0");
+
+ mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH, "512");
+ mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT, "384");
+ mParameters.set(CameraParameters::KEY_JPEG_QUALITY, "90");
+ mParameters.set(CameraParameters::KEY_FOCAL_LENGTH, "4.31");
+ mParameters.set(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, "54.8");
+ mParameters.set(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, "42.5");
+ mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY, "90");
+
+ /* Preview format settings used here are related to panoramic view only. It's
+ * not related to the preview window that works only with RGB frames, which
+ * is explicitly stated when set_buffers_geometry is called on the preview
+ * window object. */
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS,
+ preview_formats);
+ mParameters.setPreviewFormat(CameraParameters::PIXEL_FORMAT_YUV420SP);
+
+ /* We don't rely on the actual frame rates supported by the camera device,
+ * since we will emulate them through timeouts in the emulated camera device
+ * worker thread. */
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES,
+ "30,24,20,15,10,5");
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE, "(5,30)");
+ mParameters.set(CameraParameters::KEY_PREVIEW_FPS_RANGE, "5,30");
+ mParameters.setPreviewFrameRate(24);
+
+ /* Only PIXEL_FORMAT_YUV420P is accepted by video framework in emulator! */
+ mParameters.set(CameraParameters::KEY_VIDEO_FRAME_FORMAT,
+ CameraParameters::PIXEL_FORMAT_YUV420P);
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS,
+ CameraParameters::PIXEL_FORMAT_JPEG);
+ mParameters.setPictureFormat(CameraParameters::PIXEL_FORMAT_JPEG);
+
+ /* Set exposure compensation. */
+ mParameters.set(CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION, "6");
+ mParameters.set(CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION, "-6");
+ mParameters.set(CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP, "0.5");
+ mParameters.set(CameraParameters::KEY_EXPOSURE_COMPENSATION, "0");
+
+ /* Sets the white balance modes and the device-dependent scale factors. */
+ char supported_white_balance[1024];
+ snprintf(supported_white_balance, sizeof(supported_white_balance),
+ "%s,%s,%s,%s",
+ CameraParameters::WHITE_BALANCE_AUTO,
+ CameraParameters::WHITE_BALANCE_INCANDESCENT,
+ CameraParameters::WHITE_BALANCE_DAYLIGHT,
+ CameraParameters::WHITE_BALANCE_TWILIGHT);
+ mParameters.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE,
+ supported_white_balance);
+ mParameters.set(CameraParameters::KEY_WHITE_BALANCE,
+ CameraParameters::WHITE_BALANCE_AUTO);
+ getCameraDevice()->initializeWhiteBalanceModes(
+ CameraParameters::WHITE_BALANCE_AUTO, 1.0f, 1.0f);
+ getCameraDevice()->initializeWhiteBalanceModes(
+ CameraParameters::WHITE_BALANCE_INCANDESCENT, 1.38f, 0.60f);
+ getCameraDevice()->initializeWhiteBalanceModes(
+ CameraParameters::WHITE_BALANCE_DAYLIGHT, 1.09f, 0.92f);
+ getCameraDevice()->initializeWhiteBalanceModes(
+ CameraParameters::WHITE_BALANCE_TWILIGHT, 0.92f, 1.22f);
+ getCameraDevice()->setWhiteBalanceMode(CameraParameters::WHITE_BALANCE_AUTO);
+
+ /* Not supported features
+ */
+ mParameters.set(CameraParameters::KEY_SUPPORTED_FOCUS_MODES,
+ CameraParameters::FOCUS_MODE_FIXED);
+ mParameters.set(CameraParameters::KEY_FOCUS_MODE,
+ CameraParameters::FOCUS_MODE_FIXED);
+
+ return NO_ERROR;
+}
+
+/* Fan-out for a captured frame: the preview window is updated first, then the
+ * callback notifier dispatches the frame to framework callbacks. */
+void EmulatedCamera::onNextFrameAvailable(const void* frame,
+ nsecs_t timestamp,
+ EmulatedCameraDevice* camera_dev)
+{
+ /* Notify the preview window first. */
+ mPreviewWindow.onNextFrameAvailable(frame, timestamp, camera_dev);
+
+ /* Notify callback notifier next. */
+ mCallbackNotifier.onNextFrameAvailable(frame, timestamp, camera_dev);
+}
+
+/* Forwards a device error to the framework via the callback notifier. */
+void EmulatedCamera::onCameraDeviceError(int err)
+{
+ /* Errors are reported through the callback notifier */
+ mCallbackNotifier.onCameraDeviceError(err);
+}
+
+/****************************************************************************
+ * Camera API implementation.
+ ***************************************************************************/
+
+/* Connects to the underlying camera device and exposes the camera_device
+ * descriptor to the caller.
+ * Return:
+ *  0 on success; otherwise the negated status ("-res"), matching the
+ *  negative-errno convention of the HAL open callback. Without a device
+ *  instance this is -EINVAL. */
+status_t EmulatedCamera::connectCamera(hw_device_t** device)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res = EINVAL;
+ EmulatedCameraDevice* const camera_dev = getCameraDevice();
+ ALOGE_IF(camera_dev == NULL, "%s: No camera device instance.", __FUNCTION__);
+
+ if (camera_dev != NULL) {
+ /* Connect to the camera device. */
+ res = getCameraDevice()->connectDevice();
+ if (res == NO_ERROR) {
+ *device = &common;
+ }
+ }
+
+ return -res;
+}
+
+/* Closes the camera by tearing down preview/recording/device state. */
+status_t EmulatedCamera::closeCamera()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ return cleanupCamera();
+}
+
+/* Fills in facing and orientation from mParameters (defaulting to back-facing,
+ * orientation 0 when the keys are absent), then delegates to the base class
+ * for the version-dependent fields. */
+status_t EmulatedCamera::getCameraInfo(struct camera_info* info)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ const char* valstr = NULL;
+
+ valstr = mParameters.get(EmulatedCamera::FACING_KEY);
+ if (valstr != NULL) {
+ if (strcmp(valstr, EmulatedCamera::FACING_FRONT) == 0) {
+ info->facing = CAMERA_FACING_FRONT;
+ }
+ else if (strcmp(valstr, EmulatedCamera::FACING_BACK) == 0) {
+ info->facing = CAMERA_FACING_BACK;
+ }
+ } else {
+ info->facing = CAMERA_FACING_BACK;
+ }
+
+ valstr = mParameters.get(EmulatedCamera::ORIENTATION_KEY);
+ if (valstr != NULL) {
+ info->orientation = atoi(valstr);
+ } else {
+ info->orientation = 0;
+ }
+
+ return EmulatedBaseCamera::getCameraInfo(info);
+}
+
+/* Attaches (or detaches, when 'window' is NULL per HAL convention — confirm)
+ * the preview window, using the current preview frame rate. */
+status_t EmulatedCamera::setPreviewWindow(struct preview_stream_ops* window)
+{
+ /* Callback should return a negative errno. */
+ return -mPreviewWindow.setPreviewWindow(window,
+ mParameters.getPreviewFrameRate());
+}
+
+/* Stores the framework callback set; all delivery goes through the notifier. */
+void EmulatedCamera::setCallbacks(camera_notify_callback notify_cb,
+ camera_data_callback data_cb,
+ camera_data_timestamp_callback data_cb_timestamp,
+ camera_request_memory get_memory,
+ void* user)
+{
+ mCallbackNotifier.setCallbacks(notify_cb, data_cb, data_cb_timestamp,
+ get_memory, user);
+}
+
+/* Enables delivery of the given message type(s); state lives in the notifier. */
+void EmulatedCamera::enableMsgType(int32_t msg_type)
+{
+ mCallbackNotifier.enableMessage(msg_type);
+}
+
+/* Disables delivery of the given message type(s); state lives in the notifier. */
+void EmulatedCamera::disableMsgType(int32_t msg_type)
+{
+ mCallbackNotifier.disableMessage(msg_type);
+}
+
+/* Returns non-zero when the given message type is currently enabled. */
+int EmulatedCamera::isMsgTypeEnabled(int32_t msg_type)
+{
+ return mCallbackNotifier.isMessageEnabled(msg_type);
+}
+
+/* Starts preview; result is negated to the negative-errno HAL convention. */
+status_t EmulatedCamera::startPreview()
+{
+ /* Callback should return a negative errno. */
+ return -doStartPreview();
+}
+
+/* Stops preview; delegates to the shared doStopPreview() helper. */
+void EmulatedCamera::stopPreview()
+{
+ doStopPreview();
+}
+
+/* Returns non-zero when preview is currently running. */
+int EmulatedCamera::isPreviewEnabled()
+{
+ return mPreviewWindow.isPreviewEnabled();
+}
+
+/* Toggles metadata-in-buffers mode for recording; negated per HAL convention. */
+status_t EmulatedCamera::storeMetaDataInBuffers(int enable)
+{
+ /* Callback should return a negative errno. */
+ return -mCallbackNotifier.storeMetaDataInBuffers(enable);
+}
+
+/* Starts video recording at the current preview frame rate; negated result. */
+status_t EmulatedCamera::startRecording()
+{
+ /* Callback should return a negative errno. */
+ return -mCallbackNotifier.enableVideoRecording(mParameters.getPreviewFrameRate());
+}
+
+/* Stops video recording. */
+void EmulatedCamera::stopRecording()
+{
+ mCallbackNotifier.disableVideoRecording();
+}
+
+/* Returns non-zero when video recording is currently enabled. */
+int EmulatedCamera::isRecordingEnabled()
+{
+ return mCallbackNotifier.isVideoRecordingEnabled();
+}
+
+/* Returns a recording frame buffer previously handed to the framework. */
+void EmulatedCamera::releaseRecordingFrame(const void* opaque)
+{
+ mCallbackNotifier.releaseRecordingFrame(opaque);
+}
+
+/* Autofocus is not implemented (focus mode is fixed); reports success so the
+ * framework's AF sequence completes. */
+status_t EmulatedCamera::setAutoFocus()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ /* TODO: Future enhancements. */
+ return NO_ERROR;
+}
+
+/* No-op counterpart of setAutoFocus(); always succeeds. */
+status_t EmulatedCamera::cancelAutoFocus()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ /* TODO: Future enhancements. */
+ return NO_ERROR;
+}
+
+/* Takes a single still picture.
+ * Stops preview if running (restoring it on failure), restarts the camera
+ * device in the requested picture format, and asks it to deliver exactly one
+ * frame for the callback notifier to dispatch as the picture.
+ * Return:
+ *  NO_ERROR on success, or an appropriate error status.
+ */
+status_t EmulatedCamera::takePicture()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ status_t res;
+ int width, height;
+ uint32_t org_fmt;
+
+ /* Collect frame info for the picture. */
+ mParameters.getPictureSize(&width, &height);
+ const char* pix_fmt = mParameters.getPictureFormat();
+ if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
+ org_fmt = V4L2_PIX_FMT_YUV420;
+ } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
+ org_fmt = V4L2_PIX_FMT_RGB32;
+ } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
+ org_fmt = V4L2_PIX_FMT_NV21;
+ } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_JPEG) == 0) {
+ /* We only have JPEG converted for NV21 format. */
+ org_fmt = V4L2_PIX_FMT_NV21;
+ } else {
+ ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
+ return EINVAL;
+ }
+ /* Get JPEG quality. */
+ int jpeg_quality = mParameters.getInt(CameraParameters::KEY_JPEG_QUALITY);
+ if (jpeg_quality <= 0) {
+ jpeg_quality = 90; /* Fall back to default. */
+ }
+
+ /*
+ * Make sure preview is not running, and device is stopped before taking
+ * picture.
+ */
+
+ const bool preview_on = mPreviewWindow.isPreviewEnabled();
+ if (preview_on) {
+ doStopPreview();
+ }
+
+ /* Camera device should have been stopped when the shutter message has been
+ * enabled. */
+ EmulatedCameraDevice* const camera_dev = getCameraDevice();
+ if (camera_dev->isStarted()) {
+ ALOGW("%s: Camera device is started", __FUNCTION__);
+ camera_dev->stopDeliveringFrames();
+ camera_dev->stopDevice();
+ }
+
+ /*
+ * Take the picture now.
+ */
+
+ /* Start camera device for the picture frame. The V4L2 format code is a
+ * FOURCC, so its four bytes are printable via "%.4s". */
+ ALOGD("Starting camera for picture: %.4s(%s)[%dx%d]",
+ reinterpret_cast<const char*>(&org_fmt), pix_fmt, width, height);
+ res = camera_dev->startDevice(width, height, org_fmt);
+ if (res != NO_ERROR) {
+ if (preview_on) {
+ doStartPreview();
+ }
+ return res;
+ }
+
+ /* Deliver one frame only. */
+ mCallbackNotifier.setJpegQuality(jpeg_quality);
+ mCallbackNotifier.setTakingPicture(true);
+ res = camera_dev->startDeliveringFrames(true);
+ if (res != NO_ERROR) {
+ mCallbackNotifier.setTakingPicture(false);
+ if (preview_on) {
+ doStartPreview();
+ }
+ }
+ return res;
+}
+
+/* Nothing to cancel: a picture delivers a single frame; always succeeds. */
+status_t EmulatedCamera::cancelPicture()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ return NO_ERROR;
+}
+
+/* Applies a new set of camera parameters (flattened string form).
+ * Only exposure compensation (clamped to the advertised min/max) and white
+ * balance changes are propagated to the emulated camera device; everything
+ * else is stored verbatim in mParameters. Always returns NO_ERROR.
+ */
+status_t EmulatedCamera::setParameters(const char* parms)
+{
+ ALOGV("%s", __FUNCTION__);
+ PrintParamDiff(mParameters, parms);
+
+ CameraParameters new_param;
+ String8 str8_param(parms);
+ new_param.unflatten(str8_param);
+
+ /*
+ * Check for new exposure compensation parameter.
+ */
+ int new_exposure_compensation = new_param.getInt(
+ CameraParameters::KEY_EXPOSURE_COMPENSATION);
+ const int min_exposure_compensation = new_param.getInt(
+ CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION);
+ const int max_exposure_compensation = new_param.getInt(
+ CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION);
+
+ // Checks if the exposure compensation change is supported.
+ if ((min_exposure_compensation != 0) || (max_exposure_compensation != 0)) {
+ if (new_exposure_compensation > max_exposure_compensation) {
+ new_exposure_compensation = max_exposure_compensation;
+ }
+ if (new_exposure_compensation < min_exposure_compensation) {
+ new_exposure_compensation = min_exposure_compensation;
+ }
+
+ const int current_exposure_compensation = mParameters.getInt(
+ CameraParameters::KEY_EXPOSURE_COMPENSATION);
+ if (current_exposure_compensation != new_exposure_compensation) {
+ /* Device works in EV units: steps times the advertised step size. */
+ const float exposure_value = new_exposure_compensation *
+ new_param.getFloat(
+ CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP);
+
+ getCameraDevice()->setExposureCompensation(
+ exposure_value);
+ }
+ }
+
+ const char* new_white_balance = new_param.get(
+ CameraParameters::KEY_WHITE_BALANCE);
+ const char* supported_white_balance = new_param.get(
+ CameraParameters::KEY_SUPPORTED_WHITE_BALANCE);
+
+ /* NOTE(review): strstr() accepts any substring of the supported list, so a
+ * prefix of a supported mode would also pass — confirm acceptable. */
+ if ((supported_white_balance != NULL) && (new_white_balance != NULL) &&
+ (strstr(supported_white_balance, new_white_balance) != NULL)) {
+
+ const char* current_white_balance = mParameters.get(
+ CameraParameters::KEY_WHITE_BALANCE);
+ if ((current_white_balance == NULL) ||
+ (strcmp(current_white_balance, new_white_balance) != 0)) {
+ ALOGV("Setting white balance to %s", new_white_balance);
+ getCameraDevice()->setWhiteBalanceMode(new_white_balance);
+ }
+ }
+
+ mParameters = new_param;
+
+ return NO_ERROR;
+}
+
+/* A dumb variable indicating "no params" / error on the exit from
+ * EmulatedCamera::getParameters(). */
+static char lNoParam = '\0';
+
+/* Handler for camera_device_ops_t::get_parameters.
+ * Returns a heap-allocated copy of the flattened parameter string; the caller
+ * releases it via putParameters(). On allocation failure returns the static
+ * lNoParam sentinel (this routine must not return NULL). */
+char* EmulatedCamera::getParameters()
+{
+    String8 params(mParameters.flatten());
+    /* +1 for the terminating '\0'. */
+    char* ret_str =
+        reinterpret_cast<char*>(malloc(sizeof(char) * (params.length()+1)));
+    if (ret_str != NULL) {
+        /* NOTE: the NULL check must come before touching the buffer; the
+         * original zeroed the buffer before checking the malloc() result. */
+        memset(ret_str, 0, params.length()+1);
+        strncpy(ret_str, params.string(), params.length()+1);
+        return ret_str;
+    } else {
+        ALOGE("%s: Unable to allocate string for %s", __FUNCTION__, params.string());
+        /* Apparently, we can't return NULL from this routine. */
+        return &lNoParam;
+    }
+}
+
+/* Handler for camera_device_ops_t::put_parameters.
+ * Releases a buffer previously handed out by getParameters(). The static
+ * lNoParam sentinel (and NULL) must never reach free(). */
+void EmulatedCamera::putParameters(char* params)
+{
+    if (params == NULL || params == &lNoParam) {
+        /* Nothing to free: no buffer, or the "no params" sentinel. */
+        return;
+    }
+    free(params);
+}
+
+/* Handler for camera_device_ops_t::send_command.
+ * Commands are not handled yet; every command reports success.
+ * TODO: Future enhancements. */
+status_t EmulatedCamera::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
+{
+    ALOGV("%s: cmd = %d, arg1 = %d, arg2 = %d", __FUNCTION__, cmd, arg1, arg2);
+
+    return NO_ERROR;
+}
+
+/* Handler for camera_device_ops_t::release: tears down camera state by
+ * delegating to cleanupCamera(). */
+void EmulatedCamera::releaseCamera()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ cleanupCamera();
+}
+
+/* Handler for camera_device_ops_t::dump.
+ * State dumping is not implemented; always fails with -EINVAL. */
+status_t EmulatedCamera::dumpCamera(int fd)
+{
+ ALOGV("%s", __FUNCTION__);
+
+ /* TODO: Future enhancements. */
+ return -EINVAL;
+}
+
+/****************************************************************************
+ * Preview management.
+ ***************************************************************************/
+
+/* Starts (or restarts) the preview pipeline:
+ *  - restarts the device if it is already streaming,
+ *  - enables the preview window and connects the device,
+ *  - picks frame size and pixel format from the current parameters,
+ *  - starts the device and frame delivery.
+ * Return: NO_ERROR on success, or an appropriate error status on failure;
+ * on failure every step taken so far is rolled back. */
+status_t EmulatedCamera::doStartPreview()
+{
+    ALOGV("%s", __FUNCTION__);
+
+    EmulatedCameraDevice* camera_dev = getCameraDevice();
+    if (camera_dev->isStarted()) {
+        camera_dev->stopDeliveringFrames();
+        camera_dev->stopDevice();
+    }
+
+    status_t res = mPreviewWindow.startPreview();
+    if (res != NO_ERROR) {
+        return res;
+    }
+
+    /* Make sure camera device is connected. */
+    if (!camera_dev->isConnected()) {
+        res = camera_dev->connectDevice();
+        if (res != NO_ERROR) {
+            mPreviewWindow.stopPreview();
+            return res;
+        }
+    }
+
+    int width, height;
+    /* Let's see what we should use for frame width and height. */
+    if (mParameters.get(CameraParameters::KEY_VIDEO_SIZE) != NULL) {
+        mParameters.getVideoSize(&width, &height);
+    } else {
+        mParameters.getPreviewSize(&width, &height);
+    }
+    /* Let's see what we should use for the frame pixel format. Note that there
+     * are two parameters that define pixel formats for frames sent to the
+     * application via notification callbacks:
+     * - KEY_VIDEO_FRAME_FORMAT, that is used when recording video, and
+     * - KEY_PREVIEW_FORMAT, that is used for preview frame notification.
+     * We choose one or the other, depending on the "recording-hint" property
+     * set by the framework that indicates intention: video, or preview. */
+    const char* pix_fmt = NULL;
+    const char* is_video = mParameters.get(EmulatedCamera::RECORDING_HINT_KEY);
+    if (is_video == NULL) {
+        is_video = CameraParameters::FALSE;
+    }
+    if (strcmp(is_video, CameraParameters::TRUE) == 0) {
+        /* Video recording is requested. Lets see if video frame format is set. */
+        pix_fmt = mParameters.get(CameraParameters::KEY_VIDEO_FRAME_FORMAT);
+    }
+    /* If this was not video recording, or video frame format is not set, lets
+     * use preview pixel format for the main framebuffer. */
+    if (pix_fmt == NULL) {
+        pix_fmt = mParameters.getPreviewFormat();
+    }
+    if (pix_fmt == NULL) {
+        ALOGE("%s: Unable to obtain video format", __FUNCTION__);
+        mPreviewWindow.stopPreview();
+        return EINVAL;
+    }
+
+    /* Convert framework's pixel format to the FOURCC one. */
+    uint32_t org_fmt;
+    if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
+        org_fmt = V4L2_PIX_FMT_YUV420;
+    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
+        org_fmt = V4L2_PIX_FMT_RGB32;
+    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
+        org_fmt = V4L2_PIX_FMT_NV21;
+    } else {
+        ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
+        mPreviewWindow.stopPreview();
+        return EINVAL;
+    }
+    /* %.4s prints the FOURCC code as its four ASCII characters. */
+    ALOGD("Starting camera: %dx%d -> %.4s(%s)",
+          width, height, reinterpret_cast<const char*>(&org_fmt), pix_fmt);
+    res = camera_dev->startDevice(width, height, org_fmt);
+    if (res != NO_ERROR) {
+        mPreviewWindow.stopPreview();
+        return res;
+    }
+
+    res = camera_dev->startDeliveringFrames(false);
+    if (res != NO_ERROR) {
+        camera_dev->stopDevice();
+        mPreviewWindow.stopPreview();
+    }
+
+    return res;
+}
+
+/* Stops preview: halts frame delivery, stops the camera device, and disables
+ * the preview window. This method reverts doStartPreview().
+ * Return: NO_ERROR on success, or the status of the failed device stop.
+ * (The original always returned NO_ERROR, silently swallowing a
+ * stopDevice() failure even though it checked `res` internally.) */
+status_t EmulatedCamera::doStopPreview()
+{
+    ALOGV("%s", __FUNCTION__);
+
+    status_t res = NO_ERROR;
+    if (mPreviewWindow.isPreviewEnabled()) {
+        /* Stop the camera. */
+        if (getCameraDevice()->isStarted()) {
+            getCameraDevice()->stopDeliveringFrames();
+            res = getCameraDevice()->stopDevice();
+        }
+
+        if (res == NO_ERROR) {
+            /* Disable preview as well. */
+            mPreviewWindow.stopPreview();
+        }
+    }
+
+    /* Propagate a device-stop failure instead of reporting success. */
+    return res;
+}
+
+/****************************************************************************
+ * Private API.
+ ***************************************************************************/
+
+/* Releases all camera resources when the camera is closed or released:
+ * stops preview, stops and disconnects the camera device, and cleans up the
+ * callback notifier. Failures are reported as the negated status of the
+ * failing step. */
+status_t EmulatedCamera::cleanupCamera()
+{
+ status_t res = NO_ERROR;
+
+ /* If preview is running - stop it. */
+ res = doStopPreview();
+ if (res != NO_ERROR) {
+ return -res;
+ }
+
+ /* Stop and disconnect the camera device. */
+ EmulatedCameraDevice* const camera_dev = getCameraDevice();
+ if (camera_dev != NULL) {
+ /* Order matters: frame delivery must stop before the device does. */
+ if (camera_dev->isStarted()) {
+ camera_dev->stopDeliveringFrames();
+ res = camera_dev->stopDevice();
+ if (res != NO_ERROR) {
+ return -res;
+ }
+ }
+ if (camera_dev->isConnected()) {
+ res = camera_dev->disconnectDevice();
+ if (res != NO_ERROR) {
+ return -res;
+ }
+ }
+ }
+
+ mCallbackNotifier.cleanupCBNotifier();
+
+ return NO_ERROR;
+}
+
+/****************************************************************************
+ * Camera API callbacks as defined by camera_device_ops structure.
+ *
+ * Callbacks here simply dispatch the calls to an appropriate method inside
+ * EmulatedCamera instance, defined by the 'dev' parameter.
+ ***************************************************************************/
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::set_preview_window(struct camera_device* dev,
+                                       struct preview_stream_ops* window)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->setPreviewWindow(window);
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+void EmulatedCamera::set_callbacks(
+        struct camera_device* dev,
+        camera_notify_callback notify_cb,
+        camera_data_callback data_cb,
+        camera_data_timestamp_callback data_cb_timestamp,
+        camera_request_memory get_memory,
+        void* user)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return;
+    }
+    ec->setCallbacks(notify_cb, data_cb, data_cb_timestamp, get_memory, user);
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+void EmulatedCamera::enable_msg_type(struct camera_device* dev, int32_t msg_type)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return;
+    }
+    ec->enableMsgType(msg_type);
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+void EmulatedCamera::disable_msg_type(struct camera_device* dev, int32_t msg_type)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return;
+    }
+    ec->disableMsgType(msg_type);
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::msg_type_enabled(struct camera_device* dev, int32_t msg_type)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->isMsgTypeEnabled(msg_type);
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::start_preview(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->startPreview();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+void EmulatedCamera::stop_preview(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return;
+    }
+    ec->stopPreview();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::preview_enabled(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->isPreviewEnabled();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::store_meta_data_in_buffers(struct camera_device* dev,
+                                               int enable)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->storeMetaDataInBuffers(enable);
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::start_recording(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->startRecording();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+void EmulatedCamera::stop_recording(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return;
+    }
+    ec->stopRecording();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::recording_enabled(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->isRecordingEnabled();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+void EmulatedCamera::release_recording_frame(struct camera_device* dev,
+                                             const void* opaque)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return;
+    }
+    ec->releaseRecordingFrame(opaque);
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::auto_focus(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->setAutoFocus();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::cancel_auto_focus(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->cancelAutoFocus();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::take_picture(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->takePicture();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::cancel_picture(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->cancelPicture();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::set_parameters(struct camera_device* dev, const char* parms)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->setParameters(parms);
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+char* EmulatedCamera::get_parameters(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return NULL;
+    }
+    return ec->getParameters();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+void EmulatedCamera::put_parameters(struct camera_device* dev, char* params)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return;
+    }
+    ec->putParameters(params);
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::send_command(struct camera_device* dev,
+                                 int32_t cmd,
+                                 int32_t arg1,
+                                 int32_t arg2)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->sendCommand(cmd, arg1, arg2);
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+void EmulatedCamera::release(struct camera_device* dev)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return;
+    }
+    ec->releaseCamera();
+}
+
+/* Thunk: routes the HAL callback to the EmulatedCamera stored in dev->priv. */
+int EmulatedCamera::dump(struct camera_device* dev, int fd)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->dumpCamera(fd);
+}
+
+/* hw_device_t::close handler: the generic hw_device_t must first be viewed as
+ * a camera_device before the EmulatedCamera instance can be recovered from
+ * its priv field. */
+int EmulatedCamera::close(struct hw_device_t* device)
+{
+    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(
+            reinterpret_cast<struct camera_device*>(device)->priv);
+    if (ec == NULL) {
+        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+        return -EINVAL;
+    }
+    return ec->closeCamera();
+}
+
+/****************************************************************************
+ * Static initializer for the camera callback API
+ ****************************************************************************/
+
+/* Camera API dispatch table. NOTE: this is positional initialization, so the
+ * order of entries must match the field order of camera_device_ops_t exactly. */
+camera_device_ops_t EmulatedCamera::mDeviceOps = {
+ EmulatedCamera::set_preview_window,
+ EmulatedCamera::set_callbacks,
+ EmulatedCamera::enable_msg_type,
+ EmulatedCamera::disable_msg_type,
+ EmulatedCamera::msg_type_enabled,
+ EmulatedCamera::start_preview,
+ EmulatedCamera::stop_preview,
+ EmulatedCamera::preview_enabled,
+ EmulatedCamera::store_meta_data_in_buffers,
+ EmulatedCamera::start_recording,
+ EmulatedCamera::stop_recording,
+ EmulatedCamera::recording_enabled,
+ EmulatedCamera::release_recording_frame,
+ EmulatedCamera::auto_focus,
+ EmulatedCamera::cancel_auto_focus,
+ EmulatedCamera::take_picture,
+ EmulatedCamera::cancel_picture,
+ EmulatedCamera::set_parameters,
+ EmulatedCamera::get_parameters,
+ EmulatedCamera::put_parameters,
+ EmulatedCamera::send_command,
+ EmulatedCamera::release,
+ EmulatedCamera::dump
+};
+
+/****************************************************************************
+ * Common keys
+ ***************************************************************************/
+
+/* Parameter key naming the direction the camera faces; possible values are
+ * FACING_BACK / FACING_FRONT (see below). */
+const char EmulatedCamera::FACING_KEY[] = "prop-facing";
+/* Parameter key for the camera orientation property. */
+const char EmulatedCamera::ORIENTATION_KEY[] = "prop-orientation";
+/* Framework hint distinguishing video recording from preview; consulted in
+ * doStartPreview() to pick the frame pixel format. */
+const char EmulatedCamera::RECORDING_HINT_KEY[] = "recording-hint";
+
+/****************************************************************************
+ * Common string values
+ ***************************************************************************/
+
+/* Possible values for the FACING_KEY parameter. */
+const char EmulatedCamera::FACING_BACK[] = "back";
+const char EmulatedCamera::FACING_FRONT[] = "front";
+
+/****************************************************************************
+ * Helper routines
+ ***************************************************************************/
+
+/* Builds a new heap-allocated string "param,val" (comma-separated), as used
+ * for comma-separated parameter value lists.
+ * Return: the new string (caller frees), or NULL on allocation failure. */
+static char* AddValue(const char* param, const char* val)
+{
+    const size_t len1 = strlen(param);
+    const size_t len2 = strlen(val);
+    /* +2: one byte for the ',' separator, one for the terminating '\0'. */
+    char* ret = reinterpret_cast<char*>(malloc(len1 + len2 + 2));
+    ALOGE_IF(ret == NULL, "%s: Memory failure", __FUNCTION__);
+    if (ret != NULL) {
+        memcpy(ret, param, len1);
+        ret[len1] = ',';
+        memcpy(ret + len1 + 1, val, len2);
+        ret[len1 + len2 + 1] = '\0';
+    }
+    return ret;
+}
+
+/****************************************************************************
+ * Parameter debugging helpers
+ ***************************************************************************/
+
+#if DEBUG_PARAM
+/* Debug helper: logs the difference between the current parameter set and a
+ * new flattened parameter string — values that changed and keys that are new.
+ * Entries are expected in "key=value" form, separated by ';'.
+ * NOTE(review): the text after the last ';' (if any) is never examined. */
+static void PrintParamDiff(const CameraParameters& current,
+ const char* new_par)
+{
+ char tmp[2048];
+ const char* wrk = new_par;
+
+ /* Divided with ';' */
+ const char* next = strchr(wrk, ';');
+ while (next != NULL) {
+ /* Copy one "key=value" entry into tmp (truncated to fit the buffer). */
+ snprintf(tmp, sizeof(tmp), "%.*s", (int)(intptr_t)(next-wrk), wrk);
+ /* in the form key=value */
+ char* val = strchr(tmp, '=');
+ if (val != NULL) {
+ /* Split tmp into key and value at the '='. */
+ *val = '\0'; val++;
+ const char* in_current = current.get(tmp);
+ if (in_current != NULL) {
+ if (strcmp(in_current, val)) {
+ ALOGD("=== Value changed: %s: %s -> %s", tmp, in_current, val);
+ }
+ } else {
+ ALOGD("+++ New parameter: %s=%s", tmp, val);
+ }
+ } else {
+ ALOGW("No value separator in %s", tmp);
+ }
+ wrk = next + 1;
+ next = strchr(wrk, ';');
+ }
+}
+#endif /* DEBUG_PARAM */
+
+}; /* namespace android */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera.h b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera.h
new file mode 100644
index 0000000..9825d5d
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera.h
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA_H
+
+/*
+ * Contains declaration of a class EmulatedCamera that encapsulates
+ * functionality common to all version 1.0 emulated camera devices ("fake",
+ * "webcam", "video file", etc.). Instances of this class (for each emulated
+ * camera) are created during the construction of the EmulatedCameraFactory
+ * instance. This class serves as an entry point for all camera API calls that
+ * defined by camera_device_ops_t API.
+ */
+
+#include <camera/CameraParameters.h>
+#include "EmulatedBaseCamera.h"
+#include "EmulatedCameraDevice.h"
+#include "PreviewWindow.h"
+#include "CallbackNotifier.h"
+
+namespace android {
+
+/* Encapsulates functionality common to all version 1.0 emulated camera devices
+ * ("fake", "webcam", "file stream", etc.).
+ *
+ * Note that EmulatedCameraFactory instantiates object of this class just once,
+ * when EmulatedCameraFactory instance gets constructed. Connection to /
+ * disconnection from the actual camera device is handled by calls to
+ * connectDevice(), and closeCamera() methods of this class that are invoked in
+ * response to hw_module_methods_t::open, and camera_device::close callbacks.
+ */
+class EmulatedCamera : public camera_device, public EmulatedBaseCamera {
+public:
+ /* Constructs EmulatedCamera instance.
+ * Param:
+ * cameraId - Zero based camera identifier, which is an index of the camera
+ * instance in camera factory's array.
+ * module - Emulated camera HAL module descriptor.
+ */
+ EmulatedCamera(int cameraId,
+ struct hw_module_t* module);
+
+ /* Destructs EmulatedCamera instance. */
+ virtual ~EmulatedCamera();
+
+ /****************************************************************************
+ * Abstract API
+ ***************************************************************************/
+
+public:
+ /* Gets emulated camera device used by this instance of the emulated camera.
+ */
+ virtual EmulatedCameraDevice* getCameraDevice() = 0;
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ /** Override of base class method */
+ virtual status_t Initialize();
+
+ /* Next frame is available in the camera device.
+ * This is a notification callback that is invoked by the camera device when
+ * a new frame is available.
+ * Note that most likely this method is called in context of a worker thread
+ * that camera device has created for frame capturing.
+ * Param:
+ * frame - Captured frame, or NULL if camera device didn't pull the frame
+ * yet. If NULL is passed in this parameter use GetCurrentFrame method
+ * of the camera device class to obtain the next frame. Also note that
+ * the size of the frame that is passed here (as well as the frame
+ * returned from the GetCurrentFrame method) is defined by the current
+ * frame settings (width + height + pixel format) for the camera device.
+ * timestamp - Frame's timestamp.
+ * camera_dev - Camera device instance that delivered the frame.
+ */
+ virtual void onNextFrameAvailable(const void* frame,
+ nsecs_t timestamp,
+ EmulatedCameraDevice* camera_dev);
+
+ /* Entry point for notifications that occur in camera device.
+ * Param:
+ * err - CAMERA_ERROR_XXX error code.
+ */
+ virtual void onCameraDeviceError(int err);
+
+ /****************************************************************************
+ * Camera API implementation
+ ***************************************************************************/
+
+public:
+ /** Override of base class method */
+ virtual status_t connectCamera(hw_device_t** device);
+
+ /** Override of base class method */
+ virtual status_t closeCamera();
+
+ /** Override of base class method */
+ virtual status_t getCameraInfo(struct camera_info* info);
+
+ /****************************************************************************
+ * Camera API implementation.
+ * These methods are called from the camera API callback routines.
+ ***************************************************************************/
+
+protected:
+ /* Actual handler for camera_device_ops_t::set_preview_window callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t setPreviewWindow(struct preview_stream_ops *window);
+
+ /* Actual handler for camera_device_ops_t::set_callbacks callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void setCallbacks(camera_notify_callback notify_cb,
+ camera_data_callback data_cb,
+ camera_data_timestamp_callback data_cb_timestamp,
+ camera_request_memory get_memory,
+ void* user);
+
+ /* Actual handler for camera_device_ops_t::enable_msg_type callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void enableMsgType(int32_t msg_type);
+
+ /* Actual handler for camera_device_ops_t::disable_msg_type callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void disableMsgType(int32_t msg_type);
+
+ /* Actual handler for camera_device_ops_t::msg_type_enabled callback.
+ * NOTE: When this method is called the object is locked.
+ * Return:
+ * 0 if message(s) is (are) disabled, != 0 if enabled.
+ */
+ virtual int isMsgTypeEnabled(int32_t msg_type);
+
+ /* Actual handler for camera_device_ops_t::start_preview callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t startPreview();
+
+ /* Actual handler for camera_device_ops_t::stop_preview callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void stopPreview();
+
+ /* Actual handler for camera_device_ops_t::preview_enabled callback.
+ * NOTE: When this method is called the object is locked.
+ * Return:
+ * 0 if preview is disabled, != 0 if enabled.
+ */
+ virtual int isPreviewEnabled();
+
+ /* Actual handler for camera_device_ops_t::store_meta_data_in_buffers callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t storeMetaDataInBuffers(int enable);
+
+ /* Actual handler for camera_device_ops_t::start_recording callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t startRecording();
+
+ /* Actual handler for camera_device_ops_t::stop_recording callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void stopRecording();
+
+ /* Actual handler for camera_device_ops_t::recording_enabled callback.
+ * NOTE: When this method is called the object is locked.
+ * Return:
+ * 0 if recording is disabled, != 0 if enabled.
+ */
+ virtual int isRecordingEnabled();
+
+ /* Actual handler for camera_device_ops_t::release_recording_frame callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void releaseRecordingFrame(const void* opaque);
+
+ /* Actual handler for camera_device_ops_t::auto_focus callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t setAutoFocus();
+
+ /* Actual handler for camera_device_ops_t::cancel_auto_focus callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t cancelAutoFocus();
+
+ /* Actual handler for camera_device_ops_t::take_picture callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t takePicture();
+
+ /* Actual handler for camera_device_ops_t::cancel_picture callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t cancelPicture();
+
+ /* Actual handler for camera_device_ops_t::set_parameters callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t setParameters(const char* parms);
+
+ /* Actual handler for camera_device_ops_t::get_parameters callback.
+ * NOTE: When this method is called the object is locked.
+ * Return:
+ * Flattened parameters string. The caller will free the buffer allocated
+ * for the string by calling camera_device_ops_t::put_parameters callback.
+ */
+ virtual char* getParameters();
+
+ /* Actual handler for camera_device_ops_t::put_parameters callback.
+ * Called to free the string returned from camera_device_ops_t::get_parameters
+ * callback. There is nothing more to it: the name of the callback is just
+ * misleading.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void putParameters(char* params);
+
+ /* Actual handler for camera_device_ops_t::send_command callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
+
+ /* Actual handler for camera_device_ops_t::release callback.
+ * NOTE: When this method is called the object is locked.
+ */
+ virtual void releaseCamera();
+
+ /* Actual handler for camera_device_ops_t::dump callback.
+ * NOTE: When this method is called the object is locked.
+ * Note that failures in this method are reported as negative EXXX statuses.
+ */
+ virtual status_t dumpCamera(int fd);
+
+ /****************************************************************************
+ * Preview management.
+ ***************************************************************************/
+
+protected:
+ /* Starts preview.
+ * Note that when this method is called mPreviewWindow may be NULL,
+ * indicating that framework has an intention to start displaying video
+ * frames, but didn't create the preview window yet.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ virtual status_t doStartPreview();
+
+ /* Stops preview.
+ * This method reverts doStartPreview().
+ * Return:
+ * NO_ERROR on success, or an appropriate error status on failure.
+ */
+ virtual status_t doStopPreview();
+
+ /****************************************************************************
+ * Private API.
+ ***************************************************************************/
+
+protected:
+ /* Cleans up camera when released. */
+ virtual status_t cleanupCamera();
+
+ /****************************************************************************
+ * Camera API callbacks as defined by camera_device_ops structure.
+ * See hardware/libhardware/include/hardware/camera.h for information on
+ * each of these callbacks. Implemented in this class, these callbacks simply
+ * dispatch the call into an instance of EmulatedCamera class defined by the
+ * 'camera_device' parameter.
+ ***************************************************************************/
+
+private:
+ static int set_preview_window(struct camera_device* dev,
+ struct preview_stream_ops* window);
+
+ static void set_callbacks(struct camera_device* dev,
+ camera_notify_callback notify_cb,
+ camera_data_callback data_cb,
+ camera_data_timestamp_callback data_cb_timestamp,
+ camera_request_memory get_memory,
+ void* user);
+
+ static void enable_msg_type(struct camera_device* dev, int32_t msg_type);
+
+ static void disable_msg_type(struct camera_device* dev, int32_t msg_type);
+
+ static int msg_type_enabled(struct camera_device* dev, int32_t msg_type);
+
+ static int start_preview(struct camera_device* dev);
+
+ static void stop_preview(struct camera_device* dev);
+
+ static int preview_enabled(struct camera_device* dev);
+
+ static int store_meta_data_in_buffers(struct camera_device* dev, int enable);
+
+ static int start_recording(struct camera_device* dev);
+
+ static void stop_recording(struct camera_device* dev);
+
+ static int recording_enabled(struct camera_device* dev);
+
+ static void release_recording_frame(struct camera_device* dev,
+ const void* opaque);
+
+ static int auto_focus(struct camera_device* dev);
+
+ static int cancel_auto_focus(struct camera_device* dev);
+
+ static int take_picture(struct camera_device* dev);
+
+ static int cancel_picture(struct camera_device* dev);
+
+ static int set_parameters(struct camera_device* dev, const char* parms);
+
+ static char* get_parameters(struct camera_device* dev);
+
+ static void put_parameters(struct camera_device* dev, char* params);
+
+ static int send_command(struct camera_device* dev,
+ int32_t cmd,
+ int32_t arg1,
+ int32_t arg2);
+
+ static void release(struct camera_device* dev);
+
+ static int dump(struct camera_device* dev, int fd);
+
+ static int close(struct hw_device_t* device);
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+
+protected:
+ /* Locks this instance for parameters, state, etc. change. */
+ Mutex mObjectLock;
+
+ /* Camera parameters. */
+ CameraParameters mParameters;
+
+ /* Preview window. */
+ PreviewWindow mPreviewWindow;
+
+ /* Callback notifier. */
+ CallbackNotifier mCallbackNotifier;
+
+private:
+ /* Registered callbacks implementing camera API. */
+ static camera_device_ops_t mDeviceOps;
+
+ /****************************************************************************
+ * Common keys
+ ***************************************************************************/
+
+public:
+ static const char FACING_KEY[];
+ static const char ORIENTATION_KEY[];
+ static const char RECORDING_HINT_KEY[];
+
+ /****************************************************************************
+ * Common string values
+ ***************************************************************************/
+
+ /* Possible values for FACING_KEY */
+ static const char FACING_BACK[];
+ static const char FACING_FRONT[];
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera2.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera2.cpp
new file mode 100644
index 0000000..ea7424b
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera2.cpp
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedCamera that encapsulates
+ * functionality common to all version 2.0 emulated camera devices. Instances
+ * of this class (for each emulated camera) are created during the construction
+ * of the EmulatedCameraFactory instance. This class serves as an entry point
+ * for all camera API calls that are defined by the camera2_device_ops_t API.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera2_Camera"
+#include <cutils/log.h>
+
+#include "EmulatedCamera2.h"
+#include "system/camera_metadata.h"
+
+namespace android {
+
+/* Constructs EmulatedCamera2 instance.
+ * Param:
+ * cameraId - Zero based camera identifier, which is an index of the camera
+ * instance in camera factory's array.
+ * module - Emulated camera HAL module descriptor.
+ */
+EmulatedCamera2::EmulatedCamera2(int cameraId,
+ struct hw_module_t* module):
+ EmulatedBaseCamera(cameraId,
+ CAMERA_DEVICE_API_VERSION_2_0,
+ &common,
+ module)
+{
+ common.close = EmulatedCamera2::close;
+ ops = &sDeviceOps;
+ priv = this;
+
+ mNotifyCb = NULL;
+
+ mRequestQueueSrc = NULL;
+ mFrameQueueDst = NULL;
+
+ mVendorTagOps.get_camera_vendor_section_name =
+ EmulatedCamera2::get_camera_vendor_section_name;
+ mVendorTagOps.get_camera_vendor_tag_name =
+ EmulatedCamera2::get_camera_vendor_tag_name;
+ mVendorTagOps.get_camera_vendor_tag_type =
+ EmulatedCamera2::get_camera_vendor_tag_type;
+ mVendorTagOps.parent = this;
+
+ mStatusPresent = true;
+}
+
+/* Destructs EmulatedCamera2 instance. */
+EmulatedCamera2::~EmulatedCamera2() {
+}
+
+/****************************************************************************
+ * Abstract API
+ ***************************************************************************/
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+status_t EmulatedCamera2::Initialize() {
+ return NO_ERROR;
+}
+
+/****************************************************************************
+ * Camera API implementation
+ ***************************************************************************/
+
+status_t EmulatedCamera2::connectCamera(hw_device_t** device) {
+ *device = &common;
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera2::closeCamera() {
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera2::getCameraInfo(struct camera_info* info) {
+ return EmulatedBaseCamera::getCameraInfo(info);
+}
+
+/****************************************************************************
+ * Camera Device API implementation.
+ * These methods are called from the camera API callback routines.
+ ***************************************************************************/
+
+/** Request input queue */
+
+int EmulatedCamera2::requestQueueNotify() {
+ return INVALID_OPERATION;
+}
+
+/** Count of requests in flight */
+int EmulatedCamera2::getInProgressCount() {
+ return INVALID_OPERATION;
+}
+
+/** Cancel all captures in flight */
+int EmulatedCamera2::flushCapturesInProgress() {
+ return INVALID_OPERATION;
+}
+
+/** Construct a default request for a given use case */
+int EmulatedCamera2::constructDefaultRequest(
+ int request_template,
+ camera_metadata_t **request) {
+ return INVALID_OPERATION;
+}
+
+/** Output stream creation and management */
+
+int EmulatedCamera2::allocateStream(
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers) {
+ return INVALID_OPERATION;
+}
+
+int EmulatedCamera2::registerStreamBuffers(
+ uint32_t stream_id,
+ int num_buffers,
+ buffer_handle_t *buffers) {
+ return INVALID_OPERATION;
+}
+
+
+int EmulatedCamera2::releaseStream(uint32_t stream_id) {
+ return INVALID_OPERATION;
+}
+
+/** Reprocessing input stream management */
+
+int EmulatedCamera2::allocateReprocessStream(
+ uint32_t width,
+ uint32_t height,
+ uint32_t format,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id,
+ uint32_t *consumer_usage,
+ uint32_t *max_buffers) {
+ return INVALID_OPERATION;
+}
+
+int EmulatedCamera2::allocateReprocessStreamFromStream(
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id) {
+ return INVALID_OPERATION;
+}
+
+int EmulatedCamera2::releaseReprocessStream(uint32_t stream_id) {
+ return INVALID_OPERATION;
+}
+
+/** 3A triggering */
+
+int EmulatedCamera2::triggerAction(uint32_t trigger_id,
+ int ext1, int ext2) {
+ return INVALID_OPERATION;
+}
+
+/** Custom tag query methods */
+
+const char* EmulatedCamera2::getVendorSectionName(uint32_t tag) {
+ return NULL;
+}
+
+const char* EmulatedCamera2::getVendorTagName(uint32_t tag) {
+ return NULL;
+}
+
+int EmulatedCamera2::getVendorTagType(uint32_t tag) {
+ return -1;
+}
+
+/** Debug methods */
+
+int EmulatedCamera2::dump(int fd) {
+ return INVALID_OPERATION;
+}
+
+/****************************************************************************
+ * Private API.
+ ***************************************************************************/
+
+/****************************************************************************
+ * Camera API callbacks as defined by camera2_device_ops structure. See
+ * hardware/libhardware/include/hardware/camera2.h for information on each
+ * of these callbacks. Implemented in this class, these callbacks simply
+ * dispatch the call into an instance of EmulatedCamera2 class defined by the
+ * 'camera_device2' parameter, or set a member value in the same.
+ ***************************************************************************/
+
+EmulatedCamera2* getInstance(const camera2_device_t *d) {
+ const EmulatedCamera2* cec = static_cast<const EmulatedCamera2*>(d);
+ return const_cast<EmulatedCamera2*>(cec);
+}
+
+int EmulatedCamera2::set_request_queue_src_ops(const camera2_device_t *d,
+ const camera2_request_queue_src_ops *queue_src_ops) {
+ EmulatedCamera2* ec = getInstance(d);
+ ec->mRequestQueueSrc = queue_src_ops;
+ return NO_ERROR;
+}
+
+int EmulatedCamera2::notify_request_queue_not_empty(const camera2_device_t *d) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->requestQueueNotify();
+}
+
+int EmulatedCamera2::set_frame_queue_dst_ops(const camera2_device_t *d,
+ const camera2_frame_queue_dst_ops *queue_dst_ops) {
+ EmulatedCamera2* ec = getInstance(d);
+ ec->mFrameQueueDst = queue_dst_ops;
+ return NO_ERROR;
+}
+
+int EmulatedCamera2::get_in_progress_count(const camera2_device_t *d) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->getInProgressCount();
+}
+
+int EmulatedCamera2::flush_captures_in_progress(const camera2_device_t *d) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->flushCapturesInProgress();
+}
+
+int EmulatedCamera2::construct_default_request(const camera2_device_t *d,
+ int request_template,
+ camera_metadata_t **request) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->constructDefaultRequest(request_template, request);
+}
+
+int EmulatedCamera2::allocate_stream(const camera2_device_t *d,
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->allocateStream(width, height, format, stream_ops,
+ stream_id, format_actual, usage, max_buffers);
+}
+
+int EmulatedCamera2::register_stream_buffers(const camera2_device_t *d,
+ uint32_t stream_id,
+ int num_buffers,
+ buffer_handle_t *buffers) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->registerStreamBuffers(stream_id,
+ num_buffers,
+ buffers);
+}
+int EmulatedCamera2::release_stream(const camera2_device_t *d,
+ uint32_t stream_id) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->releaseStream(stream_id);
+}
+
+int EmulatedCamera2::allocate_reprocess_stream(const camera2_device_t *d,
+ uint32_t width,
+ uint32_t height,
+ uint32_t format,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id,
+ uint32_t *consumer_usage,
+ uint32_t *max_buffers) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->allocateReprocessStream(width, height, format,
+ reprocess_stream_ops, stream_id, consumer_usage, max_buffers);
+}
+
+int EmulatedCamera2::allocate_reprocess_stream_from_stream(
+ const camera2_device_t *d,
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->allocateReprocessStreamFromStream(output_stream_id,
+ reprocess_stream_ops, stream_id);
+}
+
+
+int EmulatedCamera2::release_reprocess_stream(const camera2_device_t *d,
+ uint32_t stream_id) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->releaseReprocessStream(stream_id);
+}
+
+int EmulatedCamera2::trigger_action(const camera2_device_t *d,
+ uint32_t trigger_id,
+ int ext1,
+ int ext2) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->triggerAction(trigger_id, ext1, ext2);
+}
+
+int EmulatedCamera2::set_notify_callback(const camera2_device_t *d,
+ camera2_notify_callback notify_cb, void* user) {
+ EmulatedCamera2* ec = getInstance(d);
+ Mutex::Autolock l(ec->mMutex);
+ ec->mNotifyCb = notify_cb;
+ ec->mNotifyUserPtr = user;
+ return NO_ERROR;
+}
+
+int EmulatedCamera2::get_metadata_vendor_tag_ops(const camera2_device_t *d,
+ vendor_tag_query_ops_t **ops) {
+ EmulatedCamera2* ec = getInstance(d);
+ *ops = static_cast<vendor_tag_query_ops_t*>(
+ &ec->mVendorTagOps);
+ return NO_ERROR;
+}
+
+const char* EmulatedCamera2::get_camera_vendor_section_name(
+ const vendor_tag_query_ops_t *v,
+ uint32_t tag) {
+ EmulatedCamera2* ec = static_cast<const TagOps*>(v)->parent;
+ return ec->getVendorSectionName(tag);
+}
+
+const char* EmulatedCamera2::get_camera_vendor_tag_name(
+ const vendor_tag_query_ops_t *v,
+ uint32_t tag) {
+ EmulatedCamera2* ec = static_cast<const TagOps*>(v)->parent;
+ return ec->getVendorTagName(tag);
+}
+
+int EmulatedCamera2::get_camera_vendor_tag_type(
+ const vendor_tag_query_ops_t *v,
+ uint32_t tag) {
+ EmulatedCamera2* ec = static_cast<const TagOps*>(v)->parent;
+ return ec->getVendorTagType(tag);
+}
+
+int EmulatedCamera2::dump(const camera2_device_t *d, int fd) {
+ EmulatedCamera2* ec = getInstance(d);
+ return ec->dump(fd);
+}
+
+int EmulatedCamera2::close(struct hw_device_t* device) {
+ EmulatedCamera2* ec =
+ static_cast<EmulatedCamera2*>(
+ reinterpret_cast<camera2_device_t*>(device) );
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera2 device", __FUNCTION__);
+ return -EINVAL;
+ }
+ return ec->closeCamera();
+}
+
+void EmulatedCamera2::sendNotification(int32_t msgType,
+ int32_t ext1, int32_t ext2, int32_t ext3) {
+ camera2_notify_callback notifyCb;
+ {
+ Mutex::Autolock l(mMutex);
+ notifyCb = mNotifyCb;
+ }
+ if (notifyCb != NULL) {
+ notifyCb(msgType, ext1, ext2, ext3, mNotifyUserPtr);
+ }
+}
+
+camera2_device_ops_t EmulatedCamera2::sDeviceOps = {
+ EmulatedCamera2::set_request_queue_src_ops,
+ EmulatedCamera2::notify_request_queue_not_empty,
+ EmulatedCamera2::set_frame_queue_dst_ops,
+ EmulatedCamera2::get_in_progress_count,
+ EmulatedCamera2::flush_captures_in_progress,
+ EmulatedCamera2::construct_default_request,
+ EmulatedCamera2::allocate_stream,
+ EmulatedCamera2::register_stream_buffers,
+ EmulatedCamera2::release_stream,
+ EmulatedCamera2::allocate_reprocess_stream,
+ EmulatedCamera2::allocate_reprocess_stream_from_stream,
+ EmulatedCamera2::release_reprocess_stream,
+ EmulatedCamera2::trigger_action,
+ EmulatedCamera2::set_notify_callback,
+ EmulatedCamera2::get_metadata_vendor_tag_ops,
+ EmulatedCamera2::dump
+};
+
+}; /* namespace android */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera2.h b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera2.h
new file mode 100644
index 0000000..9f5f67b
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera2.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA2_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA2_H
+
+/*
+ * Contains declaration of a class EmulatedCamera that encapsulates
+ * functionality common to all version 2.0 emulated camera devices. Instances
+ * of this class (for each emulated camera) are created during the construction
+ * of the EmulatedCameraFactory instance. This class serves as an entry point
+ * for all camera API calls that are defined by the camera2_device_ops_t API.
+ */
+
+#include "hardware/camera2.h"
+#include "system/camera_metadata.h"
+#include "EmulatedBaseCamera.h"
+#include <utils/Thread.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+/* Encapsulates functionality common to all version 2.0 emulated camera devices
+ *
+ * Note that EmulatedCameraFactory instantiates object of this class just once,
+ * when EmulatedCameraFactory instance gets constructed. Connection to /
+ * disconnection from the actual camera device is handled by calls to
+ * connectDevice(), and closeCamera() methods of this class that are invoked in
+ * response to hw_module_methods_t::open, and camera_device::close callbacks.
+ */
+class EmulatedCamera2 : public camera2_device, public EmulatedBaseCamera {
+public:
+ /* Constructs EmulatedCamera2 instance.
+ * Param:
+ * cameraId - Zero based camera identifier, which is an index of the camera
+ * instance in camera factory's array.
+ * module - Emulated camera HAL module descriptor.
+ */
+ EmulatedCamera2(int cameraId,
+ struct hw_module_t* module);
+
+ /* Destructs EmulatedCamera2 instance. */
+ virtual ~EmulatedCamera2();
+
+ /****************************************************************************
+ * Abstract API
+ ***************************************************************************/
+
+public:
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ virtual status_t Initialize();
+
+ /****************************************************************************
+ * Camera module API and generic hardware device API implementation
+ ***************************************************************************/
+
+public:
+ virtual status_t connectCamera(hw_device_t** device);
+
+ virtual status_t closeCamera();
+
+ virtual status_t getCameraInfo(struct camera_info* info) = 0;
+
+ /****************************************************************************
+ * Camera API implementation.
+ * These methods are called from the camera API callback routines.
+ ***************************************************************************/
+
+protected:
+ /** Request input queue notification */
+ virtual int requestQueueNotify();
+
+ /** Count of requests in flight */
+ virtual int getInProgressCount();
+
+ /** Cancel all captures in flight */
+ virtual int flushCapturesInProgress();
+
+ virtual int constructDefaultRequest(
+ int request_template,
+ camera_metadata_t **request);
+
+ /** Output stream creation and management */
+ virtual int allocateStream(
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers);
+
+ virtual int registerStreamBuffers(
+ uint32_t stream_id,
+ int num_buffers,
+ buffer_handle_t *buffers);
+
+ virtual int releaseStream(uint32_t stream_id);
+
+ /** Input stream creation and management */
+ virtual int allocateReprocessStream(
+ uint32_t width,
+ uint32_t height,
+ uint32_t format,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id,
+ uint32_t *consumer_usage,
+ uint32_t *max_buffers);
+
+ virtual int allocateReprocessStreamFromStream(
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id);
+
+ virtual int releaseReprocessStream(uint32_t stream_id);
+
+ /** 3A action triggering */
+ virtual int triggerAction(uint32_t trigger_id,
+ int32_t ext1, int32_t ext2);
+
+ /** Custom tag definitions */
+ virtual const char* getVendorSectionName(uint32_t tag);
+ virtual const char* getVendorTagName(uint32_t tag);
+ virtual int getVendorTagType(uint32_t tag);
+
+ /** Debug methods */
+
+ virtual int dump(int fd);
+
+ /****************************************************************************
+ * Camera API callbacks as defined by camera2_device_ops structure. See
+ * hardware/libhardware/include/hardware/camera2.h for information on each
+ * of these callbacks. Implemented in this class, these callbacks simply
+ * dispatch the call into an instance of EmulatedCamera2 class defined in
+ * the 'camera_device2' parameter.
+ ***************************************************************************/
+
+private:
+ /** Input request queue */
+ static int set_request_queue_src_ops(const camera2_device_t *,
+ const camera2_request_queue_src_ops *queue_src_ops);
+ static int notify_request_queue_not_empty(const camera2_device_t *);
+
+ /** Output frame queue */
+ static int set_frame_queue_dst_ops(const camera2_device_t *,
+ const camera2_frame_queue_dst_ops *queue_dst_ops);
+
+ /** In-progress request management */
+ static int get_in_progress_count(const camera2_device_t *);
+
+ static int flush_captures_in_progress(const camera2_device_t *);
+
+ /** Request template creation */
+ static int construct_default_request(const camera2_device_t *,
+ int request_template,
+ camera_metadata_t **request);
+
+ /** Stream management */
+ static int allocate_stream(const camera2_device_t *,
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers);
+
+ static int register_stream_buffers(const camera2_device_t *,
+ uint32_t stream_id,
+ int num_buffers,
+ buffer_handle_t *buffers);
+
+ static int release_stream(const camera2_device_t *,
+ uint32_t stream_id);
+
+ static int allocate_reprocess_stream(const camera2_device_t *,
+ uint32_t width,
+ uint32_t height,
+ uint32_t format,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id,
+ uint32_t *consumer_usage,
+ uint32_t *max_buffers);
+
+ static int allocate_reprocess_stream_from_stream(const camera2_device_t *,
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *reprocess_stream_ops,
+ uint32_t *stream_id);
+
+ static int release_reprocess_stream(const camera2_device_t *,
+ uint32_t stream_id);
+
+ /** 3A triggers*/
+ static int trigger_action(const camera2_device_t *,
+ uint32_t trigger_id,
+ int ext1,
+ int ext2);
+
+ /** Notifications to application */
+ static int set_notify_callback(const camera2_device_t *,
+ camera2_notify_callback notify_cb,
+ void *user);
+
+ /** Vendor metadata registration */
+ static int get_metadata_vendor_tag_ops(const camera2_device_t *,
+ vendor_tag_query_ops_t **ops);
+ // for get_metadata_vendor_tag_ops
+ static const char* get_camera_vendor_section_name(
+ const vendor_tag_query_ops_t *,
+ uint32_t tag);
+ static const char* get_camera_vendor_tag_name(
+ const vendor_tag_query_ops_t *,
+ uint32_t tag);
+ static int get_camera_vendor_tag_type(
+ const vendor_tag_query_ops_t *,
+ uint32_t tag);
+
+ static int dump(const camera2_device_t *, int fd);
+
+ /** For hw_device_t ops */
+ static int close(struct hw_device_t* device);
+
+ /****************************************************************************
+ * Data members shared with implementations
+ ***************************************************************************/
+ protected:
+ /** Mutex for calls through camera2 device interface */
+ Mutex mMutex;
+
+ bool mStatusPresent;
+
+ const camera2_request_queue_src_ops *mRequestQueueSrc;
+ const camera2_frame_queue_dst_ops *mFrameQueueDst;
+
+ struct TagOps : public vendor_tag_query_ops {
+ EmulatedCamera2 *parent;
+ };
+ TagOps mVendorTagOps;
+
+ void sendNotification(int32_t msgType,
+ int32_t ext1, int32_t ext2, int32_t ext3);
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+ private:
+ static camera2_device_ops_t sDeviceOps;
+ camera2_notify_callback mNotifyCb;
+ void* mNotifyUserPtr;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA2_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera3.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera3.cpp
new file mode 100644
index 0000000..e9110cc
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera3.cpp
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Contains implementation of a class EmulatedCamera that encapsulates
+ * functionality common to all version 3.0 emulated camera devices. Instances
+ * of this class (for each emulated camera) are created during the construction
+ * of the EmulatedCameraFactory instance. This class serves as an entry point
+ * for all camera API calls that are defined by the camera3_device_ops_t API.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera3_Camera"
+#include <cutils/log.h>
+
+#include "EmulatedCamera3.h"
+#include "system/camera_metadata.h"
+
+namespace android {
+
+/**
+ * Constructs EmulatedCamera3 instance.
+ * Param:
+ * cameraId - Zero based camera identifier, which is an index of the camera
+ * instance in camera factory's array.
+ * module - Emulated camera HAL module descriptor.
+ */
+EmulatedCamera3::EmulatedCamera3(int cameraId,
+ struct hw_module_t* module):
+ EmulatedBaseCamera(cameraId,
+ CAMERA_DEVICE_API_VERSION_3_3,
+ &common,
+ module),
+ mStatus(STATUS_ERROR)
+{
+ common.close = EmulatedCamera3::close;
+ ops = &sDeviceOps;
+
+ mCallbackOps = NULL;
+
+}
+
+/* Destructs EmulatedCamera3 instance. */
+EmulatedCamera3::~EmulatedCamera3() {
+}
+
+/****************************************************************************
+ * Abstract API
+ ***************************************************************************/
+
+/****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+status_t EmulatedCamera3::Initialize() {
+ ALOGV("%s", __FUNCTION__);
+
+ mStatus = STATUS_CLOSED;
+ return NO_ERROR;
+}
+
+/****************************************************************************
+ * Camera API implementation
+ ***************************************************************************/
+
+status_t EmulatedCamera3::connectCamera(hw_device_t** device) {
+ ALOGV("%s", __FUNCTION__);
+ if (device == NULL) return BAD_VALUE;
+
+ if (mStatus != STATUS_CLOSED) {
+ ALOGE("%s: Trying to open a camera in state %d!",
+ __FUNCTION__, mStatus);
+ return INVALID_OPERATION;
+ }
+
+ *device = &common;
+ mStatus = STATUS_OPEN;
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera3::closeCamera() {
+ mStatus = STATUS_CLOSED;
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera3::getCameraInfo(struct camera_info* info) {
+ return EmulatedBaseCamera::getCameraInfo(info);
+}
+
+/****************************************************************************
+ * Camera Device API implementation.
+ * These methods are called from the camera API callback routines.
+ ***************************************************************************/
+
+status_t EmulatedCamera3::initializeDevice(
+ const camera3_callback_ops *callbackOps) {
+ if (callbackOps == NULL) {
+ ALOGE("%s: NULL callback ops provided to HAL!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (mStatus != STATUS_OPEN) {
+ ALOGE("%s: Trying to initialize a camera in state %d!",
+ __FUNCTION__, mStatus);
+ return INVALID_OPERATION;
+ }
+
+ mCallbackOps = callbackOps;
+ mStatus = STATUS_READY;
+
+ return NO_ERROR;
+}
+
+status_t EmulatedCamera3::configureStreams(
+ camera3_stream_configuration *streamList) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+status_t EmulatedCamera3::registerStreamBuffers(
+ const camera3_stream_buffer_set *bufferSet) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+const camera_metadata_t* EmulatedCamera3::constructDefaultRequestSettings(
+ int type) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return NULL;
+}
+
+status_t EmulatedCamera3::processCaptureRequest(
+ camera3_capture_request *request) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+status_t EmulatedCamera3::flush() {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+/** Debug methods */
+
+void EmulatedCamera3::dump(int fd) {
+ ALOGE("%s: Not implemented", __FUNCTION__);
+ return;
+}
+
+/****************************************************************************
+ * Protected API. Callbacks to the framework.
+ ***************************************************************************/
+
+void EmulatedCamera3::sendCaptureResult(camera3_capture_result_t *result) {
+ mCallbackOps->process_capture_result(mCallbackOps, result);
+}
+
+void EmulatedCamera3::sendNotify(camera3_notify_msg_t *msg) {
+ mCallbackOps->notify(mCallbackOps, msg);
+}
+
+/****************************************************************************
+ * Private API.
+ ***************************************************************************/
+
+/****************************************************************************
+ * Camera API callbacks as defined by camera3_device_ops structure. See
+ * hardware/libhardware/include/hardware/camera3.h for information on each
+ * of these callbacks. Implemented in this class, these callbacks simply
+ * dispatch the call into an instance of EmulatedCamera3 class defined by the
+ * 'camera_device3' parameter, or set a member value in the same.
+ ***************************************************************************/
+
+EmulatedCamera3* getInstance(const camera3_device_t *d) {
+ const EmulatedCamera3* cec = static_cast<const EmulatedCamera3*>(d);
+ return const_cast<EmulatedCamera3*>(cec);
+}
+
+int EmulatedCamera3::initialize(const struct camera3_device *d,
+ const camera3_callback_ops_t *callback_ops) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->initializeDevice(callback_ops);
+}
+
+int EmulatedCamera3::configure_streams(const struct camera3_device *d,
+ camera3_stream_configuration_t *stream_list) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->configureStreams(stream_list);
+}
+
+int EmulatedCamera3::register_stream_buffers(
+ const struct camera3_device *d,
+ const camera3_stream_buffer_set_t *buffer_set) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->registerStreamBuffers(buffer_set);
+}
+
+int EmulatedCamera3::process_capture_request(
+ const struct camera3_device *d,
+ camera3_capture_request_t *request) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->processCaptureRequest(request);
+}
+
+const camera_metadata_t* EmulatedCamera3::construct_default_request_settings(
+ const camera3_device_t *d, int type) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->constructDefaultRequestSettings(type);
+}
+
+void EmulatedCamera3::dump(const camera3_device_t *d, int fd) {
+ EmulatedCamera3* ec = getInstance(d);
+ ec->dump(fd);
+}
+
+int EmulatedCamera3::flush(const camera3_device_t *d) {
+ EmulatedCamera3* ec = getInstance(d);
+ return ec->flush();
+}
+
+int EmulatedCamera3::close(struct hw_device_t* device) {
+ EmulatedCamera3* ec =
+ static_cast<EmulatedCamera3*>(
+ reinterpret_cast<camera3_device_t*>(device) );
+ if (ec == NULL) {
+ ALOGE("%s: Unexpected NULL camera3 device", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ return ec->closeCamera();
+}
+
+camera3_device_ops_t EmulatedCamera3::sDeviceOps = {
+ EmulatedCamera3::initialize,
+ EmulatedCamera3::configure_streams,
+ /* DEPRECATED: register_stream_buffers */ nullptr,
+ EmulatedCamera3::construct_default_request_settings,
+ EmulatedCamera3::process_capture_request,
+ /* DEPRECATED: get_metadata_vendor_tag_ops */ nullptr,
+ EmulatedCamera3::dump,
+ EmulatedCamera3::flush
+};
+
+const char* EmulatedCamera3::sAvailableCapabilitiesStrings[NUM_CAPABILITIES] = {
+ "BACKWARD_COMPATIBLE",
+ "MANUAL_SENSOR",
+ "MANUAL_POST_PROCESSING",
+ "RAW",
+ "PRIVATE_REPROCESSING",
+ "READ_SENSOR_SETTINGS",
+ "BURST_CAPTURE",
+ "YUV_REPROCESSING",
+ "DEPTH_OUTPUT",
+ "CONSTRAINED_HIGH_SPEED_VIDEO",
+ "FULL_LEVEL"
+};
+
+}; /* namespace android */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera3.h b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera3.h
new file mode 100644
index 0000000..9d2d7b2
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCamera3.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA3_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA3_H
+
+/**
+ * Contains declaration of a class EmulatedCamera that encapsulates
+ * functionality common to all version 3.0 emulated camera devices. Instances
+ * of this class (for each emulated camera) are created during the construction
+ * of the EmulatedCameraFactory instance. This class serves as an entry point
+ * for all camera API calls that defined by camera3_device_ops_t API.
+ */
+
+#include "hardware/camera3.h"
+#include "system/camera_metadata.h"
+#include "EmulatedBaseCamera.h"
+
+namespace android {
+
+/**
+ * Encapsulates functionality common to all version 3.0 emulated camera devices
+ *
+ * Note that EmulatedCameraFactory instantiates an object of this class just
+ * once, when EmulatedCameraFactory instance gets constructed. Connection to /
+ * disconnection from the actual camera device is handled by calls to
+ * connectDevice(), and closeCamera() methods of this class that are invoked in
+ * response to hw_module_methods_t::open, and camera_device::close callbacks.
+ */
+class EmulatedCamera3 : public camera3_device, public EmulatedBaseCamera {
+public:
+ /* Constructs EmulatedCamera3 instance.
+ * Param:
+ * cameraId - Zero based camera identifier, which is an index of the camera
+ * instance in camera factory's array.
+ * module - Emulated camera HAL module descriptor.
+ */
+ EmulatedCamera3(int cameraId,
+ struct hw_module_t* module);
+
+ /* Destructs EmulatedCamera3 instance. */
+ virtual ~EmulatedCamera3();
+
+ /* List of all defined capabilities plus useful HW levels */
+ enum AvailableCapabilities {
+ BACKWARD_COMPATIBLE,
+ MANUAL_SENSOR,
+ MANUAL_POST_PROCESSING,
+ RAW,
+ PRIVATE_REPROCESSING,
+ READ_SENSOR_SETTINGS,
+ BURST_CAPTURE,
+ YUV_REPROCESSING,
+ DEPTH_OUTPUT,
+ CONSTRAINED_HIGH_SPEED_VIDEO,
+ // Levels
+ FULL_LEVEL,
+
+ NUM_CAPABILITIES
+ };
+
+ // Char strings for above enum, with size NUM_CAPABILITIES.
+ // Defined in EmulatedCamera3.cpp; order must mirror the enum above.
+ static const char *sAvailableCapabilitiesStrings[];
+
+ /****************************************************************************
+ * Abstract API
+ ***************************************************************************/
+
+public:
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+public:
+ virtual status_t Initialize();
+
+ /****************************************************************************
+ * Camera module API and generic hardware device API implementation
+ ***************************************************************************/
+
+public:
+ virtual status_t connectCamera(hw_device_t** device);
+
+ virtual status_t closeCamera();
+
+ virtual status_t getCameraInfo(struct camera_info* info);
+
+ /****************************************************************************
+ * Camera API implementation.
+ * These methods are called from the camera API callback routines.
+ ***************************************************************************/
+
+protected:
+
+ virtual status_t initializeDevice(
+ const camera3_callback_ops *callbackOps);
+
+ virtual status_t configureStreams(
+ camera3_stream_configuration *streamList);
+
+ virtual status_t registerStreamBuffers(
+ const camera3_stream_buffer_set *bufferSet) ;
+
+ virtual const camera_metadata_t* constructDefaultRequestSettings(
+ int type);
+
+ virtual status_t processCaptureRequest(camera3_capture_request *request);
+
+ virtual status_t flush();
+
+ /** Debug methods */
+
+ virtual void dump(int fd);
+
+ /****************************************************************************
+ * Camera API callbacks as defined by camera3_device_ops structure. See
+ * hardware/libhardware/include/hardware/camera3.h for information on each
+ * of these callbacks. Implemented in this class, these callbacks simply
+ * dispatch the call into an instance of EmulatedCamera3 class defined in
+ * the 'camera_device3' parameter.
+ ***************************************************************************/
+
+private:
+
+ /** Startup */
+ static int initialize(const struct camera3_device *,
+ const camera3_callback_ops_t *callback_ops);
+
+ /** Stream configuration and buffer registration */
+
+ static int configure_streams(const struct camera3_device *,
+ camera3_stream_configuration_t *stream_list);
+
+ static int register_stream_buffers(const struct camera3_device *,
+ const camera3_stream_buffer_set_t *buffer_set);
+
+ /** Template request settings provision */
+
+ static const camera_metadata_t* construct_default_request_settings(
+ const struct camera3_device *, int type);
+
+ /** Submission of capture requests to HAL */
+
+ static int process_capture_request(const struct camera3_device *,
+ camera3_capture_request_t *request);
+
+ static void dump(const camera3_device_t *, int fd);
+
+ static int flush(const camera3_device_t *);
+
+ /** For hw_device_t ops */
+ static int close(struct hw_device_t* device);
+
+ /****************************************************************************
+ * Data members shared with implementations
+ ***************************************************************************/
+ protected:
+
+ enum {
+ // State at construction time, and after a device operation error
+ STATUS_ERROR = 0,
+ // State after startup-time init and after device instance close
+ STATUS_CLOSED,
+ // State after being opened, before device instance init
+ STATUS_OPEN,
+ // State after device instance initialization
+ STATUS_READY,
+ // State while actively capturing data
+ STATUS_ACTIVE
+ } mStatus;
+
+ /**
+ * Callbacks back to the framework
+ */
+
+ void sendCaptureResult(camera3_capture_result_t *result);
+ void sendNotify(camera3_notify_msg_t *msg);
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+ private:
+ // Static dispatch table registered with the framework; entries point at
+ // the private static callbacks above.
+ static camera3_device_ops_t sDeviceOps;
+ // Framework callback table received in initializeDevice(); used by
+ // sendCaptureResult() / sendNotify().
+ const camera3_callback_ops_t *mCallbackOps;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA3_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraCommon.h b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraCommon.h
new file mode 100644
index 0000000..c1d575c
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraCommon.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_COMMON_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA_COMMON_H
+
+/*
+ * Contains common declarations that are used across the camera emulation.
+ */
+
+#include
+#include
+
+/* A helper class that tracks a routine execution.
+ * Basically, it dumps an entry message in its constructor, and an exit message
+ * in its destructor. Use the LOGRE() macro (declared below) to create instances
+ * of this class at the beginning of the tracked routines / methods.
+ */
+class HWERoutineTracker {
+public:
+ /* Constructor that prints an "entry" trace message. */
+ explicit HWERoutineTracker(const char* name)
+ : mName(name) {
+ ALOGV("Entering %s", mName);
+ }
+
+ /* Destructor that prints a "leave" trace message. */
+ ~HWERoutineTracker() {
+ ALOGV("Leaving %s", mName);
+ }
+
+private:
+ /* Stores the routine name. NOTE: only the pointer is kept, so the caller
+ * must pass a string that outlives this tracker (e.g. __FUNCTION__). */
+ const char* mName;
+};
+
+/* Logs an execution of a routine / method.
+ * The two-level paste macros are required so that __LINE__ is expanded to its
+ * numeric value before token pasting; pasting directly ('x##__LINE__') would
+ * always produce the literal identifier 'hwertracker___LINE__', defeating the
+ * purpose of the line suffix and clashing if used twice in one scope. */
+#define HWE_PASTE_HELPER(x, y) x##y
+#define HWE_PASTE(x, y) HWE_PASTE_HELPER(x, y)
+#define LOGRE() HWERoutineTracker HWE_PASTE(hwertracker_, __LINE__)(__FUNCTION__)
+
+/*
+ * min / max macros
+ * WARNING: as function-like macros these evaluate each argument twice, so
+ * arguments with side effects (e.g. min(i++, n)) misbehave.  The lowercase
+ * names can also collide with std::min/std::max if <algorithm> is in scope.
+ */
+
+#define min(a,b) (((a) < (b)) ? (a) : (b))
+#define max(a,b) (((a) > (b)) ? (a) : (b))
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_COMMON_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraDevice.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraDevice.cpp
new file mode 100644
index 0000000..c8e5640
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraDevice.cpp
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of an abstract class EmulatedCameraDevice that defines
+ * functionality expected from an emulated physical camera device:
+ * - Obtaining and setting camera parameters
+ * - Capturing frames
+ * - Streaming video
+ * - etc.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Device"
+#include
+#include
+#include
+#include "EmulatedCameraDevice.h"
+
+namespace android {
+
+// Gamma value used to map an EV step onto a linear luminance multiplier in
+// setExposureCompensation() (2^(ev / GAMMA_CORRECTION)).
+const float GAMMA_CORRECTION = 2.2f;
+// Constructs the device in the ECDS_CONSTRUCTED state with no framebuffer,
+// neutral exposure (1.0) and no white balance mode selected yet.
+EmulatedCameraDevice::EmulatedCameraDevice(EmulatedCamera* camera_hal)
+ : mObjectLock(),
+ mCurFrameTimestamp(0),
+ mCameraHAL(camera_hal),
+ mCurrentFrame(NULL),
+ mExposureCompensation(1.0f),
+ mWhiteBalanceScale(NULL),
+ mSupportedWhiteBalanceScale(),
+ mState(ECDS_CONSTRUCTED)
+{
+}
+
+EmulatedCameraDevice::~EmulatedCameraDevice()
+{
+    ALOGV("EmulatedCameraDevice destructor");
+    /* delete[] on a NULL pointer is a no-op, so no guard is needed. */
+    delete[] mCurrentFrame;
+    /* Release every per-mode white balance scale array this object owns. */
+    for (size_t i = 0; i < mSupportedWhiteBalanceScale.size(); ++i) {
+        float* scale = mSupportedWhiteBalanceScale.valueAt(i);
+        if (scale != NULL) {
+            delete[] scale;
+        }
+    }
+}
+
+/****************************************************************************
+ * Emulated camera device public API
+ ***************************************************************************/
+
+status_t EmulatedCameraDevice::Initialize()
+{
+    /* A repeated initialization attempt is treated as success. */
+    if (isInitialized()) {
+        ALOGW("%s: Emulated camera device is already initialized: mState = %d",
+              __FUNCTION__, mState);
+        return NO_ERROR;
+    }
+
+    /* Create the worker thread that will pump frames from the device. */
+    mWorkerThread = new WorkerThread(this);
+    if (getWorkerThread() == NULL) {
+        ALOGE("%s: Unable to instantiate worker thread object", __FUNCTION__);
+        return ENOMEM;
+    }
+
+    mState = ECDS_INITIALIZED;
+    return NO_ERROR;
+}
+
+status_t EmulatedCameraDevice::startDeliveringFrames(bool one_burst)
+{
+    ALOGV("%s", __FUNCTION__);
+
+    /* Delivery requires a started capture device. */
+    if (!isStarted()) {
+        ALOGE("%s: Device is not started", __FUNCTION__);
+        return EINVAL;
+    }
+
+    /* Frames will be delivered from the thread routine. */
+    const status_t status = startWorkerThread(one_burst);
+    ALOGE_IF(status != NO_ERROR, "%s: startWorkerThread failed", __FUNCTION__);
+    return status;
+}
+
+status_t EmulatedCameraDevice::stopDeliveringFrames()
+{
+    ALOGV("%s", __FUNCTION__);
+
+    /* Stopping an already-stopped device is a successful no-op. */
+    if (!isStarted()) {
+        ALOGW("%s: Device is not started", __FUNCTION__);
+        return NO_ERROR;
+    }
+
+    const status_t res = stopWorkerThread();
+    /* Fixed copy/paste in the log message: this is the stop path. */
+    ALOGE_IF(res != NO_ERROR, "%s: stopWorkerThread failed", __FUNCTION__);
+    return res;
+}
+
+void EmulatedCameraDevice::setExposureCompensation(const float ev) {
+    ALOGV("%s", __FUNCTION__);
+
+    /* Only a warning: the new value still takes effect for future frames. */
+    if (!isStarted()) {
+        ALOGW("%s: Fake camera device is not started.", __FUNCTION__);
+    }
+
+    /* Map the EV step onto a linear luminance multiplier. */
+    const float multiplier = std::pow(2.0f, ev / GAMMA_CORRECTION);
+    mExposureCompensation = multiplier;
+    ALOGV("New exposure compensation is %f", mExposureCompensation);
+}
+
+void EmulatedCameraDevice::initializeWhiteBalanceModes(const char* mode,
+                                                       const float r_scale,
+                                                       const float b_scale) {
+    ALOGV("%s with %s, %f, %f", __FUNCTION__, mode, r_scale, b_scale);
+    const String8 key(mode);
+    /* KeyedVector::add replaces an existing entry without freeing its value,
+     * so a previously registered scale array for this mode must be released
+     * here to avoid leaking it on re-registration. */
+    float* stale = NULL;
+    const ssize_t existing = mSupportedWhiteBalanceScale.indexOfKey(key);
+    if (existing >= 0) {
+        stale = mSupportedWhiteBalanceScale.valueAt(existing);
+    }
+    /* Green is fixed at 1.0 to keep luminance roughly constant. */
+    float* value = new float[3];
+    value[0] = r_scale; value[1] = 1.0f; value[2] = b_scale;
+    mSupportedWhiteBalanceScale.add(key, value);
+    if (stale != NULL) {
+        if (mWhiteBalanceScale == stale) {
+            /* Keep the currently selected mode pointing at live storage. */
+            mWhiteBalanceScale = value;
+        }
+        delete[] stale;
+    }
+}
+
+void EmulatedCameraDevice::setWhiteBalanceMode(const char* mode) {
+    ALOGV("%s with white balance %s", __FUNCTION__, mode);
+    // NOTE(review): valueFor() on an unknown mode returns the KeyedVector's
+    // default value (a NULL pointer here), which changeWhiteBalance() would
+    // then dereference — presumably callers only pass modes previously
+    // registered via initializeWhiteBalanceModes(); verify against callers.
+    mWhiteBalanceScale =
+            mSupportedWhiteBalanceScale.valueFor(String8(mode));
+
+/* Computes the pixel value after adjusting the white balance to the current
+ * one. The input the y, u, v channel of the pixel and the adjusted value will
+ * be stored in place. The adjustment is done in RGB space.
+ */
+void EmulatedCameraDevice::changeWhiteBalance(uint8_t& y,
+                                              uint8_t& u,
+                                              uint8_t& v) const {
+    /* The adjustment is done in RGB space: scale the R and B channels by the
+     * selected mode's factors (G stays fixed to keep luminance roughly
+     * constant), then convert back to YUV in place.  The casts' template
+     * arguments were missing (stripped angle brackets) and would not
+     * compile; restored to float so the division is done in floating point. */
+    float r_scale = mWhiteBalanceScale[0];
+    float b_scale = mWhiteBalanceScale[2];
+    int r = static_cast<float>(YUV2R(y, u, v)) / r_scale;
+    int g = YUV2G(y, u, v);
+    int b = static_cast<float>(YUV2B(y, u, v)) / b_scale;
+
+    y = RGB2Y(r, g, b);
+    u = RGB2U(r, g, b);
+    v = RGB2V(r, g, b);
+}
+
+status_t EmulatedCameraDevice::getCurrentPreviewFrame(void* buffer)
+{
+    /* Converts the most recently captured frame into RGB32 for preview.
+     * Valid only on a started device after at least one frame has been
+     * captured into mCurrentFrame. */
+    if (!isStarted()) {
+        ALOGE("%s: Device is not started", __FUNCTION__);
+        return EINVAL;
+    }
+    if (mCurrentFrame == NULL || buffer == NULL) {
+        ALOGE("%s: No framebuffer", __FUNCTION__);
+        return EINVAL;
+    }
+
+    /* In emulation the framebuffer is never RGB. */
+    switch (mPixelFormat) {
+        case V4L2_PIX_FMT_YVU420:
+            YV12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+            return NO_ERROR;
+        case V4L2_PIX_FMT_YUV420:
+            YU12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+            return NO_ERROR;
+        case V4L2_PIX_FMT_NV21:
+            NV21ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+            return NO_ERROR;
+        case V4L2_PIX_FMT_NV12:
+            NV12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+            return NO_ERROR;
+
+        default:
+            /* Print the fourcc code as four characters.  The cast's template
+             * argument was missing (stripped angle brackets); restored. */
+            ALOGE("%s: Unknown pixel format %.4s",
+                  __FUNCTION__, reinterpret_cast<const char*>(&mPixelFormat));
+            return EINVAL;
+    }
+}
+
+/****************************************************************************
+ * Emulated camera device private API
+ ***************************************************************************/
+
+status_t EmulatedCameraDevice::commonStartDevice(int width,
+                                                 int height,
+                                                 uint32_t pix_fmt)
+{
+    /* Validate pixel format, and calculate framebuffer size at the same time.
+     * All supported formats are 12 bits per pixel YUV 4:2:0 variants. */
+    switch (pix_fmt) {
+        case V4L2_PIX_FMT_YVU420:
+        case V4L2_PIX_FMT_YUV420:
+        case V4L2_PIX_FMT_NV21:
+        case V4L2_PIX_FMT_NV12:
+            mFrameBufferSize = (width * height * 12) / 8;
+            break;
+
+        default:
+            /* The casts' template arguments in the log statements below were
+             * missing (stripped angle brackets); restored so the fourcc code
+             * is printed as four characters. */
+            ALOGE("%s: Unknown pixel format %.4s",
+                  __FUNCTION__, reinterpret_cast<const char*>(&pix_fmt));
+            return EINVAL;
+    }
+
+    /* Cache framebuffer info. */
+    mFrameWidth = width;
+    mFrameHeight = height;
+    mPixelFormat = pix_fmt;
+    mTotalPixels = width * height;
+
+    /* Allocate framebuffer.  (Plain operator new throws on failure, so the
+     * NULL check below is defensive rather than load-bearing.) */
+    mCurrentFrame = new uint8_t[mFrameBufferSize];
+    if (mCurrentFrame == NULL) {
+        ALOGE("%s: Unable to allocate framebuffer", __FUNCTION__);
+        return ENOMEM;
+    }
+    ALOGV("%s: Allocated %p %zu bytes for %d pixels in %.4s[%dx%d] frame",
+          __FUNCTION__, mCurrentFrame, mFrameBufferSize, mTotalPixels,
+          reinterpret_cast<const char*>(&mPixelFormat), mFrameWidth, mFrameHeight);
+    return NO_ERROR;
+}
+
+void EmulatedCameraDevice::commonStopDevice()
+{
+    /* Reset cached frame geometry and release the framebuffer, undoing what
+     * commonStartDevice() set up. */
+    mFrameWidth = 0;
+    mFrameHeight = 0;
+    mTotalPixels = 0;
+    mPixelFormat = 0;
+
+    delete[] mCurrentFrame;   /* deleting NULL is a no-op */
+    mCurrentFrame = NULL;
+}
+
+/****************************************************************************
+ * Worker thread management.
+ ***************************************************************************/
+
+status_t EmulatedCameraDevice::startWorkerThread(bool one_burst)
+{
+    ALOGV("%s", __FUNCTION__);
+
+    /* The worker thread object is created in Initialize(). */
+    if (!isInitialized()) {
+        ALOGE("%s: Emulated camera device is not initialized", __FUNCTION__);
+        return EINVAL;
+    }
+
+    const status_t status = getWorkerThread()->startThread(one_burst);
+    ALOGE_IF(status != NO_ERROR, "%s: Unable to start worker thread", __FUNCTION__);
+    return status;
+}
+
+status_t EmulatedCameraDevice::stopWorkerThread()
+{
+    ALOGV("%s", __FUNCTION__);
+
+    /* There is no thread to stop before Initialize() has run. */
+    if (!isInitialized()) {
+        ALOGE("%s: Emulated camera device is not initialized", __FUNCTION__);
+        return EINVAL;
+    }
+
+    const status_t status = getWorkerThread()->stopThread();
+    ALOGE_IF(status != NO_ERROR, "%s: Unable to stop worker thread", __FUNCTION__);
+    return status;
+}
+
+// Body of the worker thread's loop; returning 'false' ends the loop and
+// terminates the thread.  This base implementation does nothing.
+bool EmulatedCameraDevice::inWorkerThread()
+{
+    /* This will end the thread loop, and will terminate the thread. Derived
+     * classes must override this method. */
+    return false;
+}
+
+/****************************************************************************
+ * Worker thread implementation.
+ ***************************************************************************/
+
+// Thread startup hook: creates the control pipe used to deliver messages
+// (e.g. THREAD_STOP) to the thread loop, then signals mSetup so that a
+// concurrent stopThread() waiting for setup can proceed.
+status_t EmulatedCameraDevice::WorkerThread::readyToRun()
+{
+ ALOGV("Starting emulated camera device worker thread...");
+
+ ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
+ "%s: Thread control FDs are opened", __FUNCTION__);
+ /* Create a pair of FDs that would be used to control the thread. */
+ int thread_fds[2];
+ status_t ret;
+ // Hold the device lock so stopThread() never observes a half-initialized
+ // control pipe while it waits on mSetup.
+ Mutex::Autolock lock(mCameraDevice->mObjectLock);
+ if (pipe(thread_fds) == 0) {
+ // Write end is used by stopThread(); the read end is polled in Select().
+ mThreadControl = thread_fds[1];
+ mControlFD = thread_fds[0];
+ ALOGV("Emulated device's worker thread has been started.");
+ ret = NO_ERROR;
+ } else {
+ ALOGE("%s: Unable to create thread control FDs: %d -> %s",
+ __FUNCTION__, errno, strerror(errno));
+ ret = errno;
+ }
+
+ // Wake any stopThread() caller blocked waiting for setup to finish.
+ mSetup.signal();
+ return ret;
+}
+
+// Stops the worker thread: waits (if needed) for readyToRun() to finish
+// creating the control pipe, posts a THREAD_STOP message through it, joins
+// the thread, and closes both pipe ends.
+// Returns NO_ERROR on success or an errno-style failure code.
+status_t EmulatedCameraDevice::WorkerThread::stopThread()
+{
+ ALOGV("Stopping emulated camera device's worker thread...");
+
+ status_t res = EINVAL;
+
+ // Limit the scope of the Autolock
+ {
+ // If thread is running and readyToRun() has not finished running,
+ // then wait until it is done.
+ Mutex::Autolock lock(mCameraDevice->mObjectLock);
+ if (isRunning() && (mThreadControl < 0 || mControlFD < 0)) {
+ mSetup.wait(mCameraDevice->mObjectLock);
+ }
+ }
+
+ if (mThreadControl >= 0) {
+ /* Send "stop" message to the thread loop. */
+ const ControlMessage msg = THREAD_STOP;
+ const int wres =
+ TEMP_FAILURE_RETRY(write(mThreadControl, &msg, sizeof(msg)));
+ if (wres == sizeof(msg)) {
+ /* Stop the thread, and wait till it's terminated. */
+ res = requestExitAndWait();
+ if (res == NO_ERROR) {
+ /* Close control FDs. */
+ if (mThreadControl >= 0) {
+ close(mThreadControl);
+ mThreadControl = -1;
+ }
+ if (mControlFD >= 0) {
+ close(mControlFD);
+ mControlFD = -1;
+ }
+ ALOGV("Emulated camera device's worker thread has been stopped.");
+ } else {
+ ALOGE("%s: requestExitAndWait failed: %d -> %s",
+ __FUNCTION__, res, strerror(-res));
+ }
+ } else {
+ ALOGE("%s: Unable to send THREAD_STOP message: %d -> %s",
+ __FUNCTION__, errno, strerror(errno));
+ // A short write may leave errno at 0; fall back to EINVAL.
+ res = errno ? errno : EINVAL;
+ }
+ } else {
+ ALOGE("%s: Thread control FDs are not opened", __FUNCTION__);
+ }
+
+ return res;
+}
+
+// Multiplexes the control pipe and an optional device FD via select().
+// Param:
+//   fd      - device FD to wait on, or a negative value to wait only on the
+//             control pipe.
+//   timeout - wait limit in microseconds; 0 means wait indefinitely.
+// Return: EXIT_THREAD when a THREAD_STOP message arrives, READY when 'fd'
+// becomes readable, TIMEOUT on expiry, ERROR on select/read failure.
+EmulatedCameraDevice::WorkerThread::SelectRes
+EmulatedCameraDevice::WorkerThread::Select(int fd, int timeout)
+{
+ fd_set fds[1];
+ struct timeval tv, *tvp = NULL;
+
+ // nfds for select() is the highest watched FD plus one.
+ const int fd_num = (fd >= 0) ? max(fd, mControlFD) + 1 :
+ mControlFD + 1;
+ FD_ZERO(fds);
+ FD_SET(mControlFD, fds);
+ if (fd >= 0) {
+ FD_SET(fd, fds);
+ }
+ if (timeout) {
+ tv.tv_sec = timeout / 1000000;
+ tv.tv_usec = timeout % 1000000;
+ tvp = &tv;
+ }
+ int res = TEMP_FAILURE_RETRY(select(fd_num, fds, NULL, NULL, tvp));
+ if (res < 0) {
+ ALOGE("%s: select returned %d and failed: %d -> %s",
+ __FUNCTION__, res, errno, strerror(errno));
+ return ERROR;
+ } else if (res == 0) {
+ /* Timeout. */
+ return TIMEOUT;
+ } else if (FD_ISSET(mControlFD, fds)) {
+ /* A control event. Lets read the message. */
+ ControlMessage msg;
+ res = TEMP_FAILURE_RETRY(read(mControlFD, &msg, sizeof(msg)));
+ if (res != sizeof(msg)) {
+ ALOGE("%s: Unexpected message size %d, or an error %d -> %s",
+ __FUNCTION__, res, errno, strerror(errno));
+ return ERROR;
+ }
+ /* THREAD_STOP is the only message expected here. */
+ if (msg == THREAD_STOP) {
+ ALOGV("%s: THREAD_STOP message is received", __FUNCTION__);
+ return EXIT_THREAD;
+ } else {
+ ALOGE("Unknown worker thread message %d", msg);
+ return ERROR;
+ }
+ } else {
+ /* Must be an FD. */
+ ALOGW_IF(fd < 0 || !FD_ISSET(fd, fds), "%s: Undefined 'select' result",
+ __FUNCTION__);
+ return READY;
+ }
+}
+
+}; /* namespace android */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraDevice.h b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraDevice.h
new file mode 100644
index 0000000..fff11fa
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraDevice.h
@@ -0,0 +1,546 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_DEVICE_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA_DEVICE_H
+
+/*
+ * Contains declaration of an abstract class EmulatedCameraDevice that defines
+ * functionality expected from an emulated physical camera device:
+ * - Obtaining and setting camera device parameters
+ * - Capturing frames
+ * - Streaming video
+ * - etc.
+ */
+
+#include
+#include
+#include
+#include "EmulatedCameraCommon.h"
+#include "Converters.h"
+
+namespace android {
+
+class EmulatedCamera;
+
+/* Encapsulates an abstract class EmulatedCameraDevice that defines
+ * functionality expected from an emulated physical camera device:
+ * - Obtaining and setting camera device parameters
+ * - Capturing frames
+ * - Streaming video
+ * - etc.
+ */
+class EmulatedCameraDevice {
+public:
+ /* Constructs EmulatedCameraDevice instance.
+ * Param:
+ * camera_hal - Emulated camera that implements the camera HAL API, and
+ * manages (contains) this object.
+ */
+ explicit EmulatedCameraDevice(EmulatedCamera* camera_hal);
+
+ /* Destructs EmulatedCameraDevice instance. */
+ virtual ~EmulatedCameraDevice();
+
+ /***************************************************************************
+ * Emulated camera device abstract interface
+ **************************************************************************/
+
+public:
+ /* Connects to the camera device.
+ * This method must be called on an initialized instance of this class.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t connectDevice() = 0;
+
+ /* Disconnects from the camera device.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status. If this method is
+ * called for already disconnected, or uninitialized instance of this class,
+ * a successful status must be returned from this method. If this method is
+ * called for an instance that is in the "started" state, this method must
+ * return a failure.
+ */
+ virtual status_t disconnectDevice() = 0;
+
+ /* Starts the camera device.
+ * This method tells the camera device to start capturing frames of the given
+ * dimensions for the given pixel format. Note that this method doesn't start
+ * the delivery of the captured frames to the emulated camera. Call
+ * startDeliveringFrames method to start delivering frames. This method must
+ * be called on a connected instance of this class. If it is called on a
+ * disconnected instance, this method must return a failure.
+ * Param:
+ * width, height - Frame dimensions to use when capturing video frames.
+ * pix_fmt - Pixel format to use when capturing video frames.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t startDevice(int width, int height, uint32_t pix_fmt) = 0;
+
+ /* Stops the camera device.
+ * This method tells the camera device to stop capturing frames. Note that
+ * this method doesn't stop delivering frames to the emulated camera. Always
+ * call stopDeliveringFrames prior to calling this method.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status. If this method is
+ * called for an object that is not capturing frames, or is disconnected,
+ * or is uninitialized, a successful status must be returned from this
+ * method.
+ */
+ virtual status_t stopDevice() = 0;
+
+ /***************************************************************************
+ * Emulated camera device public API
+ **************************************************************************/
+
+public:
+ /* Initializes EmulatedCameraDevice instance.
+ * Derived classes should override this method in order to cache static
+ * properties of the physical device (list of supported pixel formats, frame
+ * sizes, etc.) If this method is called on an already initialized instance,
+ * it must return a successful status.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t Initialize();
+
+ /* Initializes the white balance modes parameters.
+ * The parameters are passed by each individual derived camera API to
+ * represent that different camera manufacturers may have different
+ * preferences on the white balance parameters. Green channel in the RGB
+ * color space is fixed to keep the luminance to be reasonably constant.
+ *
+ * Param:
+ * mode the text describing the current white balance mode
+ * r_scale the scale factor for the R channel in RGB space
+ * b_scale the scale factor for the B channel in RGB space.
+ */
+ void initializeWhiteBalanceModes(const char* mode,
+ const float r_scale,
+ const float b_scale);
+
+ /* Starts delivering frames captured from the camera device.
+ * This method will start the worker thread that would be pulling frames from
+ * the camera device, and will deliver the pulled frames back to the emulated
+ * camera via onNextFrameAvailable callback. This method must be called on a
+ * connected instance of this class with a started camera device. If it is
+ * called on a disconnected instance, or camera device has not been started,
+ * this method must return a failure.
+ * Param:
+ * one_burst - Controls how many frames should be delivered. If this
+ * parameter is 'true', only one captured frame will be delivered to the
+ * emulated camera. If this parameter is 'false', frames will keep
+ * coming until stopDeliveringFrames method is called. Typically, this
+ * parameter is set to 'true' only in order to obtain a single frame
+ * that will be used as a "picture" in takePicture method of the
+ * emulated camera.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t startDeliveringFrames(bool one_burst);
+
+ /* Stops delivering frames captured from the camera device.
+ * This method will stop the worker thread started by startDeliveringFrames.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t stopDeliveringFrames();
+
+ /* Sets the exposure compensation for the camera device.
+ */
+ void setExposureCompensation(const float ev);
+
+ /* Sets the white balance mode for the device.
+ */
+ void setWhiteBalanceMode(const char* mode);
+
+ /* Gets current framebuffer, converted into preview frame format.
+ * This method must be called on a connected instance of this class with a
+ * started camera device. If it is called on a disconnected instance, or
+ * camera device has not been started, this method must return a failure.
+ * Note that this method should be called only after at least one frame has
+ * been captured and delivered. Otherwise it will return garbage in the
+ * preview frame buffer. Typically, this method should be called from
+ * onNextFrameAvailable callback.
+ * Param:
+ * buffer - Buffer, large enough to contain the entire preview frame.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t getCurrentPreviewFrame(void* buffer);
+
+ /* Gets width of the frame obtained from the physical device.
+ * Return:
+ * Width of the frame obtained from the physical device. Note that value
+ * returned from this method is valid only in case if camera device has been
+ * started.
+ */
+ inline int getFrameWidth() const
+ {
+ ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+ return mFrameWidth;
+ }
+
+ /* Gets height of the frame obtained from the physical device.
+ * Return:
+ * Height of the frame obtained from the physical device. Note that value
+ * returned from this method is valid only in case if camera device has been
+ * started.
+ */
+ inline int getFrameHeight() const
+ {
+ ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+ return mFrameHeight;
+ }
+
+ /* Gets byte size of the current frame buffer.
+ * Return:
+ * Byte size of the frame buffer. Note that value returned from this method
+ * is valid only in case if camera device has been started.
+ */
+ inline size_t getFrameBufferSize() const
+ {
+ ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+ return mFrameBufferSize;
+ }
+
+ /* Gets number of pixels in the current frame buffer.
+ * Return:
+ * Number of pixels in the frame buffer. Note that value returned from this
+ * method is valid only in case if camera device has been started.
+ */
+ inline int getPixelNum() const
+ {
+ ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+ return mTotalPixels;
+ }
+
+ /* Gets pixel format of the frame that camera device streams to this class.
+ * Throughout camera framework, there are three different forms of pixel
+ * format representation:
+ * - Original format, as reported by the actual camera device. Values for
+ * this format are declared in bionic/libc/kernel/common/linux/videodev2.h
+ * - String representation as defined in CameraParameters::PIXEL_FORMAT_XXX
+ * strings in frameworks/base/include/camera/CameraParameters.h
+ * - HAL_PIXEL_FORMAT_XXX format, as defined in system/core/include/system/graphics.h
+ * Since emulated camera device gets its data from the actual device, it gets
+ * pixel format in the original form. And that's the pixel format
+ * representation that will be returned from this method. HAL components will
+ * need to translate value returned from this method to the appropriate form.
+ * This method must be called only on started instance of this class, since
+ * it's applicable only when camera device is ready to stream frames.
+ * Param:
+ * pix_fmt - Upon success contains the original pixel format.
+ * Return:
+ * Current framebuffer's pixel format. Note that value returned from this
+ * method is valid only in case if camera device has been started.
+ */
+ inline uint32_t getOriginalPixelFormat() const
+ {
+ ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+ return mPixelFormat;
+ }
+
+ /*
+ * State checkers.
+ */
+
+ inline bool isInitialized() const {
+ /* Instance is initialized when the worker thread has been successfully
+ * created (but not necessarily started). */
+ return mWorkerThread.get() != NULL && mState != ECDS_CONSTRUCTED;
+ }
+ inline bool isConnected() const {
+ /* Instance is connected when its status is either "connected", or
+ * "started". */
+ return mState == ECDS_CONNECTED || mState == ECDS_STARTED;
+ }
+ inline bool isStarted() const {
+ /* Capturing is possible only while in the "started" state. */
+ return mState == ECDS_STARTED;
+ }
+
+ /****************************************************************************
+ * Emulated camera device private API
+ ***************************************************************************/
+protected:
+ /* Performs common validation and calculation of startDevice parameters.
+ * Virtual so derived device classes may extend the common behavior.
+ * Param:
+ * width, height, pix_fmt - Parameters passed to the startDevice method.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t commonStartDevice(int width, int height, uint32_t pix_fmt);
+
+ /* Performs common cleanup on stopDevice.
+ * This method will undo what commonStartDevice had done.
+ */
+ virtual void commonStopDevice();
+ /** Computes a luminance value after taking the exposure compensation.
+ * value into account.
+ *
+ * Param:
+ * inputY - The input luminance value.
+ * Return:
+ * The luminance value after adjusting the exposure compensation.
+ */
+ inline uint8_t changeExposure(const uint8_t& inputY) const {
+ /* Scale the luminance in float, clamp the product, and narrow the
+ * result back to a byte. The static_cast template arguments were
+ * missing (stripped angle brackets); restored here. */
+ return static_cast<uint8_t>(clamp(static_cast<float>(inputY) *
+ mExposureCompensation));
+ }
+
+ /** Computes the pixel value in YUV space after adjusting to the current
+ * white balance mode.
+ */
+ void changeWhiteBalance(uint8_t& y, uint8_t& u, uint8_t& v) const;
+
+ /****************************************************************************
+ * Worker thread management.
+ * Typically when emulated camera device starts capturing frames from the
+ * actual device, it does that in a worker thread created in StartCapturing,
+ * and terminated in StopCapturing. Since this is such a typical scenario,
+ * it makes sense to encapsulate worker thread management in the base class
+ * for all emulated camera devices.
+ ***************************************************************************/
+
+protected:
+ /* Starts the worker thread.
+ * Typically, worker thread is started from startDeliveringFrames method of
+ * this class.
+ * Param:
+ * one_burst - Controls how many times thread loop should run. If this
+ * parameter is 'true', thread routine will run only once. If this
+ * parameter is 'false', thread routine will run until stopWorkerThread
+ * method is called. See startDeliveringFrames for more info.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t startWorkerThread(bool one_burst);
+
+ /* Stops the worker thread.
+ * Note that this method will always wait for the worker thread to terminate.
+ * Typically, worker thread is started from stopDeliveringFrames method of
+ * this class.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ virtual status_t stopWorkerThread();
+
+ /* Implementation of the worker thread routine.
+ * In the default implementation of the worker thread routine we simply
+ * return 'false' forcing the thread loop to exit, and the thread to
+ * terminate. Derived class should override that method to provide there the
+ * actual frame delivery.
+ * Return:
+ * true To continue thread loop (this method will be called again), or false
+ * to exit the thread loop and to terminate the thread.
+ */
+ virtual bool inWorkerThread();
+
+ /* Encapsulates a worker thread used by the emulated camera device.
+ * Declared a friend so the thread can invoke the device's protected API. */
+ friend class WorkerThread;
+ class WorkerThread : public Thread {
+
+ /****************************************************************************
+ * Public API
+ ***************************************************************************/
+
+ public:
+ /* Constructs the thread in "can call Java" mode; control FDs start
+ * closed (-1) and are opened later (see readyToRun / stopThread). */
+ inline explicit WorkerThread(EmulatedCameraDevice* camera_dev)
+ : Thread(true), // Callbacks may involve Java calls.
+ mCameraDevice(camera_dev),
+ mThreadControl(-1),
+ mControlFD(-1)
+ {
+ }
+
+ /* Closes any control FDs that are still open; warns because they are
+ * normally expected to be closed before destruction. */
+ inline ~WorkerThread()
+ {
+ ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
+ "%s: Control FDs are opened in the destructor",
+ __FUNCTION__);
+ if (mThreadControl >= 0) {
+ close(mThreadControl);
+ }
+ if (mControlFD >= 0) {
+ close(mControlFD);
+ }
+ }
+
+ /* Starts the thread
+ * Param:
+ * one_burst - Controls how many times thread loop should run. If
+ * this parameter is 'true', thread routine will run only once.
+ * If this parameter is 'false', thread routine will run until
+ * stopThread method is called. See startWorkerThread for more
+ * info.
+ * Return:
+ * NO_ERROR on success, or an appropriate error status.
+ */
+ inline status_t startThread(bool one_burst)
+ {
+ mOneBurst = one_burst;
+ return run("Camera_startThread", ANDROID_PRIORITY_URGENT_DISPLAY, 0);
+ }
+
+ /* Overridden base class method.
+ * It is overridden in order to provide one-time initialization just
+ * prior to starting the thread routine.
+ */
+ status_t readyToRun();
+
+ /* Stops the thread. */
+ status_t stopThread();
+
+ /* Values returned from the Select method of this class. */
+ enum SelectRes {
+ /* A timeout has occurred. */
+ TIMEOUT,
+ /* Data are available for read on the provided FD. */
+ READY,
+ /* Thread exit request has been received. */
+ EXIT_THREAD,
+ /* An error has occurred. */
+ ERROR
+ };
+
+ /* Select on an FD event, keeping in mind thread exit message.
+ * Param:
+ * fd - File descriptor on which to wait for an event. This
+ * parameter may be negative. If it is negative this method will
+ * only wait on a control message to the thread.
+ * timeout - Timeout in microseconds. 0 indicates no timeout (wait
+ * forever).
+ * Return:
+ * See SelectRes enum comments.
+ */
+ SelectRes Select(int fd, int timeout);
+
+ /****************************************************************************
+ * Private API
+ ***************************************************************************/
+
+ private:
+ /* Implements abstract method of the base Thread class. */
+ bool threadLoop()
+ {
+ /* Simply dispatch the call to the containing camera device. */
+ if (mCameraDevice->inWorkerThread()) {
+ /* Respect "one burst" parameter (see startThread). */
+ return !mOneBurst;
+ } else {
+ return false;
+ }
+ }
+
+ /* Containing camera device object. */
+ EmulatedCameraDevice* mCameraDevice;
+
+ /* FD that is used to send control messages into the thread. */
+ int mThreadControl;
+
+ /* FD that thread uses to receive control messages. */
+ int mControlFD;
+
+ /* Controls number of times the thread loop runs.
+ * See startThread for more information. */
+ bool mOneBurst;
+
+ /* Enumerates control messages that can be sent into the thread. */
+ enum ControlMessage {
+ /* Stop the thread. */
+ THREAD_STOP
+ };
+
+ /* Condition variable, presumably used to synchronize thread setup —
+ * its use is not visible in this chunk; confirm in the .cpp. */
+ Condition mSetup;
+ };
+
+ /* Worker thread accessor.
+ * Return:
+ * Raw pointer to the worker thread; may be NULL before initialization. */
+ inline WorkerThread* getWorkerThread() const
+ {
+ return mWorkerThread.get();
+ }
+
+ /****************************************************************************
+ * Data members
+ ***************************************************************************/
+
+protected:
+ /* Locks this instance for parameters, state, etc. change. */
+ Mutex mObjectLock;
+
+ /* Worker thread that is used in frame capturing.
+ * NOTE(review): the sp<> template argument was stripped in transit;
+ * restored to the WorkerThread type declared above (see getWorkerThread). */
+ sp<WorkerThread> mWorkerThread;
+
+ /* Timestamp of the current frame. */
+ nsecs_t mCurFrameTimestamp;
+
+ /* Emulated camera object containing this instance. */
+ EmulatedCamera* mCameraHAL;
+
+ /* Framebuffer containing the current frame. */
+ uint8_t* mCurrentFrame;
+
+ /*
+ * Framebuffer properties.
+ */
+
+ /* Byte size of the framebuffer. */
+ size_t mFrameBufferSize;
+
+ /* Original pixel format (one of the V4L2_PIX_FMT_XXX values, as defined in
+ * bionic/libc/kernel/common/linux/videodev2.h */
+ uint32_t mPixelFormat;
+
+ /* Frame width */
+ int mFrameWidth;
+
+ /* Frame height */
+ int mFrameHeight;
+
+ /* Total number of pixels */
+ int mTotalPixels;
+
+ /* Exposure compensation value */
+ float mExposureCompensation;
+
+ /* Scale vector for the currently selected white balance mode. */
+ float* mWhiteBalanceScale;
+
+ /* Maps white balance mode names to their scale vectors.
+ * NOTE(review): template arguments were stripped in transit; String8 ->
+ * float* matches the AOSP emulated camera sources — confirm against the
+ * corresponding .cpp. */
+ DefaultKeyedVector<String8, float*> mSupportedWhiteBalanceScale;
+
+ /* Defines possible states of the emulated camera device object.
+ */
+ enum EmulatedCameraDeviceState {
+ /* Object has been constructed. */
+ ECDS_CONSTRUCTED,
+ /* Object has been initialized. */
+ ECDS_INITIALIZED,
+ /* Object has been connected to the physical device. */
+ ECDS_CONNECTED,
+ /* Camera device has been started. */
+ ECDS_STARTED,
+ };
+
+ /* Object state. */
+ EmulatedCameraDeviceState mState;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_DEVICE_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraFactory.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraFactory.cpp
new file mode 100644
index 0000000..137d8ab
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraFactory.cpp
@@ -0,0 +1,542 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedCameraFactory that manages cameras
+ * available for emulation.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Factory"
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include "EmulatedQemuCamera.h"
+#include "EmulatedFakeCamera.h"
+#include "EmulatedFakeCamera2.h"
+#include "EmulatedFakeCamera3.h"
+#include "EmulatedCameraHotplugThread.h"
+#include "EmulatedCameraFactory.h"
+
+/* Camera HAL module descriptor, defined in EmulatedCameraHal.cpp. */
+extern camera_module_t HAL_MODULE_INFO_SYM;
+
+/* A global instance of EmulatedCameraFactory is statically instantiated and
+ * initialized when camera emulation HAL is loaded.
+ */
+android::EmulatedCameraFactory gEmulatedCameraFactory;
+
+namespace android {
+
+EmulatedCameraFactory::EmulatedCameraFactory()
+ : mQemuClient(),
+ mEmulatedCameras(NULL),
+ mEmulatedCameraNum(0),
+ mFakeCameraNum(0),
+ mConstructedOK(false),
+ mCallbacks(NULL)
+{
+ status_t res;
+ /* Connect to the factory service in the emulator, and create Qemu cameras. */
+ if (mQemuClient.connectClient(NULL) == NO_ERROR) {
+ /* Connection has succeeded. Create emulated cameras for each camera
+ * device, reported by the service. */
+ createQemuCameras();
+ }
+
+ if (isBackFakeCameraEmulationOn()) {
+ /* Camera ID. */
+ const int camera_id = mEmulatedCameraNum;
+ /* Use fake camera to emulate back-facing camera. */
+ mEmulatedCameraNum++;
+
+ /* Make sure that array is allocated (in case there were no 'qemu'
+ * cameras created). Note that we preallocate the array so it may contain
+ * two fake cameras: one facing back, and another facing front. */
+ if (mEmulatedCameras == NULL) {
+ mEmulatedCameras = new EmulatedBaseCamera*[mEmulatedCameraNum + 1];
+ if (mEmulatedCameras == NULL) {
+ ALOGE("%s: Unable to allocate emulated camera array for %d entries",
+ __FUNCTION__, mEmulatedCameraNum);
+ return;
+ }
+ memset(mEmulatedCameras, 0,
+ (mEmulatedCameraNum + 1) * sizeof(EmulatedBaseCamera*));
+ }
+
+ /* Create, and initialize the fake camera */
+ switch (getBackCameraHalVersion()) {
+ case 1:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera(camera_id, true,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ case 2:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera2(camera_id, true,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ case 3:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera3(camera_id, true,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ default:
+ ALOGE("%s: Unknown back camera hal version requested: %d", __FUNCTION__,
+ getBackCameraHalVersion());
+ }
+ if (mEmulatedCameras[camera_id] != NULL) {
+ ALOGV("%s: Back camera device version is %d", __FUNCTION__,
+ getBackCameraHalVersion());
+ res = mEmulatedCameras[camera_id]->Initialize();
+ if (res != NO_ERROR) {
+ ALOGE("%s: Unable to intialize back camera %d: %s (%d)",
+ __FUNCTION__, camera_id, strerror(-res), res);
+ delete mEmulatedCameras[camera_id];
+ /* Null the slot so the dangling pointer cannot be reused or
+ * double-deleted; a front fake camera may claim this index next. */
+ mEmulatedCameras[camera_id] = NULL;
+ mEmulatedCameraNum--;
+ }
+ } else {
+ mEmulatedCameraNum--;
+ ALOGE("%s: Unable to instantiate fake camera class", __FUNCTION__);
+ }
+ }
+
+ if (isFrontFakeCameraEmulationOn()) {
+ /* Camera ID. */
+ const int camera_id = mEmulatedCameraNum;
+ /* Use fake camera to emulate front-facing camera. */
+ mEmulatedCameraNum++;
+
+ /* Make sure that array is allocated (in case there were no 'qemu'
+ * cameras created). */
+ if (mEmulatedCameras == NULL) {
+ mEmulatedCameras = new EmulatedBaseCamera*[mEmulatedCameraNum];
+ if (mEmulatedCameras == NULL) {
+ ALOGE("%s: Unable to allocate emulated camera array for %d entries",
+ __FUNCTION__, mEmulatedCameraNum);
+ return;
+ }
+ memset(mEmulatedCameras, 0,
+ mEmulatedCameraNum * sizeof(EmulatedBaseCamera*));
+ }
+
+ /* Create, and initialize the fake camera */
+ switch (getFrontCameraHalVersion()) {
+ case 1:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera(camera_id, false,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ case 2:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera2(camera_id, false,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ case 3:
+ mEmulatedCameras[camera_id] =
+ new EmulatedFakeCamera3(camera_id, false,
+ &HAL_MODULE_INFO_SYM.common);
+ break;
+ default:
+ ALOGE("%s: Unknown front camera hal version requested: %d",
+ __FUNCTION__,
+ getFrontCameraHalVersion());
+ }
+ if (mEmulatedCameras[camera_id] != NULL) {
+ ALOGV("%s: Front camera device version is %d", __FUNCTION__,
+ getFrontCameraHalVersion());
+ res = mEmulatedCameras[camera_id]->Initialize();
+ if (res != NO_ERROR) {
+ ALOGE("%s: Unable to intialize front camera %d: %s (%d)",
+ __FUNCTION__, camera_id, strerror(-res), res);
+ delete mEmulatedCameras[camera_id];
+ /* Null the slot for the same reason as the back camera above. */
+ mEmulatedCameras[camera_id] = NULL;
+ mEmulatedCameraNum--;
+ }
+ } else {
+ mEmulatedCameraNum--;
+ ALOGE("%s: Unable to instantiate fake camera class", __FUNCTION__);
+ }
+ }
+
+ ALOGV("%d cameras are being emulated. %d of them are fake cameras.",
+ mEmulatedCameraNum, mFakeCameraNum);
+
+ /* Create hotplug thread */
+ {
+ /* NOTE(review): element type was stripped in transit; camera IDs are
+ * plain ints. Assumes at least one camera exists when taking the
+ * address of element 0 — confirm for camera-less configurations. */
+ Vector<int> cameraIdVector;
+ for (int i = 0; i < mEmulatedCameraNum; ++i) {
+ cameraIdVector.push_back(i);
+ }
+ mHotplugThread = new EmulatedCameraHotplugThread(&cameraIdVector[0],
+ mEmulatedCameraNum);
+ mHotplugThread->run("EmulatedCameraHotplugThread");
+ }
+
+ mConstructedOK = true;
+}
+
+EmulatedCameraFactory::~EmulatedCameraFactory()
+{
+ /* Stop the hotplug thread BEFORE destroying the cameras: its status
+ * callbacks (onStatusChanged) dereference entries of mEmulatedCameras,
+ * so joining it first closes a use-after-free window present in the
+ * original order (cameras were deleted before the thread was joined). */
+ if (mHotplugThread != NULL) {
+ mHotplugThread->requestExit();
+ mHotplugThread->join();
+ }
+
+ if (mEmulatedCameras != NULL) {
+ for (int n = 0; n < mEmulatedCameraNum; n++) {
+ if (mEmulatedCameras[n] != NULL) {
+ delete mEmulatedCameras[n];
+ }
+ }
+ delete[] mEmulatedCameras;
+ }
+}
+
+/****************************************************************************
+ * Camera HAL API handlers.
+ *
+ * Each handler simply verifies existence of an appropriate EmulatedBaseCamera
+ * instance, and dispatches the call to that instance.
+ *
+ ***************************************************************************/
+
+/* Opens (connects to) the emulated camera with the given ID.
+ * Return:
+ * 0 on success; -EINVAL if construction failed; -ENODEV for a bad ID;
+ * otherwise whatever connectCamera returns. */
+int EmulatedCameraFactory::cameraDeviceOpen(int camera_id, hw_device_t** device)
+{
+ ALOGV("%s: id = %d", __FUNCTION__, camera_id);
+
+ *device = NULL;
+
+ if (!isConstructedOK()) {
+ ALOGE("%s: EmulatedCameraFactory has failed to initialize", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if (camera_id < 0 || camera_id >= getEmulatedCameraNum()) {
+ ALOGE("%s: Camera id %d is out of bounds (%d)",
+ __FUNCTION__, camera_id, getEmulatedCameraNum());
+ return -ENODEV;
+ }
+
+ return mEmulatedCameras[camera_id]->connectCamera(device);
+}
+
+/* Fills in static information for the emulated camera with the given ID.
+ * Return:
+ * 0 on success; -EINVAL if construction failed; -ENODEV for a bad ID. */
+int EmulatedCameraFactory::getCameraInfo(int camera_id, struct camera_info* info)
+{
+ ALOGV("%s: id = %d", __FUNCTION__, camera_id);
+
+ if (!isConstructedOK()) {
+ ALOGE("%s: EmulatedCameraFactory has failed to initialize", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if (camera_id < 0 || camera_id >= getEmulatedCameraNum()) {
+ ALOGE("%s: Camera id %d is out of bounds (%d)",
+ __FUNCTION__, camera_id, getEmulatedCameraNum());
+ return -ENODEV;
+ }
+
+ return mEmulatedCameras[camera_id]->getCameraInfo(info);
+}
+
+/* Stores the framework's status-change callbacks; used later by
+ * onStatusChanged to report hotplug events. Always returns OK. */
+int EmulatedCameraFactory::setCallbacks(
+ const camera_module_callbacks_t *callbacks)
+{
+ ALOGV("%s: callbacks = %p", __FUNCTION__, callbacks);
+
+ mCallbacks = callbacks;
+
+ return OK;
+}
+
+/* Intentionally a no-op: the emulator defines no vendor tags. */
+void EmulatedCameraFactory::getVendorTagOps(vendor_tag_ops_t* ops) {
+ ALOGV("%s: ops = %p", __FUNCTION__, ops);
+
+ // No vendor tags defined for emulator yet, so not touching ops
+}
+
+/****************************************************************************
+ * Camera HAL API callbacks.
+ ***************************************************************************/
+
+/* hw_module_methods_t::open entry point.
+ * 'name' is the camera ID in decimal string form (parsed with atoi). */
+int EmulatedCameraFactory::device_open(const hw_module_t* module,
+ const char* name,
+ hw_device_t** device)
+{
+ /*
+ * Simply verify the parameters, and dispatch the call inside the
+ * EmulatedCameraFactory instance.
+ */
+
+ if (module != &HAL_MODULE_INFO_SYM.common) {
+ ALOGE("%s: Invalid module %p expected %p",
+ __FUNCTION__, module, &HAL_MODULE_INFO_SYM.common);
+ return -EINVAL;
+ }
+ if (name == NULL) {
+ ALOGE("%s: NULL name is not expected here", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ return gEmulatedCameraFactory.cameraDeviceOpen(atoi(name), device);
+}
+
+/* The static functions below are thin trampolines that forward the HAL
+ * module callbacks to the global gEmulatedCameraFactory instance. */
+
+int EmulatedCameraFactory::get_number_of_cameras(void)
+{
+ return gEmulatedCameraFactory.getEmulatedCameraNum();
+}
+
+int EmulatedCameraFactory::get_camera_info(int camera_id,
+ struct camera_info* info)
+{
+ return gEmulatedCameraFactory.getCameraInfo(camera_id, info);
+}
+
+int EmulatedCameraFactory::set_callbacks(
+ const camera_module_callbacks_t *callbacks)
+{
+ return gEmulatedCameraFactory.setCallbacks(callbacks);
+}
+
+void EmulatedCameraFactory::get_vendor_tag_ops(vendor_tag_ops_t* ops)
+{
+ gEmulatedCameraFactory.getVendorTagOps(ops);
+}
+
+/* Legacy (halVersion-specified) open is deliberately unsupported. */
+int EmulatedCameraFactory::open_legacy(const struct hw_module_t* module,
+ const char* id, uint32_t halVersion, struct hw_device_t** device) {
+ // Not supporting legacy open
+ return -ENOSYS;
+}
+
+/********************************************************************************
+ * Internal API
+ *******************************************************************************/
+
+/*
+ * Camera information tokens passed in response to the "list" factory query.
+ * Each camera entry is one line of space-separated "token=value" fields
+ * (see the parsing in createQemuCameras below).
+ */
+
+/* Device name token. */
+static const char lListNameToken[] = "name=";
+/* Frame dimensions token. */
+static const char lListDimsToken[] = "framedims=";
+/* Facing direction token. */
+static const char lListDirToken[] = "dir=";
+
+void EmulatedCameraFactory::createQemuCameras()
+{
+ /* Obtain camera list. */
+ char* camera_list = NULL;
+ status_t res = mQemuClient.listCameras(&camera_list);
+ /* Empty list, or list containing just an EOL means that there were no
+ * connected cameras found. */
+ if (res != NO_ERROR || camera_list == NULL || *camera_list == '\0' ||
+ *camera_list == '\n') {
+ if (camera_list != NULL) {
+ free(camera_list);
+ }
+ return;
+ }
+
+ /*
+ * Calculate number of connected cameras. Number of EOLs in the camera list
+ * is the number of the connected cameras.
+ */
+
+ int num = 0;
+ const char* eol = strchr(camera_list, '\n');
+ while (eol != NULL) {
+ num++;
+ eol = strchr(eol + 1, '\n');
+ }
+
+ /* Allocate the array for emulated camera instances. Note that we allocate
+ * two more entries for back and front fake camera emulation. */
+ mEmulatedCameras = new EmulatedBaseCamera*[num + 2];
+ if (mEmulatedCameras == NULL) {
+ ALOGE("%s: Unable to allocate emulated camera array for %d entries",
+ __FUNCTION__, num + 2);
+ free(camera_list);
+ return;
+ }
+ /* Zero ALL allocated entries (num + 2): the constructor NULL-checks the
+ * fake camera slots before use, so leaving the last entry uninitialized
+ * (the original memset covered only num + 1) read indeterminate memory. */
+ memset(mEmulatedCameras, 0, sizeof(EmulatedBaseCamera*) * (num + 2));
+
+ /*
+ * Iterate the list, creating, and initializing emulated qemu cameras for each
+ * entry (line) in the list.
+ */
+
+ int index = 0;
+ char* cur_entry = camera_list;
+ while (cur_entry != NULL && *cur_entry != '\0' && index < num) {
+ /* Find the end of the current camera entry, and terminate it with zero
+ * for simpler string manipulation. */
+ char* next_entry = strchr(cur_entry, '\n');
+ if (next_entry != NULL) {
+ *next_entry = '\0';
+ next_entry++; // Start of the next entry.
+ }
+
+ /* Find 'name', 'framedims', and 'dir' tokens that are required here. */
+ char* name_start = strstr(cur_entry, lListNameToken);
+ char* dim_start = strstr(cur_entry, lListDimsToken);
+ char* dir_start = strstr(cur_entry, lListDirToken);
+ if (name_start != NULL && dim_start != NULL && dir_start != NULL) {
+ /* Advance to the token values. */
+ name_start += strlen(lListNameToken);
+ dim_start += strlen(lListDimsToken);
+ dir_start += strlen(lListDirToken);
+
+ /* Terminate token values with zero. */
+ char* s = strchr(name_start, ' ');
+ if (s != NULL) {
+ *s = '\0';
+ }
+ s = strchr(dim_start, ' ');
+ if (s != NULL) {
+ *s = '\0';
+ }
+ s = strchr(dir_start, ' ');
+ if (s != NULL) {
+ *s = '\0';
+ }
+
+ /* Create and initialize qemu camera. */
+ EmulatedQemuCamera* qemu_cam =
+ new EmulatedQemuCamera(index, &HAL_MODULE_INFO_SYM.common);
+ if (NULL != qemu_cam) {
+ res = qemu_cam->Initialize(name_start, dim_start, dir_start);
+ if (res == NO_ERROR) {
+ mEmulatedCameras[index] = qemu_cam;
+ index++;
+ } else {
+ delete qemu_cam;
+ }
+ } else {
+ ALOGE("%s: Unable to instantiate EmulatedQemuCamera",
+ __FUNCTION__);
+ }
+ } else {
+ ALOGW("%s: Bad camera information: %s", __FUNCTION__, cur_entry);
+ }
+
+ cur_entry = next_entry;
+ }
+
+ mEmulatedCameraNum = index;
+}
+
+bool EmulatedCameraFactory::isBackFakeCameraEmulationOn()
+{
+ /* Defined by 'qemu.sf.fake_camera' boot property: if property exists, and
+ * is set to 'both', or 'back', then fake camera is used to emulate back
+ * camera. */
+ char prop[PROPERTY_VALUE_MAX];
+ if ((property_get("qemu.sf.fake_camera", prop, NULL) > 0) &&
+ (!strcmp(prop, "both") || !strcmp(prop, "back"))) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+int EmulatedCameraFactory::getBackCameraHalVersion()
+{
+ /* Defined by the 'qemu.sf.back_camera_hal' boot property (the code below
+ * reads that key, not 'back_camera_hal_version' as the original comment
+ * claimed): if the property doesn't exist, it is assumed to be 1. */
+ char prop[PROPERTY_VALUE_MAX];
+ if (property_get("qemu.sf.back_camera_hal", prop, NULL) > 0) {
+ char *prop_end = prop;
+ int val = strtol(prop, &prop_end, 10);
+ if (*prop_end == '\0') {
+ return val;
+ }
+ // Badly formatted property, should just be a number
+ ALOGE("qemu.sf.back_camera_hal is not a number: %s", prop);
+ }
+ return 1;
+}
+
+bool EmulatedCameraFactory::isFrontFakeCameraEmulationOn()
+{
+ /* Defined by 'qemu.sf.fake_camera' boot property: if property exists, and
+ * is set to 'both', or 'front', then fake camera is used to emulate front
+ * camera. */
+ char prop[PROPERTY_VALUE_MAX];
+ if ((property_get("qemu.sf.fake_camera", prop, NULL) > 0) &&
+ (!strcmp(prop, "both") || !strcmp(prop, "front"))) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+int EmulatedCameraFactory::getFrontCameraHalVersion()
+{
+ /* Defined by the 'qemu.sf.front_camera_hal' boot property (the code below
+ * reads that key, not 'front_camera_hal_version' as the original comment
+ * claimed): if the property doesn't exist, it is assumed to be 1. */
+ char prop[PROPERTY_VALUE_MAX];
+ if (property_get("qemu.sf.front_camera_hal", prop, NULL) > 0) {
+ char *prop_end = prop;
+ int val = strtol(prop, &prop_end, 10);
+ if (*prop_end == '\0') {
+ return val;
+ }
+ // Badly formatted property, should just be a number
+ ALOGE("qemu.sf.front_camera_hal is not a number: %s", prop);
+ }
+ return 1;
+}
+
+/* Reports a hotplug status change for the given camera to the framework,
+ * then (un)plugs the camera. Called from the hotplug thread. */
+void EmulatedCameraFactory::onStatusChanged(int cameraId, int newStatus) {
+
+ /* The ID originates from the hotplug thread; validate it before indexing
+ * into the camera array (the original code indexed unconditionally). */
+ if (cameraId < 0 || cameraId >= mEmulatedCameraNum) {
+ ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
+ return;
+ }
+
+ EmulatedBaseCamera *cam = mEmulatedCameras[cameraId];
+ if (!cam) {
+ ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
+ return;
+ }
+
+ /**
+ * (Order is important)
+ * Send the callback first to framework, THEN close the camera.
+ */
+
+ if (newStatus == cam->getHotplugStatus()) {
+ ALOGW("%s: Ignoring transition to the same status", __FUNCTION__);
+ return;
+ }
+
+ const camera_module_callbacks_t* cb = mCallbacks;
+ if (cb != NULL && cb->camera_device_status_change != NULL) {
+ cb->camera_device_status_change(cb, cameraId, newStatus);
+ }
+
+ if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) {
+ cam->unplugCamera();
+ } else if (newStatus == CAMERA_DEVICE_STATUS_PRESENT) {
+ cam->plugCamera();
+ }
+
+}
+
+/********************************************************************************
+ * Initializer for the static member structure.
+ *******************************************************************************/
+
+/* Entry point for camera HAL API.
+ * Uses the (GNU-extension) designated-initializer 'open:' syntax. */
+struct hw_module_methods_t EmulatedCameraFactory::mCameraModuleMethods = {
+ open: EmulatedCameraFactory::device_open
+};
+
+}; /* namespace android */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraFactory.h b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraFactory.h
new file mode 100644
index 0000000..3f19be1
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraFactory.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H
+
+#include
+#include "EmulatedBaseCamera.h"
+#include "QemuClient.h"
+
+namespace android {
+
+struct EmulatedCameraHotplugThread;
+
+/*
+ * Contains declaration of a class EmulatedCameraFactory that manages cameras
+ * available for the emulation. A global instance of this class is statically
+ * instantiated and initialized when camera emulation HAL is loaded.
+ */
+
+/* Class EmulatedCameraFactory manages cameras available for the emulation.
+ *
+ * When the global static instance of this class is created on the module load,
+ * it enumerates cameras available for the emulation by connecting to the
+ * emulator's 'camera' service. For every camera found out there it creates an
+ * instance of an appropriate class, and stores it an in array of emulated
+ * cameras. In addition to the cameras reported by the emulator, a fake camera
+ * emulator is always created, so there is always at least one camera that is
+ * available.
+ *
+ * Instance of this class is also used as the entry point for the camera HAL API,
+ * including:
+ * - hw_module_methods_t::open entry point
+ * - camera_module_t::get_number_of_cameras entry point
+ * - camera_module_t::get_camera_info entry point
+ *
+ */
+class EmulatedCameraFactory {
+public:
+ /* Constructs EmulatedCameraFactory instance.
+ * In this constructor the factory will create and initialize a list of
+ * emulated cameras. All errors that occur on this constructor are reported
+ * via mConstructedOK data member of this class.
+ */
+ EmulatedCameraFactory();
+
+ /* Destructs EmulatedCameraFactory instance. */
+ ~EmulatedCameraFactory();
+
+ /****************************************************************************
+ * Camera HAL API handlers.
+ ***************************************************************************/
+
+public:
+ /* Opens (connects to) a camera device.
+ * This method is called in response to hw_module_methods_t::open callback.
+ */
+ int cameraDeviceOpen(int camera_id, hw_device_t** device);
+
+ /* Gets emulated camera information.
+ * This method is called in response to camera_module_t::get_camera_info callback.
+ */
+ int getCameraInfo(int camera_id, struct camera_info *info);
+
+ /* Sets emulated camera callbacks.
+ * This method is called in response to camera_module_t::set_callbacks callback.
+ */
+ int setCallbacks(const camera_module_callbacks_t *callbacks);
+
+ /* Fill in vendor tags for the module.
+ * This method is called in response to camera_module_t::get_vendor_tag_ops callback.
+ */
+ void getVendorTagOps(vendor_tag_ops_t* ops);
+
+ /****************************************************************************
+ * Camera HAL API callbacks.
+ ***************************************************************************/
+
+public:
+ /* camera_module_t::get_number_of_cameras callback entry point. */
+ static int get_number_of_cameras(void);
+
+ /* camera_module_t::get_camera_info callback entry point. */
+ static int get_camera_info(int camera_id, struct camera_info *info);
+
+ /* camera_module_t::set_callbacks callback entry point. */
+ static int set_callbacks(const camera_module_callbacks_t *callbacks);
+
+ /* camera_module_t::get_vendor_tag_ops callback entry point */
+ static void get_vendor_tag_ops(vendor_tag_ops_t* ops);
+
+ /* camera_module_t::open_legacy callback entry point */
+ static int open_legacy(const struct hw_module_t* module, const char* id,
+ uint32_t halVersion, struct hw_device_t** device);
+
+private:
+ /* hw_module_methods_t::open callback entry point. */
+ static int device_open(const hw_module_t* module,
+ const char* name,
+ hw_device_t** device);
+
+ /****************************************************************************
+ * Public API.
+ ***************************************************************************/
+
+public:
+
+ /* Gets fake camera orientation. */
+ int getFakeCameraOrientation() {
+ /* TODO: Have a boot property that controls that. */
+ return 90;
+ }
+
+ /* Gets qemu camera orientation. */
+ int getQemuCameraOrientation() {
+ /* TODO: Have a boot property that controls that. */
+ return 270;
+ }
+
+ /* Gets number of emulated cameras.
+ */
+ int getEmulatedCameraNum() const {
+ return mEmulatedCameraNum;
+ }
+
+ /* Checks whether or not the constructor has succeeded.
+ */
+ bool isConstructedOK() const {
+ return mConstructedOK;
+ }
+
+ /* Reports a hotplug status change; invoked from the hotplug thread. */
+ void onStatusChanged(int cameraId, int newStatus);
+
+ /****************************************************************************
+ * Private API
+ ***************************************************************************/
+
+private:
+ /* Populates emulated cameras array with cameras that are available via
+ * 'camera' service in the emulator. For each such camera an instance of
+ * the EmulatedCameraQemud will be created and added to the mEmulatedCameras
+ * array.
+ */
+ void createQemuCameras();
+
+ /* Checks if fake camera emulation is on for the camera facing back. */
+ bool isBackFakeCameraEmulationOn();
+
+ /* Gets camera device version number to use for back camera emulation */
+ int getBackCameraHalVersion();
+
+ /* Checks if fake camera emulation is on for the camera facing front. */
+ bool isFrontFakeCameraEmulationOn();
+
+ /* Gets camera device version number to use for front camera emulation */
+ int getFrontCameraHalVersion();
+
+ /****************************************************************************
+ * Data members.
+ ***************************************************************************/
+
+private:
+ /* Connection to the camera service in the emulator. */
+ FactoryQemuClient mQemuClient;
+
+ /* Array of cameras available for the emulation. */
+ EmulatedBaseCamera** mEmulatedCameras;
+
+ /* Number of emulated cameras (including the fake ones). */
+ int mEmulatedCameraNum;
+
+ /* Number of emulated fake cameras. */
+ int mFakeCameraNum;
+
+ /* Flags whether or not constructor has succeeded. */
+ bool mConstructedOK;
+
+ /* Camera callbacks (for status changing) */
+ const camera_module_callbacks_t* mCallbacks;
+
+ /* Hotplug thread (to call onStatusChanged).
+ * NOTE(review): the sp<> template argument was stripped in transit;
+ * restored to the forward-declared EmulatedCameraHotplugThread. */
+ sp<EmulatedCameraHotplugThread> mHotplugThread;
+
+public:
+ /* Contains device open entry point, as required by HAL API. */
+ static struct hw_module_methods_t mCameraModuleMethods;
+};
+
+}; /* namespace android */
+
+/* References the global EmulatedCameraFactory instance. */
+extern android::EmulatedCameraFactory gEmulatedCameraFactory;
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraHal.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraHal.cpp
new file mode 100644
index 0000000..b1f8b3a
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraHal.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of the camera HAL layer in the system running
+ * under the emulator.
+ *
+ * This file contains only required HAL header, which directs all the API calls
+ * to the EmulatedCameraFactory class implementation, which is responsible for
+ * managing emulated cameras.
+ */
+
+#include "EmulatedCameraFactory.h"
+
+/*
+ * Required HAL header.
+ */
+camera_module_t HAL_MODULE_INFO_SYM = {
+ common: {
+ tag: HARDWARE_MODULE_TAG,
+ module_api_version: CAMERA_MODULE_API_VERSION_2_3,
+ hal_api_version: HARDWARE_HAL_API_VERSION,
+ id: CAMERA_HARDWARE_MODULE_ID,
+ name: "Emulated Camera Module",
+ author: "The Android Open Source Project",
+ methods: &android::EmulatedCameraFactory::mCameraModuleMethods,
+ dso: NULL,
+ reserved: {0},
+ },
+ get_number_of_cameras: android::EmulatedCameraFactory::get_number_of_cameras,
+ get_camera_info: android::EmulatedCameraFactory::get_camera_info,
+ set_callbacks: android::EmulatedCameraFactory::set_callbacks,
+ get_vendor_tag_ops: android::EmulatedCameraFactory::get_vendor_tag_ops,
+ open_legacy: android::EmulatedCameraFactory::open_legacy
+};
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraHotplugThread.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraHotplugThread.cpp
new file mode 100644
index 0000000..bc629c1
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraHotplugThread.cpp
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_HotplugThread"
+#include <cutils/log.h>
+
+#include <fcntl.h>
+#include <sys/inotify.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "EmulatedCameraHotplugThread.h"
+#include "EmulatedCameraFactory.h"
+
+#define FAKE_HOTPLUG_FILE "/data/misc/media/emulator.camera.hotplug"
+
+#define EVENT_SIZE (sizeof(struct inotify_event))
+#define EVENT_BUF_LEN (1024*(EVENT_SIZE+16))
+
+#define SubscriberInfo EmulatedCameraHotplugThread::SubscriberInfo
+
+namespace android {
+
+EmulatedCameraHotplugThread::EmulatedCameraHotplugThread(
+ const int* cameraIdArray,
+ size_t size) :
+ Thread(/*canCallJava*/false) {
+
+ mRunning = true;
+ mInotifyFd = 0;
+
+ for (size_t i = 0; i < size; ++i) {
+ int id = cameraIdArray[i];
+
+ if (createFileIfNotExists(id)) {
+ mSubscribedCameraIds.push_back(id);
+ }
+ }
+}
+
+EmulatedCameraHotplugThread::~EmulatedCameraHotplugThread() {
+}
+
+status_t EmulatedCameraHotplugThread::requestExitAndWait() {
+ ALOGE("%s: Not implemented. Use requestExit + join instead",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+void EmulatedCameraHotplugThread::requestExit() {
+ Mutex::Autolock al(mMutex);
+
+ ALOGV("%s: Requesting thread exit", __FUNCTION__);
+ mRunning = false;
+
+ bool rmWatchFailed = false;
+ Vector<SubscriberInfo>::iterator it;
+ for (it = mSubscribers.begin(); it != mSubscribers.end(); ++it) {
+
+ if (inotify_rm_watch(mInotifyFd, it->WatchID) == -1) {
+
+ ALOGE("%s: Could not remove watch for camID '%d',"
+ " error: '%s' (%d)",
+ __FUNCTION__, it->CameraID, strerror(errno),
+ errno);
+
+ rmWatchFailed = true ;
+ } else {
+ ALOGV("%s: Removed watch for camID '%d'",
+ __FUNCTION__, it->CameraID);
+ }
+ }
+
+ if (rmWatchFailed) { // unlikely
+ // Give the thread a fighting chance to error out on the next
+ // read
+ if (close(mInotifyFd) == -1) {
+ ALOGE("%s: close failure error: '%s' (%d)",
+ __FUNCTION__, strerror(errno), errno);
+ }
+ }
+
+ ALOGV("%s: Request exit complete.", __FUNCTION__);
+}
+
+status_t EmulatedCameraHotplugThread::readyToRun() {
+ Mutex::Autolock al(mMutex);
+
+ mInotifyFd = -1;
+
+ do {
+ ALOGV("%s: Initializing inotify", __FUNCTION__);
+
+ mInotifyFd = inotify_init();
+ if (mInotifyFd == -1) {
+ ALOGE("%s: inotify_init failure error: '%s' (%d)",
+ __FUNCTION__, strerror(errno), errno);
+ mRunning = false;
+ break;
+ }
+
+ /**
+ * For each fake camera file, add a watch for when
+ * the file is closed (if it was written to)
+ */
+ Vector<int>::const_iterator it, end;
+ it = mSubscribedCameraIds.begin();
+ end = mSubscribedCameraIds.end();
+ for (; it != end; ++it) {
+ int cameraId = *it;
+ if (!addWatch(cameraId)) {
+ mRunning = false;
+ break;
+ }
+ }
+ } while(false);
+
+ if (!mRunning) {
+ status_t err = -errno;
+
+ if (mInotifyFd != -1) {
+ close(mInotifyFd);
+ }
+
+ return err;
+ }
+
+ return OK;
+}
+
+bool EmulatedCameraHotplugThread::threadLoop() {
+
+ // If requestExit was already called, mRunning will be false
+ while (mRunning) {
+ char buffer[EVENT_BUF_LEN];
+ int length = TEMP_FAILURE_RETRY(
+ read(mInotifyFd, buffer, EVENT_BUF_LEN));
+
+ if (length < 0) {
+ ALOGE("%s: Error reading from inotify FD, error: '%s' (%d)",
+ __FUNCTION__, strerror(errno),
+ errno);
+ mRunning = false;
+ break;
+ }
+
+ ALOGV("%s: Read %d bytes from inotify FD", __FUNCTION__, length);
+
+ int i = 0;
+ while (i < length) {
+ inotify_event* event = (inotify_event*) &buffer[i];
+
+ if (event->mask & IN_IGNORED) {
+ Mutex::Autolock al(mMutex);
+ if (!mRunning) {
+ ALOGV("%s: Shutting down thread", __FUNCTION__);
+ break;
+ } else {
+ ALOGE("%s: File was deleted, aborting",
+ __FUNCTION__);
+ mRunning = false;
+ break;
+ }
+ } else if (event->mask & IN_CLOSE_WRITE) {
+ int cameraId = getCameraId(event->wd);
+
+ if (cameraId < 0) {
+ ALOGE("%s: Got bad camera ID from WD '%d",
+ __FUNCTION__, event->wd);
+ } else {
+ // Check the file for the new hotplug event
+ String8 filePath = getFilePath(cameraId);
+ /**
+ * NOTE: we carefully avoid getting an inotify
+ * for the same exact file because it's opened for
+ * read-only, but our inotify is for write-only
+ */
+ int newStatus = readFile(filePath);
+
+ if (newStatus < 0) {
+ mRunning = false;
+ break;
+ }
+
+ int halStatus = newStatus ?
+ CAMERA_DEVICE_STATUS_PRESENT :
+ CAMERA_DEVICE_STATUS_NOT_PRESENT;
+ gEmulatedCameraFactory.onStatusChanged(cameraId,
+ halStatus);
+ }
+
+ } else {
+ ALOGW("%s: Unknown mask 0x%x",
+ __FUNCTION__, event->mask);
+ }
+
+ i += EVENT_SIZE + event->len;
+ }
+ }
+
+ if (!mRunning) {
+ close(mInotifyFd);
+ return false;
+ }
+
+ return true;
+}
+
+String8 EmulatedCameraHotplugThread::getFilePath(int cameraId) const {
+ return String8::format(FAKE_HOTPLUG_FILE ".%d", cameraId);
+}
+
+bool EmulatedCameraHotplugThread::createFileIfNotExists(int cameraId) const
+{
+ String8 filePath = getFilePath(cameraId);
+ // make sure this file exists and we have access to it
+ int fd = TEMP_FAILURE_RETRY(
+ open(filePath.string(), O_WRONLY | O_CREAT | O_TRUNC,
+ /* mode = ug+rwx */ S_IRWXU | S_IRWXG ));
+ if (fd == -1) {
+ ALOGE("%s: Could not create file '%s', error: '%s' (%d)",
+ __FUNCTION__, filePath.string(), strerror(errno), errno);
+ return false;
+ }
+
+ // File has '1' by default since we are plugged in by default
+ if (TEMP_FAILURE_RETRY(write(fd, "1\n", /*count*/2)) == -1) {
+ ALOGE("%s: Could not write '1' to file '%s', error: '%s' (%d)",
+ __FUNCTION__, filePath.string(), strerror(errno), errno);
+ return false;
+ }
+
+ close(fd);
+ return true;
+}
+
+int EmulatedCameraHotplugThread::getCameraId(String8 filePath) const {
+ Vector<int>::const_iterator it, end;
+ it = mSubscribedCameraIds.begin();
+ end = mSubscribedCameraIds.end();
+ for (; it != end; ++it) {
+ String8 camPath = getFilePath(*it);
+
+ if (camPath == filePath) {
+ return *it;
+ }
+ }
+
+ return NAME_NOT_FOUND;
+}
+
+int EmulatedCameraHotplugThread::getCameraId(int wd) const {
+ for (size_t i = 0; i < mSubscribers.size(); ++i) {
+ if (mSubscribers[i].WatchID == wd) {
+ return mSubscribers[i].CameraID;
+ }
+ }
+
+ return NAME_NOT_FOUND;
+}
+
+SubscriberInfo* EmulatedCameraHotplugThread::getSubscriberInfo(int cameraId)
+{
+ for (size_t i = 0; i < mSubscribers.size(); ++i) {
+ if (mSubscribers[i].CameraID == cameraId) {
+ return (SubscriberInfo*)&mSubscribers[i];
+ }
+ }
+
+ return NULL;
+}
+
+bool EmulatedCameraHotplugThread::addWatch(int cameraId) {
+ String8 camPath = getFilePath(cameraId);
+ int wd = inotify_add_watch(mInotifyFd,
+ camPath.string(),
+ IN_CLOSE_WRITE);
+
+ if (wd == -1) {
+ ALOGE("%s: Could not add watch for '%s', error: '%s' (%d)",
+ __FUNCTION__, camPath.string(), strerror(errno),
+ errno);
+
+ mRunning = false;
+ return false;
+ }
+
+ ALOGV("%s: Watch added for camID='%d', wd='%d'",
+ __FUNCTION__, cameraId, wd);
+
+ SubscriberInfo si = { cameraId, wd };
+ mSubscribers.push_back(si);
+
+ return true;
+}
+
+bool EmulatedCameraHotplugThread::removeWatch(int cameraId) {
+ SubscriberInfo* si = getSubscriberInfo(cameraId);
+
+ if (!si) return false;
+
+ if (inotify_rm_watch(mInotifyFd, si->WatchID) == -1) {
+
+ ALOGE("%s: Could not remove watch for camID '%d', error: '%s' (%d)",
+ __FUNCTION__, cameraId, strerror(errno),
+ errno);
+
+ return false;
+ }
+
+ Vector<SubscriberInfo>::iterator it;
+ for (it = mSubscribers.begin(); it != mSubscribers.end(); ++it) {
+ if (it->CameraID == cameraId) {
+ break;
+ }
+ }
+
+ if (it != mSubscribers.end()) {
+ mSubscribers.erase(it);
+ }
+
+ return true;
+}
+
+int EmulatedCameraHotplugThread::readFile(String8 filePath) const {
+
+ int fd = TEMP_FAILURE_RETRY(
+ open(filePath.string(), O_RDONLY, /*mode*/0));
+ if (fd == -1) {
+ ALOGE("%s: Could not open file '%s', error: '%s' (%d)",
+ __FUNCTION__, filePath.string(), strerror(errno), errno);
+ return -1;
+ }
+
+ char buffer[1];
+ int length;
+
+ length = TEMP_FAILURE_RETRY(
+ read(fd, buffer, sizeof(buffer)));
+
+ int retval;
+
+ ALOGV("%s: Read file '%s', length='%d', buffer='%c'",
+ __FUNCTION__, filePath.string(), length, buffer[0]);
+
+ if (length == 0) { // EOF
+ retval = 0; // empty file is the same thing as 0
+ } else if (buffer[0] == '0') {
+ retval = 0;
+ } else { // anything non-empty that's not beginning with '0'
+ retval = 1;
+ }
+
+ close(fd);
+
+ return retval;
+}
+
+} //namespace android
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraHotplugThread.h b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraHotplugThread.h
new file mode 100644
index 0000000..3e26e71
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedCameraHotplugThread.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_CAMERA_HOTPLUG_H
+#define HW_EMULATOR_CAMERA_EMULATED_CAMERA_HOTPLUG_H
+
+/**
+ * This class emulates hotplug events by inotifying on a file, specific
+ * to a camera ID. When the file changes between 1/0 the hotplug
+ * status goes between PRESENT and NOT_PRESENT.
+ *
+ * Refer to FAKE_HOTPLUG_FILE in EmulatedCameraHotplugThread.cpp
+ */
+
+#include "EmulatedCamera2.h"
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+namespace android {
+class EmulatedCameraHotplugThread : public Thread {
+ public:
+ EmulatedCameraHotplugThread(const int* cameraIdArray, size_t size);
+ ~EmulatedCameraHotplugThread();
+
+ virtual void requestExit();
+ virtual status_t requestExitAndWait();
+
+ private:
+
+
+ virtual status_t readyToRun();
+ virtual bool threadLoop();
+
+ struct SubscriberInfo {
+ int CameraID;
+ int WatchID;
+ };
+
+ bool addWatch(int cameraId);
+ bool removeWatch(int cameraId);
+ SubscriberInfo* getSubscriberInfo(int cameraId);
+
+ int getCameraId(String8 filePath) const;
+ int getCameraId(int wd) const;
+
+ String8 getFilePath(int cameraId) const;
+ int readFile(String8 filePath) const;
+
+ bool createFileIfNotExists(int cameraId) const;
+
+ int mInotifyFd;
+ Vector<int> mSubscribedCameraIds;
+ Vector<SubscriberInfo> mSubscribers;
+
+ // variables above are unguarded:
+ // -- accessed in thread loop or in constructor only
+
+ Mutex mMutex;
+
+ bool mRunning; // guarding only when it's important
+};
+} // namespace android
+
+#endif
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera.cpp
new file mode 100644
index 0000000..457850d
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedFakeCamera that encapsulates
+ * functionality of a fake camera.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_FakeCamera"
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include "EmulatedFakeCamera.h"
+#include "EmulatedCameraFactory.h"
+
+namespace android {
+
+EmulatedFakeCamera::EmulatedFakeCamera(int cameraId,
+ bool facingBack,
+ struct hw_module_t* module)
+ : EmulatedCamera(cameraId, module),
+ mFacingBack(facingBack),
+ mFakeCameraDevice(this)
+{
+}
+
+EmulatedFakeCamera::~EmulatedFakeCamera()
+{
+}
+
+/****************************************************************************
+ * Public API overrides
+ ***************************************************************************/
+
+status_t EmulatedFakeCamera::Initialize()
+{
+ status_t res = mFakeCameraDevice.Initialize();
+ if (res != NO_ERROR) {
+ return res;
+ }
+
+ const char* facing = mFacingBack ? EmulatedCamera::FACING_BACK :
+ EmulatedCamera::FACING_FRONT;
+
+ mParameters.set(EmulatedCamera::FACING_KEY, facing);
+ ALOGD("%s: Fake camera is facing %s", __FUNCTION__, facing);
+
+ mParameters.set(EmulatedCamera::ORIENTATION_KEY,
+ gEmulatedCameraFactory.getFakeCameraOrientation());
+
+ res = EmulatedCamera::Initialize();
+ if (res != NO_ERROR) {
+ return res;
+ }
+
+ /*
+ * Parameters provided by the camera device.
+ */
+
+ /* 352x288 and 320x240 frame dimensions are required by the framework for
+ * video mode preview and video recording. */
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES,
+ "640x480,352x288,320x240");
+ mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES,
+ "640x480,352x288,320x240");
+ mParameters.setPreviewSize(640, 480);
+ mParameters.setPictureSize(640, 480);
+
+ return NO_ERROR;
+}
+
+EmulatedCameraDevice* EmulatedFakeCamera::getCameraDevice()
+{
+ return &mFakeCameraDevice;
+}
+
+}; /* namespace android */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera.h b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera.h
new file mode 100644
index 0000000..4bfbd70
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_H
+#define HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_H
+
+/*
+ * Contains declaration of a class EmulatedFakeCamera that encapsulates
+ * functionality of a fake camera. This class is nothing more than a placeholder
+ * for EmulatedFakeCameraDevice instance.
+ */
+
+#include "EmulatedCamera.h"
+#include "EmulatedFakeCameraDevice.h"
+
+namespace android {
+
+/* Encapsulates functionality of a fake camera.
+ * This class is nothing more than a placeholder for EmulatedFakeCameraDevice
+ * instance that emulates a fake camera device.
+ */
+class EmulatedFakeCamera : public EmulatedCamera {
+public:
+ /* Constructs EmulatedFakeCamera instance. */
+ EmulatedFakeCamera(int cameraId, bool facingBack, struct hw_module_t* module);
+
+ /* Destructs EmulatedFakeCamera instance. */
+ ~EmulatedFakeCamera();
+
+ /****************************************************************************
+ * EmulatedCamera virtual overrides.
+ ***************************************************************************/
+
+public:
+ /* Initializes EmulatedFakeCamera instance. */
+ status_t Initialize();
+
+ /****************************************************************************
+ * EmulatedCamera abstract API implementation.
+ ***************************************************************************/
+
+protected:
+ /* Gets emulated camera device used by this instance of the emulated camera.
+ */
+ EmulatedCameraDevice* getCameraDevice();
+
+ /****************************************************************************
+ * Data members.
+ ***************************************************************************/
+
+protected:
+ /* Facing back (true) or front (false) switch. */
+ bool mFacingBack;
+
+ /* Contained fake camera device object. */
+ EmulatedFakeCameraDevice mFakeCameraDevice;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera2.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera2.cpp
new file mode 100644
index 0000000..d1beb92
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera2.cpp
@@ -0,0 +1,2717 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedFakeCamera2 that encapsulates
+ * functionality of an advanced fake camera.
+ */
+
+#include <inttypes.h>
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_FakeCamera2"
+#include <utils/Log.h>
+
+#include "EmulatedFakeCamera2.h"
+#include "EmulatedCameraFactory.h"
+#include <ui/Rect.h>
+#include <ui/GraphicBufferMapper.h>
+#include "gralloc_cb.h"
+
+#define ERROR_CAMERA_NOT_PRESENT -EPIPE
+
+#define CAMERA2_EXT_TRIGGER_TESTING_DISCONNECT 0xFFFFFFFF
+
+namespace android {
+
+const int64_t USEC = 1000LL;
+const int64_t MSEC = USEC * 1000LL;
+const int64_t SEC = MSEC * 1000LL;
+
+const uint32_t EmulatedFakeCamera2::kAvailableFormats[4] = {
+ HAL_PIXEL_FORMAT_RAW16,
+ HAL_PIXEL_FORMAT_BLOB,
+ HAL_PIXEL_FORMAT_RGBA_8888,
+ // HAL_PIXEL_FORMAT_YV12,
+ HAL_PIXEL_FORMAT_YCrCb_420_SP
+};
+
+const uint32_t EmulatedFakeCamera2::kAvailableRawSizes[2] = {
+ 640, 480
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint64_t EmulatedFakeCamera2::kAvailableRawMinDurations[1] = {
+ static_cast<uint64_t>(Sensor::kFrameDurationRange[0])
+};
+
+const uint32_t EmulatedFakeCamera2::kAvailableProcessedSizesBack[4] = {
+ 640, 480, 320, 240
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint32_t EmulatedFakeCamera2::kAvailableProcessedSizesFront[4] = {
+ 320, 240, 160, 120
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint64_t EmulatedFakeCamera2::kAvailableProcessedMinDurations[1] = {
+ static_cast<uint64_t>(Sensor::kFrameDurationRange[0])
+};
+
+const uint32_t EmulatedFakeCamera2::kAvailableJpegSizesBack[2] = {
+ 640, 480
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+const uint32_t EmulatedFakeCamera2::kAvailableJpegSizesFront[2] = {
+ 320, 240
+ // Sensor::kResolution[0], Sensor::kResolution[1]
+};
+
+
+const uint64_t EmulatedFakeCamera2::kAvailableJpegMinDurations[1] = {
+ static_cast<uint64_t>(Sensor::kFrameDurationRange[0])
+};
+
+
+EmulatedFakeCamera2::EmulatedFakeCamera2(int cameraId,
+ bool facingBack,
+ struct hw_module_t* module)
+ : EmulatedCamera2(cameraId,module),
+ mFacingBack(facingBack),
+ mIsConnected(false)
+{
+ ALOGD("Constructing emulated fake camera 2 facing %s",
+ facingBack ? "back" : "front");
+}
+
+EmulatedFakeCamera2::~EmulatedFakeCamera2() {
+ if (mCameraInfo != NULL) {
+ free_camera_metadata(mCameraInfo);
+ }
+}
+
+/****************************************************************************
+ * Public API overrides
+ ***************************************************************************/
+
+status_t EmulatedFakeCamera2::Initialize() {
+ status_t res;
+
+ res = constructStaticInfo(&mCameraInfo, true);
+ if (res != OK) {
+ ALOGE("%s: Unable to allocate static info: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ res = constructStaticInfo(&mCameraInfo, false);
+ if (res != OK) {
+ ALOGE("%s: Unable to fill in static info: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ if (res != OK) return res;
+
+ mNextStreamId = 1;
+ mNextReprocessStreamId = 1;
+ mRawStreamCount = 0;
+ mProcessedStreamCount = 0;
+ mJpegStreamCount = 0;
+ mReprocessStreamCount = 0;
+
+ return NO_ERROR;
+}
+
+/****************************************************************************
+ * Camera module API overrides
+ ***************************************************************************/
+
+status_t EmulatedFakeCamera2::connectCamera(hw_device_t** device) {
+ status_t res;
+ ALOGV("%s", __FUNCTION__);
+
+ {
+ Mutex::Autolock l(mMutex);
+ if (!mStatusPresent) {
+ ALOGE("%s: Camera ID %d is unplugged", __FUNCTION__,
+ mCameraID);
+ return -ENODEV;
+ }
+ }
+
+ mConfigureThread = new ConfigureThread(this);
+ mReadoutThread = new ReadoutThread(this);
+ mControlThread = new ControlThread(this);
+ mSensor = new Sensor();
+ mJpegCompressor = new JpegCompressor();
+
+ mNextStreamId = 1;
+ mNextReprocessStreamId = 1;
+
+ res = mSensor->startUp();
+ if (res != NO_ERROR) return res;
+
+ res = mConfigureThread->run("EmulatedFakeCamera2::configureThread");
+ if (res != NO_ERROR) return res;
+
+ res = mReadoutThread->run("EmulatedFakeCamera2::readoutThread");
+ if (res != NO_ERROR) return res;
+
+ res = mControlThread->run("EmulatedFakeCamera2::controlThread");
+ if (res != NO_ERROR) return res;
+
+ status_t ret = EmulatedCamera2::connectCamera(device);
+
+ if (ret >= 0) {
+ mIsConnected = true;
+ }
+
+ return ret;
+}
+
+status_t EmulatedFakeCamera2::plugCamera() {
+ {
+ Mutex::Autolock l(mMutex);
+
+ if (!mStatusPresent) {
+ ALOGI("%s: Plugged back in", __FUNCTION__);
+ mStatusPresent = true;
+ }
+ }
+
+ return NO_ERROR;
+}
+
+status_t EmulatedFakeCamera2::unplugCamera() {
+ {
+ Mutex::Autolock l(mMutex);
+
+ if (mStatusPresent) {
+ ALOGI("%s: Unplugged camera", __FUNCTION__);
+ mStatusPresent = false;
+ }
+ }
+
+ return closeCamera();
+}
+
+camera_device_status_t EmulatedFakeCamera2::getHotplugStatus() {
+ Mutex::Autolock l(mMutex);
+ return mStatusPresent ?
+ CAMERA_DEVICE_STATUS_PRESENT :
+ CAMERA_DEVICE_STATUS_NOT_PRESENT;
+}
+
+
+
+status_t EmulatedFakeCamera2::closeCamera() {
+ {
+ Mutex::Autolock l(mMutex);
+
+ status_t res;
+ ALOGV("%s", __FUNCTION__);
+
+ if (!mIsConnected) {
+ return NO_ERROR;
+ }
+
+ res = mSensor->shutDown();
+ if (res != NO_ERROR) {
+ ALOGE("%s: Unable to shut down sensor: %d", __FUNCTION__, res);
+ return res;
+ }
+
+ mConfigureThread->requestExit();
+ mReadoutThread->requestExit();
+ mControlThread->requestExit();
+ mJpegCompressor->cancel();
+ }
+
+ // give up the lock since we will now block and the threads
+ // can call back into this object
+ mConfigureThread->join();
+ mReadoutThread->join();
+ mControlThread->join();
+
+ ALOGV("%s exit", __FUNCTION__);
+
+ {
+ Mutex::Autolock l(mMutex);
+ mIsConnected = false;
+ }
+
+ return NO_ERROR;
+}
+
+status_t EmulatedFakeCamera2::getCameraInfo(struct camera_info *info) {
+ info->facing = mFacingBack ? CAMERA_FACING_BACK : CAMERA_FACING_FRONT;
+ info->orientation = gEmulatedCameraFactory.getFakeCameraOrientation();
+ return EmulatedCamera2::getCameraInfo(info);
+}
+
+/****************************************************************************
+ * Camera device API overrides
+ ***************************************************************************/
+
+/** Request input queue */
+
+int EmulatedFakeCamera2::requestQueueNotify() {
+ ALOGV("Request queue notification received");
+
+ ALOG_ASSERT(mRequestQueueSrc != NULL,
+ "%s: Request queue src not set, but received queue notification!",
+ __FUNCTION__);
+ ALOG_ASSERT(mFrameQueueDst != NULL,
+ "%s: Request queue src not set, but received queue notification!",
+ __FUNCTION__);
+ ALOG_ASSERT(mStreams.size() != 0,
+ "%s: No streams allocated, but received queue notification!",
+ __FUNCTION__);
+ return mConfigureThread->newRequestAvailable();
+}
+
+int EmulatedFakeCamera2::getInProgressCount() {
+ Mutex::Autolock l(mMutex);
+
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+
+ int requestCount = 0;
+ requestCount += mConfigureThread->getInProgressCount();
+ requestCount += mReadoutThread->getInProgressCount();
+ requestCount += mJpegCompressor->isBusy() ? 1 : 0;
+
+ return requestCount;
+}
+
+int EmulatedFakeCamera2::constructDefaultRequest(
+ int request_template,
+ camera_metadata_t **request) {
+
+ if (request == NULL) return BAD_VALUE;
+ if (request_template < 0 || request_template >= CAMERA2_TEMPLATE_COUNT) {
+ return BAD_VALUE;
+ }
+
+ {
+ Mutex::Autolock l(mMutex);
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+ }
+
+ status_t res;
+ // Pass 1, calculate size and allocate
+ res = constructDefaultRequest(request_template,
+ request,
+ true);
+ if (res != OK) {
+ return res;
+ }
+ // Pass 2, build request
+ res = constructDefaultRequest(request_template,
+ request,
+ false);
+ if (res != OK) {
+ ALOGE("Unable to populate new request for template %d",
+ request_template);
+ }
+
+ return res;
+}
+
+int EmulatedFakeCamera2::allocateStream(
+ uint32_t width,
+ uint32_t height,
+ int format,
+ const camera2_stream_ops_t *stream_ops,
+ uint32_t *stream_id,
+ uint32_t *format_actual,
+ uint32_t *usage,
+ uint32_t *max_buffers) {
+ Mutex::Autolock l(mMutex);
+
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+
+ // Temporary shim until FORMAT_ZSL is removed
+ if (format == CAMERA2_HAL_PIXEL_FORMAT_ZSL) {
+ format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ }
+
+ if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ unsigned int numFormats = sizeof(kAvailableFormats) / sizeof(uint32_t);
+ unsigned int formatIdx = 0;
+ unsigned int sizeOffsetIdx = 0;
+ for (; formatIdx < numFormats; formatIdx++) {
+ if (format == (int)kAvailableFormats[formatIdx]) break;
+ }
+ if (formatIdx == numFormats) {
+ ALOGE("%s: Format 0x%x is not supported", __FUNCTION__, format);
+ return BAD_VALUE;
+ }
+ }
+
+ const uint32_t *availableSizes;
+ size_t availableSizeCount;
+ switch (format) {
+ case HAL_PIXEL_FORMAT_RAW16:
+ availableSizes = kAvailableRawSizes;
+ availableSizeCount = sizeof(kAvailableRawSizes)/sizeof(uint32_t);
+ break;
+ case HAL_PIXEL_FORMAT_BLOB:
+ availableSizes = mFacingBack ?
+ kAvailableJpegSizesBack : kAvailableJpegSizesFront;
+ availableSizeCount = mFacingBack ?
+ sizeof(kAvailableJpegSizesBack)/sizeof(uint32_t) :
+ sizeof(kAvailableJpegSizesFront)/sizeof(uint32_t);
+ break;
+ case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ case HAL_PIXEL_FORMAT_YV12:
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+ availableSizes = mFacingBack ?
+ kAvailableProcessedSizesBack : kAvailableProcessedSizesFront;
+ availableSizeCount = mFacingBack ?
+ sizeof(kAvailableProcessedSizesBack)/sizeof(uint32_t) :
+ sizeof(kAvailableProcessedSizesFront)/sizeof(uint32_t);
+ break;
+ default:
+ ALOGE("%s: Unknown format 0x%x", __FUNCTION__, format);
+ return BAD_VALUE;
+ }
+
+ unsigned int resIdx = 0;
+ for (; resIdx < availableSizeCount; resIdx++) {
+ if (availableSizes[resIdx * 2] == width &&
+ availableSizes[resIdx * 2 + 1] == height) break;
+ }
+ if (resIdx == availableSizeCount) {
+ ALOGE("%s: Format 0x%x does not support resolution %d, %d", __FUNCTION__,
+ format, width, height);
+ return BAD_VALUE;
+ }
+
+ switch (format) {
+ case HAL_PIXEL_FORMAT_RAW16:
+ if (mRawStreamCount >= kMaxRawStreamCount) {
+ ALOGE("%s: Cannot allocate another raw stream (%d already allocated)",
+ __FUNCTION__, mRawStreamCount);
+ return INVALID_OPERATION;
+ }
+ mRawStreamCount++;
+ break;
+ case HAL_PIXEL_FORMAT_BLOB:
+ if (mJpegStreamCount >= kMaxJpegStreamCount) {
+ ALOGE("%s: Cannot allocate another JPEG stream (%d already allocated)",
+ __FUNCTION__, mJpegStreamCount);
+ return INVALID_OPERATION;
+ }
+ mJpegStreamCount++;
+ break;
+ default:
+ if (mProcessedStreamCount >= kMaxProcessedStreamCount) {
+ ALOGE("%s: Cannot allocate another processed stream (%d already allocated)",
+ __FUNCTION__, mProcessedStreamCount);
+ return INVALID_OPERATION;
+ }
+ mProcessedStreamCount++;
+ }
+
+ Stream newStream;
+ newStream.ops = stream_ops;
+ newStream.width = width;
+ newStream.height = height;
+ newStream.format = format;
+ // TODO: Query stride from gralloc
+ newStream.stride = width;
+
+ mStreams.add(mNextStreamId, newStream);
+
+ *stream_id = mNextStreamId;
+ if (format_actual) *format_actual = format;
+ *usage = GRALLOC_USAGE_HW_CAMERA_WRITE;
+ *max_buffers = kMaxBufferCount;
+
+ ALOGV("Stream allocated: %d, %d x %d, 0x%x. U: %x, B: %d",
+ *stream_id, width, height, format, *usage, *max_buffers);
+
+ mNextStreamId++;
+ return NO_ERROR;
+}
+
+// Records the concrete pixel format of an already-allocated stream once the
+// framework has registered its gralloc buffers. Only buffers[0] is inspected;
+// all buffers in the set are assumed to share one format.
+//
+// Returns BAD_VALUE for an empty buffer list, an unknown stream id, or a
+// still-unresolved HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED format;
+// ERROR_CAMERA_NOT_PRESENT after a simulated disconnect.
+int EmulatedFakeCamera2::registerStreamBuffers(
+        uint32_t stream_id,
+        int num_buffers,
+        buffer_handle_t *buffers) {
+    Mutex::Autolock l(mMutex);
+
+    if (!mStatusPresent) {
+        ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+        return ERROR_CAMERA_NOT_PRESENT;
+    }
+
+    ALOGV("%s: Stream %d registering %d buffers", __FUNCTION__,
+            stream_id, num_buffers);
+    // Need to find out what the final concrete pixel format for our stream is
+    // Assumes that all buffers have the same format.
+    if (num_buffers < 1) {
+        ALOGE("%s: Stream %d only has %d buffers!",
+                __FUNCTION__, stream_id, num_buffers);
+        return BAD_VALUE;
+    }
+    // FIX: the cast's template argument had been lost; restore the emulator
+    // gralloc handle type so the buffer's format field can be read.
+    const cb_handle_t *streamBuffer =
+            reinterpret_cast<const cb_handle_t*>(buffers[0]);
+
+    int finalFormat = streamBuffer->format;
+
+    if (finalFormat == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+        ALOGE("%s: Stream %d: Bad final pixel format "
+                "HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED; "
+                "concrete pixel format required!", __FUNCTION__, stream_id);
+        return BAD_VALUE;
+    }
+
+    ssize_t streamIndex = mStreams.indexOfKey(stream_id);
+    if (streamIndex < 0) {
+        ALOGE("%s: Unknown stream id %d!", __FUNCTION__, stream_id);
+        return BAD_VALUE;
+    }
+
+    Stream &stream = mStreams.editValueAt(streamIndex);
+
+    ALOGV("%s: Stream %d format set to %x, previously %x",
+            __FUNCTION__, stream_id, finalFormat, stream.format);
+
+    stream.format = finalFormat;
+
+    return NO_ERROR;
+}
+
+// Tears down an output stream, provided it exists and no in-flight request
+// still references it. Gives back the slot in whichever per-format stream
+// counter allocateStream() charged it against.
+int EmulatedFakeCamera2::releaseStream(uint32_t stream_id) {
+    Mutex::Autolock l(mMutex);
+
+    const ssize_t idx = mStreams.indexOfKey(stream_id);
+    if (idx < 0) {
+        ALOGE("%s: Unknown stream id %d!", __FUNCTION__, stream_id);
+        return BAD_VALUE;
+    }
+
+    if (isStreamInUse(stream_id)) {
+        ALOGE("%s: Cannot release stream %d; in use!", __FUNCTION__,
+                stream_id);
+        return BAD_VALUE;
+    }
+
+    // Decrement the same bucket allocateStream() incremented.
+    const int fmt = mStreams.valueAt(idx).format;
+    if (fmt == HAL_PIXEL_FORMAT_RAW16) {
+        mRawStreamCount--;
+    } else if (fmt == HAL_PIXEL_FORMAT_BLOB) {
+        mJpegStreamCount--;
+    } else {
+        mProcessedStreamCount--;
+    }
+
+    mStreams.removeItemsAt(idx);
+
+    return NO_ERROR;
+}
+
+// Creates a reprocess input stream that mirrors the geometry, format, and
+// stride of an existing output stream, so frames this HAL produced can be fed
+// back through the pipeline (ZSL-style). At most kMaxReprocessStreamCount may
+// exist at once. On success, writes the new id to *stream_id.
+int EmulatedFakeCamera2::allocateReprocessStreamFromStream(
+ uint32_t output_stream_id,
+ const camera2_stream_in_ops_t *stream_ops,
+ uint32_t *stream_id) {
+ Mutex::Autolock l(mMutex);
+
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+
+ // The reprocess stream must be derived from a known output stream.
+ ssize_t baseStreamIndex = mStreams.indexOfKey(output_stream_id);
+ if (baseStreamIndex < 0) {
+ ALOGE("%s: Unknown output stream id %d!", __FUNCTION__, output_stream_id);
+ return BAD_VALUE;
+ }
+
+ const Stream &baseStream = mStreams[baseStreamIndex];
+
+ // We'll reprocess anything we produced
+
+ if (mReprocessStreamCount >= kMaxReprocessStreamCount) {
+ ALOGE("%s: Cannot allocate another reprocess stream (%d already allocated)",
+ __FUNCTION__, mReprocessStreamCount);
+ return INVALID_OPERATION;
+ }
+ mReprocessStreamCount++;
+
+ // Clone the base stream's parameters; sourceStreamId links back to it.
+ ReprocessStream newStream;
+ newStream.ops = stream_ops;
+ newStream.width = baseStream.width;
+ newStream.height = baseStream.height;
+ newStream.format = baseStream.format;
+ newStream.stride = baseStream.stride;
+ newStream.sourceStreamId = output_stream_id;
+
+ *stream_id = mNextReprocessStreamId;
+ mReprocessStreams.add(mNextReprocessStreamId, newStream);
+
+ ALOGV("Reprocess stream allocated: %d: %d, %d, 0x%x. Parent stream: %d",
+ *stream_id, newStream.width, newStream.height, newStream.format,
+ output_stream_id);
+
+ mNextReprocessStreamId++;
+ return NO_ERROR;
+}
+
+// Releases a reprocess stream after verifying that it exists and that no
+// in-flight work still references it.
+int EmulatedFakeCamera2::releaseReprocessStream(uint32_t stream_id) {
+    Mutex::Autolock l(mMutex);
+
+    const ssize_t idx = mReprocessStreams.indexOfKey(stream_id);
+    if (idx < 0) {
+        ALOGE("%s: Unknown reprocess stream id %d!", __FUNCTION__, stream_id);
+        return BAD_VALUE;
+    }
+    if (isReprocessStreamInUse(stream_id)) {
+        ALOGE("%s: Cannot release reprocessing stream %d; in use!", __FUNCTION__,
+                stream_id);
+        return BAD_VALUE;
+    }
+
+    mReprocessStreamCount--;
+    mReprocessStreams.removeItemsAt(idx);
+
+    return NO_ERROR;
+}
+
+// Dispatches an asynchronous trigger (AF, precapture, or the testing-only
+// disconnect extension) to the control thread.
+// CAMERA2_EXT_TRIGGER_TESTING_DISCONNECT simulates unplugging the camera:
+// it marks the device absent, notifies the factory, and then fails like any
+// other call on a disconnected camera.
+int EmulatedFakeCamera2::triggerAction(uint32_t trigger_id,
+ int32_t ext1,
+ int32_t ext2) {
+ Mutex::Autolock l(mMutex);
+
+ if (trigger_id == CAMERA2_EXT_TRIGGER_TESTING_DISCONNECT) {
+ ALOGI("%s: Disconnect trigger - camera must be closed", __FUNCTION__);
+ mStatusPresent = false;
+
+ gEmulatedCameraFactory.onStatusChanged(
+ mCameraID,
+ CAMERA_DEVICE_STATUS_NOT_PRESENT);
+ }
+
+ // Deliberately also rejects the disconnect trigger itself, since the
+ // device is now "gone".
+ if (!mStatusPresent) {
+ ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+ return ERROR_CAMERA_NOT_PRESENT;
+ }
+
+ return mControlThread->triggerAction(trigger_id,
+ ext1, ext2);
+}
+
+/** Shutdown and debug methods */
+
+// Writes a human-readable summary of the device and its allocated streams to
+// the given file descriptor (used by dumpsys-style debugging).
+// NOTE(review): reads mStreams without holding mMutex — presumably acceptable
+// for a debug dump, but worth confirming.
+int EmulatedFakeCamera2::dump(int fd) {
+ String8 result;
+
+ result.appendFormat(" Camera HAL device: EmulatedFakeCamera2\n");
+ result.appendFormat(" Streams:\n");
+ for (size_t i = 0; i < mStreams.size(); i++) {
+ int id = mStreams.keyAt(i);
+ const Stream& s = mStreams.valueAt(i);
+ result.appendFormat(
+ " Stream %d: %d x %d, format 0x%x, stride %d\n",
+ id, s.width, s.height, s.format, s.stride);
+ }
+
+ write(fd, result.string(), result.size());
+
+ return NO_ERROR;
+}
+
+// Called by the worker threads when they hit an unrecoverable error.
+// Currently only logs; see TODO below.
+void EmulatedFakeCamera2::signalError() {
+ // TODO: Let parent know so we can shut down cleanly
+ ALOGE("Worker thread is signaling a serious error");
+}
+
+/** Pipeline control worker thread methods */
+
+// ConfigureThread dequeues requests from the framework request queue,
+// programs the sensor, and stages buffer sets for the readout thread.
+//
+// FIX: initialize the per-request state flags as well. Previously
+// mNextIsCapture, mNextNeedsJpeg and mWaitingForReadout were only assigned by
+// setupCapture()/setupReprocess(); in particular setupReprocess() never sets
+// mWaitingForReadout, so threadLoop() could test it while still
+// uninitialized.
+EmulatedFakeCamera2::ConfigureThread::ConfigureThread(EmulatedFakeCamera2 *parent):
+        Thread(false),
+        mParent(parent),
+        mRequestCount(0),
+        mNextBuffers(NULL) {
+    mRunning = false;
+    mNextIsCapture = false;
+    mNextNeedsJpeg = false;
+    mWaitingForReadout = false;
+}
+
+// No explicit cleanup; any in-flight request/buffer set is handed off to the
+// readout thread by threadLoop() rather than owned here.
+EmulatedFakeCamera2::ConfigureThread::~ConfigureThread() {
+}
+
+// Thread startup hook: resets request state, marks the thread as running, and
+// wakes anyone blocked in waitUntilRunning().
+status_t EmulatedFakeCamera2::ConfigureThread::readyToRun() {
+ Mutex::Autolock lock(mInputMutex);
+
+ ALOGV("Starting up ConfigureThread");
+ mRequest = NULL;
+ mActive = false;
+ mRunning = true;
+
+ mInputSignal.signal();
+ return NO_ERROR;
+}
+
+// Blocks until readyToRun() has marked the thread as running.
+// FIX: wait inside a loop — condition-variable waits can wake spuriously, so
+// the predicate must be re-checked after every wakeup.
+status_t EmulatedFakeCamera2::ConfigureThread::waitUntilRunning() {
+    Mutex::Autolock lock(mInputMutex);
+    while (!mRunning) {
+        ALOGV("Waiting for configure thread to start");
+        mInputSignal.wait(mInputMutex);
+    }
+    return OK;
+}
+
+// Notifies the configure thread that the framework has queued a new request.
+// Waits for the thread to be up first, then flips it to active and signals.
+status_t EmulatedFakeCamera2::ConfigureThread::newRequestAvailable() {
+ waitUntilRunning();
+
+ Mutex::Autolock lock(mInputMutex);
+
+ mActive = true;
+ mInputSignal.signal();
+
+ return OK;
+}
+
+// Returns true if any buffer currently staged for the next capture or
+// reprocess targets the given stream id.
+bool EmulatedFakeCamera2::ConfigureThread::isStreamInUse(uint32_t id) {
+    Mutex::Autolock lock(mInternalsMutex);
+
+    if (mNextBuffers == NULL) return false;
+
+    const size_t count = mNextBuffers->size();
+    for (size_t idx = 0; idx < count; idx++) {
+        const StreamBuffer &buf = (*mNextBuffers)[idx];
+        if (buf.streamId == (int)id) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// Number of requests dequeued by this thread and not yet handed off
+// (incremented in threadLoop, decremented in configureNextCapture/Reprocess).
+int EmulatedFakeCamera2::ConfigureThread::getInProgressCount() {
+ Mutex::Autolock lock(mInputMutex);
+ return mRequestCount;
+}
+
+// Main loop of the configure thread. Each iteration:
+//   1. Sleeps (bounded) while inactive, waking on newRequestAvailable().
+//   2. Dequeues the next request and parses it via setupCapture()/
+//      setupReprocess() according to ANDROID_REQUEST_TYPE.
+//   3. Waits (non-blocking per iteration) for the readout thread and the JPEG
+//      compressor to be free, as required by the request.
+//   4. Programs the sensor / hands buffers off for the request.
+// Returns false only on fatal errors, which stops the thread.
+//
+// FIX: removed a shadowing inner "status_t res;" declaration and a stray
+// double semicolon on the "success" flag; no behavioral change.
+bool EmulatedFakeCamera2::ConfigureThread::threadLoop() {
+    status_t res;
+
+    // Check if we're currently processing or just waiting
+    {
+        Mutex::Autolock lock(mInputMutex);
+        if (!mActive) {
+            // Inactive, keep waiting until we've been signaled
+            res = mInputSignal.waitRelative(mInputMutex, kWaitPerLoop);
+            if (res != NO_ERROR && res != TIMED_OUT) {
+                ALOGE("%s: Error waiting for input requests: %d",
+                        __FUNCTION__, res);
+                return false;
+            }
+            if (!mActive) return true;
+            ALOGV("New request available");
+        }
+        // Active
+    }
+
+    if (mRequest == NULL) {
+        Mutex::Autolock il(mInternalsMutex);
+
+        ALOGV("Configure: Getting next request");
+        res = mParent->mRequestQueueSrc->dequeue_request(
+            mParent->mRequestQueueSrc,
+            &mRequest);
+        if (res != NO_ERROR) {
+            ALOGE("%s: Error dequeuing next request: %d", __FUNCTION__, res);
+            mParent->signalError();
+            return false;
+        }
+        if (mRequest == NULL) {
+            ALOGV("Configure: Request queue empty, going inactive");
+            // No requests available, go into inactive mode
+            Mutex::Autolock lock(mInputMutex);
+            mActive = false;
+            return true;
+        } else {
+            Mutex::Autolock lock(mInputMutex);
+            mRequestCount++;
+        }
+
+        camera_metadata_entry_t type;
+        res = find_camera_metadata_entry(mRequest,
+                ANDROID_REQUEST_TYPE,
+                &type);
+        if (res != NO_ERROR) {
+            ALOGE("%s: error reading request type", __FUNCTION__);
+            mParent->signalError();
+            return false;
+        }
+        bool success = false;
+        switch (type.data.u8[0]) {
+            case ANDROID_REQUEST_TYPE_CAPTURE:
+                success = setupCapture();
+                break;
+            case ANDROID_REQUEST_TYPE_REPROCESS:
+                success = setupReprocess();
+                break;
+            default:
+                ALOGE("%s: Unexpected request type %d",
+                        __FUNCTION__, type.data.u8[0]);
+                mParent->signalError();
+                break;
+        }
+        if (!success) return false;
+
+    }
+
+    // Captures must wait until the readout thread has a free in-flight slot.
+    if (mWaitingForReadout) {
+        bool readoutDone;
+        readoutDone = mParent->mReadoutThread->waitForReady(kWaitPerLoop);
+        if (!readoutDone) return true;
+
+        if (mNextNeedsJpeg) {
+            ALOGV("Configure: Waiting for JPEG compressor");
+        } else {
+            ALOGV("Configure: Waiting for sensor");
+        }
+        mWaitingForReadout = false;
+    }
+
+    // BLOB outputs must wait until the (single) JPEG compressor is idle.
+    if (mNextNeedsJpeg) {
+        bool jpegDone;
+        jpegDone = mParent->mJpegCompressor->waitForDone(kWaitPerLoop);
+        if (!jpegDone) return true;
+
+        ALOGV("Configure: Waiting for sensor");
+        mNextNeedsJpeg = false;
+    }
+
+    if (mNextIsCapture) {
+        return configureNextCapture();
+    } else {
+        return configureNextReprocess();
+    }
+}
+
+// Parses a capture request: runs 3A overrides, builds the StreamBuffer list
+// for every requested output stream, and caches the sensor settings (frame
+// number, exposure, frame duration, sensitivity) for configureNextCapture().
+// Returns false (after signalError) on any malformed request.
+bool EmulatedFakeCamera2::ConfigureThread::setupCapture() {
+    status_t res;
+
+    mNextIsCapture = true;
+    // Get necessary parameters for sensor config
+    mParent->mControlThread->processRequest(mRequest);
+
+    camera_metadata_entry_t streams;
+    res = find_camera_metadata_entry(mRequest,
+            ANDROID_REQUEST_OUTPUT_STREAMS,
+            &streams);
+    if (res != NO_ERROR) {
+        ALOGE("%s: error reading output stream tag", __FUNCTION__);
+        mParent->signalError();
+        return false;
+    }
+
+    mNextBuffers = new Buffers;
+    mNextNeedsJpeg = false;
+    ALOGV("Configure: Setting up buffers for capture");
+    for (size_t i = 0; i < streams.count; i++) {
+        int streamId = streams.data.i32[i];
+        const Stream &s = mParent->getStreamInfo(streamId);
+        if (s.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+            ALOGE("%s: Stream %d does not have a concrete pixel format, but "
+                    "is included in a request!", __FUNCTION__, streamId);
+            mParent->signalError();
+            return false;
+        }
+        StreamBuffer b;
+        // FIX: use the id decoded from the i32 entry above. The old
+        // "streams.data.u8[i]" indexed the raw byte view of the i32 array,
+        // which only worked for element 0 with ids < 256 on little-endian.
+        b.streamId = streamId;
+        b.width  = s.width;
+        b.height = s.height;
+        b.format = s.format;
+        b.stride = s.stride;
+        mNextBuffers->push_back(b);
+        ALOGV("Configure: Buffer %zu: Stream %d, %d x %d, format 0x%x, "
+                "stride %d",
+                i, b.streamId, b.width, b.height, b.format, b.stride);
+        if (b.format == HAL_PIXEL_FORMAT_BLOB) {
+            // A BLOB output means this request needs the JPEG compressor.
+            mNextNeedsJpeg = true;
+        }
+    }
+
+    camera_metadata_entry_t e;
+    res = find_camera_metadata_entry(mRequest,
+            ANDROID_REQUEST_FRAME_COUNT,
+            &e);
+    if (res != NO_ERROR) {
+        ALOGE("%s: error reading frame count tag: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        mParent->signalError();
+        return false;
+    }
+    mNextFrameNumber = *e.data.i32;
+
+    res = find_camera_metadata_entry(mRequest,
+            ANDROID_SENSOR_EXPOSURE_TIME,
+            &e);
+    if (res != NO_ERROR) {
+        ALOGE("%s: error reading exposure time tag: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        mParent->signalError();
+        return false;
+    }
+    mNextExposureTime = *e.data.i64;
+
+    res = find_camera_metadata_entry(mRequest,
+            ANDROID_SENSOR_FRAME_DURATION,
+            &e);
+    if (res != NO_ERROR) {
+        ALOGE("%s: error reading frame duration tag", __FUNCTION__);
+        mParent->signalError();
+        return false;
+    }
+    mNextFrameDuration = *e.data.i64;
+
+    // Frame duration can never be shorter than exposure plus the sensor's
+    // minimum vertical blanking interval.
+    if (mNextFrameDuration <
+            mNextExposureTime + Sensor::kMinVerticalBlank) {
+        mNextFrameDuration = mNextExposureTime + Sensor::kMinVerticalBlank;
+    }
+    res = find_camera_metadata_entry(mRequest,
+            ANDROID_SENSOR_SENSITIVITY,
+            &e);
+    if (res != NO_ERROR) {
+        ALOGE("%s: error reading sensitivity tag", __FUNCTION__);
+        mParent->signalError();
+        return false;
+    }
+    mNextSensitivity = *e.data.i32;
+
+    // Start waiting on readout thread
+    mWaitingForReadout = true;
+    ALOGV("Configure: Waiting for readout thread");
+
+    return true;
+}
+
+// Once the sensor reports vertical sync, programs it with the settings cached
+// by setupCapture(), dequeues/locks the output buffers, and hands the request
+// plus buffers to the readout thread and the sensor.
+// Returns true while still waiting for vsync (retry next loop iteration).
+bool EmulatedFakeCamera2::ConfigureThread::configureNextCapture() {
+    bool vsync = mParent->mSensor->waitForVSync(kWaitPerLoop);
+    if (!vsync) return true;
+
+    Mutex::Autolock il(mInternalsMutex);
+    ALOGV("Configure: Configuring sensor for capture %d", mNextFrameNumber);
+    mParent->mSensor->setExposureTime(mNextExposureTime);
+    mParent->mSensor->setFrameDuration(mNextFrameDuration);
+    mParent->mSensor->setSensitivity(mNextSensitivity);
+
+    // FIX: the return value was ignored, so a failed buffer dequeue/lock
+    // would still hand a partially set-up buffer list downstream.
+    // getBuffers() has already called signalError() on failure.
+    if (!getBuffers()) return false;
+
+    ALOGV("Configure: Done configure for capture %d", mNextFrameNumber);
+    mParent->mReadoutThread->setNextOperation(true, mRequest, mNextBuffers);
+    mParent->mSensor->setDestinationBuffers(mNextBuffers);
+
+    // Ownership of the request and buffers now lies with readout/sensor.
+    mRequest = NULL;
+    mNextBuffers = NULL;
+
+    Mutex::Autolock lock(mInputMutex);
+    mRequestCount--;
+
+    return true;
+}
+
+// Parses a reprocess request: builds the input buffer list from the request's
+// input (reprocess) streams and the output list from its output streams.
+// Only ZSL input (HAL_PIXEL_FORMAT_RGB_888) and JPEG (BLOB) output are
+// supported. Returns false (after signalError) on any malformed request.
+bool EmulatedFakeCamera2::ConfigureThread::setupReprocess() {
+    status_t res;
+
+    // Reprocess output always goes through the JPEG compressor.
+    mNextNeedsJpeg = true;
+    mNextIsCapture = false;
+
+    camera_metadata_entry_t reprocessStreams;
+    res = find_camera_metadata_entry(mRequest,
+            ANDROID_REQUEST_INPUT_STREAMS,
+            &reprocessStreams);
+    if (res != NO_ERROR) {
+        // FIX: this error path reads the INPUT streams tag; the old message
+        // said "output stream tag", copied from setupCapture().
+        ALOGE("%s: error reading input stream tag", __FUNCTION__);
+        mParent->signalError();
+        return false;
+    }
+
+    mNextBuffers = new Buffers;
+
+    ALOGV("Configure: Setting up input buffers for reprocess");
+    for (size_t i = 0; i < reprocessStreams.count; i++) {
+        int streamId = reprocessStreams.data.i32[i];
+        const ReprocessStream &s = mParent->getReprocessStreamInfo(streamId);
+        if (s.format != HAL_PIXEL_FORMAT_RGB_888) {
+            ALOGE("%s: Only ZSL reprocessing supported!",
+                    __FUNCTION__);
+            mParent->signalError();
+            return false;
+        }
+        StreamBuffer b;
+        // Negative ids mark input (reprocess) buffers for getBuffers().
+        b.streamId = -streamId;
+        b.width  = s.width;
+        b.height = s.height;
+        b.format = s.format;
+        b.stride = s.stride;
+        mNextBuffers->push_back(b);
+    }
+
+    camera_metadata_entry_t streams;
+    res = find_camera_metadata_entry(mRequest,
+            ANDROID_REQUEST_OUTPUT_STREAMS,
+            &streams);
+    if (res != NO_ERROR) {
+        ALOGE("%s: error reading output stream tag", __FUNCTION__);
+        mParent->signalError();
+        return false;
+    }
+
+    ALOGV("Configure: Setting up output buffers for reprocess");
+    for (size_t i = 0; i < streams.count; i++) {
+        int streamId = streams.data.i32[i];
+        const Stream &s = mParent->getStreamInfo(streamId);
+        if (s.format != HAL_PIXEL_FORMAT_BLOB) {
+            // TODO: Support reprocess to YUV
+            ALOGE("%s: Non-JPEG output stream %d for reprocess not supported",
+                    __FUNCTION__, streamId);
+            mParent->signalError();
+            return false;
+        }
+        StreamBuffer b;
+        // FIX: same stream-id decoding bug as setupCapture(); use the i32
+        // value, not a byte of its raw representation.
+        b.streamId = streamId;
+        b.width  = s.width;
+        b.height = s.height;
+        b.format = s.format;
+        b.stride = s.stride;
+        mNextBuffers->push_back(b);
+        ALOGV("Configure: Buffer %zu: Stream %d, %d x %d, format 0x%x, "
+                "stride %d",
+                i, b.streamId, b.width, b.height, b.format, b.stride);
+    }
+
+    camera_metadata_entry_t e;
+    res = find_camera_metadata_entry(mRequest,
+            ANDROID_REQUEST_FRAME_COUNT,
+            &e);
+    if (res != NO_ERROR) {
+        ALOGE("%s: error reading frame count tag: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        mParent->signalError();
+        return false;
+    }
+    mNextFrameNumber = *e.data.i32;
+
+    return true;
+}
+
+// Acquires/locks all buffers for a reprocess request and hands the request
+// plus buffers to the readout thread (no sensor involvement).
+bool EmulatedFakeCamera2::ConfigureThread::configureNextReprocess() {
+    Mutex::Autolock il(mInternalsMutex);
+
+    // FIX: the return value was ignored; on failure getBuffers() has already
+    // signaled the error, so stop instead of handing off broken buffers.
+    if (!getBuffers()) return false;
+
+    ALOGV("Configure: Done configure for reprocess %d", mNextFrameNumber);
+    mParent->mReadoutThread->setNextOperation(false, mRequest, mNextBuffers);
+
+    // Ownership of the request and buffers now lies with the readout thread.
+    mRequest = NULL;
+    mNextBuffers = NULL;
+
+    Mutex::Autolock lock(mInputMutex);
+    mRequestCount--;
+
+    return true;
+}
+
+// Obtains and CPU-locks a gralloc buffer for every entry in mNextBuffers.
+// Positive stream ids are outputs (dequeue + lock for write); negative ids
+// are reprocess inputs (acquire + lock for read). On any failure the buffer
+// is returned to its stream, signalError() is called, and false is returned.
+// NOTE(review): buffers locked in earlier iterations are not unlocked on a
+// later failure — acceptable only because signalError() is fatal; confirm.
+bool EmulatedFakeCamera2::ConfigureThread::getBuffers() {
+ status_t res;
+ /** Get buffers to fill for this frame */
+ for (size_t i = 0; i < mNextBuffers->size(); i++) {
+ StreamBuffer &b = mNextBuffers->editItemAt(i);
+
+ if (b.streamId > 0) {
+ Stream s = mParent->getStreamInfo(b.streamId);
+ ALOGV("Configure: Dequeing buffer from stream %d", b.streamId);
+ res = s.ops->dequeue_buffer(s.ops, &(b.buffer) );
+ if (res != NO_ERROR || b.buffer == NULL) {
+ ALOGE("%s: Unable to dequeue buffer from stream %d: %s (%d)",
+ __FUNCTION__, b.streamId, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+
+ /* Lock the buffer from the perspective of the graphics mapper */
+ const Rect rect(s.width, s.height);
+
+ res = GraphicBufferMapper::get().lock(*(b.buffer),
+ GRALLOC_USAGE_HW_CAMERA_WRITE,
+ rect, (void**)&(b.img) );
+
+ if (res != NO_ERROR) {
+ ALOGE("%s: grbuffer_mapper.lock failure: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ // Give the un-lockable buffer back before bailing out.
+ s.ops->cancel_buffer(s.ops,
+ b.buffer);
+ mParent->signalError();
+ return false;
+ }
+ } else {
+ // Negative id: reprocess input, acquired read-only from its stream.
+ ReprocessStream s = mParent->getReprocessStreamInfo(-b.streamId);
+ ALOGV("Configure: Acquiring buffer from reprocess stream %d",
+ -b.streamId);
+ res = s.ops->acquire_buffer(s.ops, &(b.buffer) );
+ if (res != NO_ERROR || b.buffer == NULL) {
+ ALOGE("%s: Unable to acquire buffer from reprocess stream %d: "
+ "%s (%d)", __FUNCTION__, -b.streamId,
+ strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+
+ /* Lock the buffer from the perspective of the graphics mapper */
+ const Rect rect(s.width, s.height);
+
+ res = GraphicBufferMapper::get().lock(*(b.buffer),
+ GRALLOC_USAGE_HW_CAMERA_READ,
+ rect, (void**)&(b.img) );
+ if (res != NO_ERROR) {
+ ALOGE("%s: grbuffer_mapper.lock failure: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ s.ops->release_buffer(s.ops,
+ b.buffer);
+ mParent->signalError();
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// ReadoutThread consumes completed sensor frames, assembles output metadata,
+// and pushes filled buffers to their streams. Work arrives through a fixed
+// circular queue (mInFlightQueue) fed by the configure thread; head/tail
+// indices implement the ring with one slot kept empty (see
+// readyForNextCapture).
+EmulatedFakeCamera2::ReadoutThread::ReadoutThread(EmulatedFakeCamera2 *parent):
+ Thread(false),
+ mParent(parent),
+ mRunning(false),
+ mActive(false),
+ mRequestCount(0),
+ mRequest(NULL),
+ mBuffers(NULL) {
+ mInFlightQueue = new InFlightQueue[kInFlightQueueSize];
+ mInFlightHead = 0;
+ mInFlightTail = 0;
+}
+
+// FIX: mInFlightQueue is allocated with new[] in the constructor, so it must
+// be released with delete[]; plain delete on an array is undefined behavior.
+EmulatedFakeCamera2::ReadoutThread::~ReadoutThread() {
+    delete[] mInFlightQueue;
+}
+
+// Thread startup hook: marks the thread as running and wakes anyone blocked
+// in waitUntilRunning().
+status_t EmulatedFakeCamera2::ReadoutThread::readyToRun() {
+ Mutex::Autolock lock(mInputMutex);
+ ALOGV("Starting up ReadoutThread");
+ mRunning = true;
+ mInputSignal.signal();
+ return NO_ERROR;
+}
+
+// Blocks until readyToRun() has marked the thread as running.
+// FIX: wait inside a loop — condition-variable waits can wake spuriously, so
+// the predicate must be re-checked after every wakeup.
+status_t EmulatedFakeCamera2::ReadoutThread::waitUntilRunning() {
+    Mutex::Autolock lock(mInputMutex);
+    while (!mRunning) {
+        ALOGV("Waiting for readout thread to start");
+        mInputSignal.wait(mInputMutex);
+    }
+    return OK;
+}
+
+// Blocks (up to `timeout` per wait) until the in-flight queue has a free
+// slot. Returns false on timeout or wait error, true when a slot is free.
+// Correctly re-checks the predicate in a loop after each wakeup.
+bool EmulatedFakeCamera2::ReadoutThread::waitForReady(nsecs_t timeout) {
+ status_t res;
+ Mutex::Autolock lock(mInputMutex);
+ while (!readyForNextCapture()) {
+ res = mReadySignal.waitRelative(mInputMutex, timeout);
+ if (res == TIMED_OUT) return false;
+ if (res != OK) {
+ ALOGE("%s: Error waiting for ready: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return false;
+ }
+ }
+ return true;
+}
+
+// True if the circular in-flight queue has room: one slot is always left
+// empty to distinguish full (tail+1 == head) from empty (tail == head).
+// Caller must hold mInputMutex.
+bool EmulatedFakeCamera2::ReadoutThread::readyForNextCapture() {
+ return (mInFlightTail + 1) % kInFlightQueueSize != mInFlightHead;
+}
+
+// Enqueues one unit of work (capture or reprocess) from the configure thread.
+// Takes ownership of `request` and `buffers`. The configure thread is
+// expected to have checked waitForReady() first; a full queue here is treated
+// as a fatal error and the entry is dropped.
+void EmulatedFakeCamera2::ReadoutThread::setNextOperation(
+ bool isCapture,
+ camera_metadata_t *request,
+ Buffers *buffers) {
+ Mutex::Autolock lock(mInputMutex);
+ if ( !readyForNextCapture() ) {
+ ALOGE("In flight queue full, dropping captures");
+ mParent->signalError();
+ return;
+ }
+ mInFlightQueue[mInFlightTail].isCapture = isCapture;
+ mInFlightQueue[mInFlightTail].request = request;
+ mInFlightQueue[mInFlightTail].buffers = buffers;
+ mInFlightTail = (mInFlightTail + 1) % kInFlightQueueSize;
+ mRequestCount++;
+
+ // Wake threadLoop if it had gone idle.
+ if (!mActive) {
+ mActive = true;
+ mInputSignal.signal();
+ }
+}
+
+// True if the given stream id appears in any queued (in-flight) buffer set or
+// in the set currently being read out (mBuffers).
+bool EmulatedFakeCamera2::ReadoutThread::isStreamInUse(uint32_t id) {
+ // acquire in same order as threadLoop
+ Mutex::Autolock iLock(mInternalsMutex);
+ Mutex::Autolock lock(mInputMutex);
+
+ // Walk the circular queue from head to tail.
+ size_t i = mInFlightHead;
+ while (i != mInFlightTail) {
+ for (size_t j = 0; j < mInFlightQueue[i].buffers->size(); j++) {
+ if ( (*(mInFlightQueue[i].buffers))[j].streamId == (int)id )
+ return true;
+ }
+ i = (i + 1) % kInFlightQueueSize;
+ }
+
+
+ // Also check the request currently being processed (protected by
+ // mInternalsMutex).
+ if (mBuffers != NULL) {
+ for (i = 0; i < mBuffers->size(); i++) {
+ if ( (*mBuffers)[i].streamId == (int)id) return true;
+ }
+ }
+
+ return false;
+}
+
+// Number of work items queued via setNextOperation() and not yet completed
+// by threadLoop().
+int EmulatedFakeCamera2::ReadoutThread::getInProgressCount() {
+ Mutex::Autolock lock(mInputMutex);
+
+ return mRequestCount;
+}
+
+// Main loop of the readout thread. Each iteration: pops the next in-flight
+// work item (if idle), waits for the sensor frame (captures only), builds the
+// output metadata frame when requested, frees the request, then pushes each
+// filled buffer to its stream — deferring at most one BLOB buffer to the JPEG
+// compressor, which takes ownership of the buffer list.
+// Returns false only on fatal errors, which stops the thread.
+bool EmulatedFakeCamera2::ReadoutThread::threadLoop() {
+ static const nsecs_t kWaitPerLoop = 10000000L; // 10 ms
+ status_t res;
+ int32_t frameNumber;
+
+ // Check if we're currently processing or just waiting
+ {
+ Mutex::Autolock lock(mInputMutex);
+ if (!mActive) {
+ // Inactive, keep waiting until we've been signaled
+ res = mInputSignal.waitRelative(mInputMutex, kWaitPerLoop);
+ if (res != NO_ERROR && res != TIMED_OUT) {
+ ALOGE("%s: Error waiting for capture requests: %d",
+ __FUNCTION__, res);
+ mParent->signalError();
+ return false;
+ }
+ if (!mActive) return true;
+ }
+ // Active, see if we need a new request
+ if (mRequest == NULL) {
+ if (mInFlightHead == mInFlightTail) {
+ // Go inactive
+ ALOGV("Waiting for sensor data");
+ mActive = false;
+ return true;
+ } else {
+ // Pop the head of the in-flight queue; a slot just freed up,
+ // so wake any configure thread blocked in waitForReady().
+ Mutex::Autolock iLock(mInternalsMutex);
+ mReadySignal.signal();
+ mIsCapture = mInFlightQueue[mInFlightHead].isCapture;
+ mRequest = mInFlightQueue[mInFlightHead].request;
+ mBuffers = mInFlightQueue[mInFlightHead].buffers;
+ mInFlightQueue[mInFlightHead].request = NULL;
+ mInFlightQueue[mInFlightHead].buffers = NULL;
+ mInFlightHead = (mInFlightHead + 1) % kInFlightQueueSize;
+ ALOGV("Ready to read out request %p, %zu buffers",
+ mRequest, mBuffers->size());
+ }
+ }
+ }
+
+ // Active with request, wait on sensor to complete
+
+ nsecs_t captureTime;
+
+ if (mIsCapture) {
+ bool gotFrame;
+ gotFrame = mParent->mSensor->waitForNewFrame(kWaitPerLoop,
+ &captureTime);
+
+ if (!gotFrame) return true;
+ }
+
+ Mutex::Autolock iLock(mInternalsMutex);
+
+ camera_metadata_entry_t entry;
+ if (!mIsCapture) {
+ // Reprocess: the "capture time" is the original frame's timestamp,
+ // carried in the request metadata.
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_SENSOR_TIMESTAMP,
+ &entry);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading reprocessing timestamp: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+ captureTime = entry.data.i64[0];
+ }
+
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_REQUEST_FRAME_COUNT,
+ &entry);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading frame count tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+ frameNumber = *entry.data.i32;
+
+ res = find_camera_metadata_entry(mRequest,
+ ANDROID_REQUEST_METADATA_MODE,
+ &entry);
+ if (res != NO_ERROR) {
+ ALOGE("%s: error reading metadata mode tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ mParent->signalError();
+ return false;
+ }
+
+ // Got sensor data and request, construct frame and send it out
+ ALOGV("Readout: Constructing metadata and frames for request %d",
+ frameNumber);
+
+ if (*entry.data.u8 == ANDROID_REQUEST_METADATA_MODE_FULL) {
+ ALOGV("Readout: Metadata requested, constructing");
+
+ camera_metadata_t *frame = NULL;
+
+ size_t frame_entries = get_camera_metadata_entry_count(mRequest);
+ size_t frame_data = get_camera_metadata_data_count(mRequest);
+
+ // TODO: Dynamically calculate based on enabled statistics, etc
+ frame_entries += 10;
+ frame_data += 100;
+
+ res = mParent->mFrameQueueDst->dequeue_frame(mParent->mFrameQueueDst,
+ frame_entries, frame_data, &frame);
+
+ if (res != NO_ERROR || frame == NULL) {
+ ALOGE("%s: Unable to dequeue frame metadata buffer", __FUNCTION__);
+ mParent->signalError();
+ return false;
+ }
+
+ res = append_camera_metadata(frame, mRequest);
+ if (res != NO_ERROR) {
+ ALOGE("Unable to append request metadata");
+ }
+
+ if (mIsCapture) {
+ add_camera_metadata_entry(frame,
+ ANDROID_SENSOR_TIMESTAMP,
+ &captureTime,
+ 1);
+
+ collectStatisticsMetadata(frame);
+ // TODO: Collect all final values used from sensor in addition to timestamp
+ }
+
+ ALOGV("Readout: Enqueue frame %d", frameNumber);
+ mParent->mFrameQueueDst->enqueue_frame(mParent->mFrameQueueDst,
+ frame);
+ }
+ ALOGV("Readout: Free request");
+ res = mParent->mRequestQueueSrc->free_request(mParent->mRequestQueueSrc, mRequest);
+ if (res != NO_ERROR) {
+ ALOGE("%s: Unable to return request buffer to queue: %d",
+ __FUNCTION__, res);
+ mParent->signalError();
+ return false;
+ }
+ mRequest = NULL;
+
+ // Push non-JPEG buffers out immediately; remember at most one BLOB buffer
+ // for asynchronous JPEG compression.
+ int compressedBufferIndex = -1;
+ ALOGV("Readout: Processing %zu buffers", mBuffers->size());
+ for (size_t i = 0; i < mBuffers->size(); i++) {
+ const StreamBuffer &b = (*mBuffers)[i];
+ ALOGV("Readout: Buffer %zu: Stream %d, %d x %d, format 0x%x, stride %d",
+ i, b.streamId, b.width, b.height, b.format, b.stride);
+ if (b.streamId > 0) {
+ if (b.format == HAL_PIXEL_FORMAT_BLOB) {
+ // Assumes only one BLOB buffer type per capture
+ compressedBufferIndex = i;
+ } else {
+ ALOGV("Readout: Sending image buffer %zu (%p) to output stream %d",
+ i, (void*)*(b.buffer), b.streamId);
+ GraphicBufferMapper::get().unlock(*(b.buffer));
+ const Stream &s = mParent->getStreamInfo(b.streamId);
+ res = s.ops->enqueue_buffer(s.ops, captureTime, b.buffer);
+ if (res != OK) {
+ ALOGE("Error enqueuing image buffer %p: %s (%d)", b.buffer,
+ strerror(-res), res);
+ mParent->signalError();
+ }
+ }
+ }
+ }
+
+ if (compressedBufferIndex == -1) {
+ delete mBuffers;
+ } else {
+ ALOGV("Readout: Starting JPEG compression for buffer %d, stream %d",
+ compressedBufferIndex,
+ (*mBuffers)[compressedBufferIndex].streamId);
+ mJpegTimestamp = captureTime;
+ // Takes ownership of mBuffers
+ mParent->mJpegCompressor->start(mBuffers, this);
+ }
+ mBuffers = NULL;
+
+ Mutex::Autolock l(mInputMutex);
+ mRequestCount--;
+ ALOGV("Readout: Done with request %d", frameNumber);
+ return true;
+}
+
+// JpegCompressor completion callback: pushes the finished JPEG buffer to its
+// output stream, stamped with the capture time recorded when compression
+// started.
+// FIX: the enqueue_buffer result was stored but never checked; report
+// failures like the other enqueue paths do (cf. onJpegInputDone).
+void EmulatedFakeCamera2::ReadoutThread::onJpegDone(
+        const StreamBuffer &jpegBuffer, bool success) {
+    status_t res;
+    if (!success) {
+        ALOGE("%s: Error queueing compressed image buffer %p",
+                __FUNCTION__, jpegBuffer.buffer);
+        mParent->signalError();
+        return;
+    }
+
+    // Write to JPEG output stream
+    ALOGV("%s: Compression complete, pushing to stream %d", __FUNCTION__,
+            jpegBuffer.streamId);
+
+    GraphicBufferMapper::get().unlock(*(jpegBuffer.buffer));
+    const Stream &s = mParent->getStreamInfo(jpegBuffer.streamId);
+    res = s.ops->enqueue_buffer(s.ops, mJpegTimestamp, jpegBuffer.buffer);
+    if (res != OK) {
+        ALOGE("%s: Error enqueuing JPEG buffer %p: %s (%d)", __FUNCTION__,
+                jpegBuffer.buffer, strerror(-res), res);
+        mParent->signalError();
+    }
+}
+
+// JpegCompressor callback for the reprocess input buffer: unlocks it and
+// returns it to its reprocess stream (input ids are stored negated).
+void EmulatedFakeCamera2::ReadoutThread::onJpegInputDone(
+ const StreamBuffer &inputBuffer) {
+ status_t res;
+ GraphicBufferMapper::get().unlock(*(inputBuffer.buffer));
+ const ReprocessStream &s =
+ mParent->getReprocessStreamInfo(-inputBuffer.streamId);
+ res = s.ops->release_buffer(s.ops, inputBuffer.buffer);
+ if (res != OK) {
+ ALOGE("Error releasing reprocess buffer %p: %s (%d)",
+ inputBuffer.buffer, strerror(-res), res);
+ mParent->signalError();
+ }
+}
+
+// Appends synthetic face-detection statistics to an output metadata frame,
+// honoring the requested ANDROID_STATISTICS_FACE_DETECT_MODE (OFF: nothing;
+// SIMPLE: rectangles + scores; FULL: also landmarks + ids).
+//
+// FIX: the static_cast template arguments had been lost (static_cast with no
+// target type does not compile); restored as static_cast<int32_t> to match
+// the int32_t arrays being initialized. Also fixed the last error message,
+// which said "face scores" while adding face IDs.
+status_t EmulatedFakeCamera2::ReadoutThread::collectStatisticsMetadata(
+        camera_metadata_t *frame) {
+    // Completely fake face rectangles, don't correspond to real faces in scene
+    ALOGV("Readout: Collecting statistics metadata");
+
+    status_t res;
+    camera_metadata_entry_t entry;
+    res = find_camera_metadata_entry(frame,
+            ANDROID_STATISTICS_FACE_DETECT_MODE,
+            &entry);
+    if (res != OK) {
+        ALOGE("%s: Unable to find face detect mode!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    if (entry.data.u8[0] == ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) return OK;
+
+    // The coordinate system for the face regions is the raw sensor pixel
+    // coordinates. Here, we map from the scene coordinates (0-19 in both axis)
+    // to raw pixels, for the scene defined in fake-pipeline2/Scene.cpp. We
+    // approximately place two faces on top of the windows of the house. No
+    // actual faces exist there, but might one day. Note that this doesn't
+    // account for the offsets used to account for aspect ratio differences, so
+    // the rectangles don't line up quite right.
+    const size_t numFaces = 2;
+    int32_t rects[numFaces * 4] = {
+        static_cast<int32_t>(Sensor::kResolution[0] * 10 / 20),
+        static_cast<int32_t>(Sensor::kResolution[1] * 15 / 20),
+        static_cast<int32_t>(Sensor::kResolution[0] * 12 / 20),
+        static_cast<int32_t>(Sensor::kResolution[1] * 17 / 20),
+
+        static_cast<int32_t>(Sensor::kResolution[0] * 16 / 20),
+        static_cast<int32_t>(Sensor::kResolution[1] * 15 / 20),
+        static_cast<int32_t>(Sensor::kResolution[0] * 18 / 20),
+        static_cast<int32_t>(Sensor::kResolution[1] * 17 / 20)
+    };
+    // To simulate some kind of real detection going on, we jitter the rectangles on
+    // each frame by a few pixels in each dimension.
+    for (size_t i = 0; i < numFaces * 4; i++) {
+        rects[i] += (int32_t)(((float)rand() / RAND_MAX) * 6 - 3);
+    }
+    // The confidence scores (0-100) are similarly jittered.
+    uint8_t scores[numFaces] = { 85, 95 };
+    for (size_t i = 0; i < numFaces; i++) {
+        scores[i] += (int32_t)(((float)rand() / RAND_MAX) * 10 - 5);
+    }
+
+    res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_RECTANGLES,
+            rects, numFaces * 4);
+    if (res != OK) {
+        ALOGE("%s: Unable to add face rectangles!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_SCORES,
+            scores, numFaces);
+    if (res != OK) {
+        ALOGE("%s: Unable to add face scores!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    if (entry.data.u8[0] == ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE) return OK;
+
+    // Advanced face detection options - add eye/mouth coordinates. The
+    // coordinates in order are (leftEyeX, leftEyeY, rightEyeX, rightEyeY,
+    // mouthX, mouthY). The mapping is the same as the face rectangles.
+    int32_t features[numFaces * 6] = {
+        static_cast<int32_t>(Sensor::kResolution[0] * 10.5 / 20),
+        static_cast<int32_t>(Sensor::kResolution[1] * 16 / 20),
+        static_cast<int32_t>(Sensor::kResolution[0] * 11.5 / 20),
+        static_cast<int32_t>(Sensor::kResolution[1] * 16 / 20),
+        static_cast<int32_t>(Sensor::kResolution[0] * 11 / 20),
+        static_cast<int32_t>(Sensor::kResolution[1] * 16.5 / 20),
+
+        static_cast<int32_t>(Sensor::kResolution[0] * 16.5 / 20),
+        static_cast<int32_t>(Sensor::kResolution[1] * 16 / 20),
+        static_cast<int32_t>(Sensor::kResolution[0] * 17.5 / 20),
+        static_cast<int32_t>(Sensor::kResolution[1] * 16 / 20),
+        static_cast<int32_t>(Sensor::kResolution[0] * 17 / 20),
+        static_cast<int32_t>(Sensor::kResolution[1] * 16.5 / 20),
+    };
+    // Jitter these a bit less than the rects
+    for (size_t i = 0; i < numFaces * 6; i++) {
+        features[i] += (int32_t)(((float)rand() / RAND_MAX) * 4 - 2);
+    }
+    // These are unique IDs that are used to identify each face while it's
+    // visible to the detector (if a face went away and came back, it'd get a
+    // new ID).
+    int32_t ids[numFaces] = {
+        100, 200
+    };
+
+    res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_LANDMARKS,
+            features, numFaces * 6);
+    if (res != OK) {
+        ALOGE("%s: Unable to add face landmarks!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_IDS,
+            ids, numFaces);
+    if (res != OK) {
+        ALOGE("%s: Unable to add face ids!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    return OK;
+}
+
+// ControlThread simulates the 3A (AF/AE/AWB) state machines; most state is
+// initialized in readyToRun() rather than here.
+EmulatedFakeCamera2::ControlThread::ControlThread(EmulatedFakeCamera2 *parent):
+ Thread(false),
+ mParent(parent) {
+ mRunning = false;
+}
+
+// No owned resources to release.
+EmulatedFakeCamera2::ControlThread::~ControlThread() {
+}
+
+// Thread startup hook: seeds the simulated 3A state machines with their
+// defaults, marks the thread running, and wakes waitUntilRunning() callers.
+//
+// FIX: also initialize mAeLock. processRequest() evaluates
+// "if (mAeLock && !aeLock)" before assigning mAeLock, so on the very first
+// request it read an uninitialized value.
+status_t EmulatedFakeCamera2::ControlThread::readyToRun() {
+    Mutex::Autolock lock(mInputMutex);
+
+    ALOGV("Starting up ControlThread");
+    mRunning = true;
+    mStartAf = false;
+    mCancelAf = false;
+    mStartPrecapture = false;
+
+    mControlMode = ANDROID_CONTROL_MODE_AUTO;
+
+    mEffectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+    mSceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+
+    mAfMode = ANDROID_CONTROL_AF_MODE_AUTO;
+    mAfModeChange = false;
+
+    mAeMode = ANDROID_CONTROL_AE_MODE_ON;
+    mAeLock = false;
+    mAwbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+
+    mAfTriggerId = 0;
+    mPrecaptureTriggerId = 0;
+
+    mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+    mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+    mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
+
+    mExposureTime = kNormalExposureTime;
+
+    mInputSignal.signal();
+    return NO_ERROR;
+}
+
+// Blocks until readyToRun() has marked the thread as running.
+// FIX: wait inside a loop — condition-variable waits can wake spuriously, so
+// the predicate must be re-checked after every wakeup.
+status_t EmulatedFakeCamera2::ControlThread::waitUntilRunning() {
+    Mutex::Autolock lock(mInputMutex);
+    while (!mRunning) {
+        ALOGV("Waiting for control thread to start");
+        mInputSignal.wait(mInputMutex);
+    }
+    return OK;
+}
+
+// Override android.control.* fields with 3A values before sending request to sensor
+//
+// Snapshots the request's control settings into the thread's 3A state, and
+// (when AE is active) overwrites the request's exposure time with the
+// simulated AE result. Returns OK, or the metadata-lookup status when
+// android.control.mode is OFF (see NOTE below).
+status_t EmulatedFakeCamera2::ControlThread::processRequest(camera_metadata_t *request) {
+    Mutex::Autolock lock(mInputMutex);
+    // TODO: Add handling for all android.control.* fields here
+    camera_metadata_entry_t mode;
+    status_t res;
+
+// Reads the first u8 of |what| if the preceding lookup succeeded, otherwise
+// falls back to |def|.
+#define READ_IF_OK(res, what, def) \
+    (((res) == OK) ? (what) : (uint8_t)(def))
+
+    res = find_camera_metadata_entry(request,
+            ANDROID_CONTROL_MODE,
+            &mode);
+    mControlMode = READ_IF_OK(res, mode.data.u8[0], ANDROID_CONTROL_MODE_OFF);
+
+    // disable all 3A
+    if (mControlMode == ANDROID_CONTROL_MODE_OFF) {
+        mEffectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+        mSceneMode = ANDROID_CONTROL_SCENE_MODE_DISABLED;
+        mAfMode = ANDROID_CONTROL_AF_MODE_OFF;
+        mAeLock = ANDROID_CONTROL_AE_LOCK_ON;
+        mAeMode = ANDROID_CONTROL_AE_MODE_OFF;
+        mAfModeChange = true;
+        mStartAf = false;
+        mCancelAf = true;
+        mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+        mAwbMode = ANDROID_CONTROL_AWB_MODE_OFF;
+        // NOTE(review): returns the ANDROID_CONTROL_MODE lookup status, so a
+        // request without that tag propagates NAME_NOT_FOUND even though 3A
+        // was disabled as intended — confirm callers expect this.
+        return res;
+    }
+
+    res = find_camera_metadata_entry(request,
+            ANDROID_CONTROL_EFFECT_MODE,
+            &mode);
+    mEffectMode = READ_IF_OK(res, mode.data.u8[0],
+            ANDROID_CONTROL_EFFECT_MODE_OFF);
+
+    res = find_camera_metadata_entry(request,
+            ANDROID_CONTROL_SCENE_MODE,
+            &mode);
+    mSceneMode = READ_IF_OK(res, mode.data.u8[0],
+            ANDROID_CONTROL_SCENE_MODE_DISABLED);
+
+    res = find_camera_metadata_entry(request,
+            ANDROID_CONTROL_AF_MODE,
+            &mode);
+    // BUG FIX: mode.data.u8[0] was previously read without checking |res|,
+    // so a request missing ANDROID_CONTROL_AF_MODE reused whatever entry the
+    // last successful lookup left in |mode| (the scene mode). Fall back to
+    // the current AF mode so a missing entry is a no-op.
+    uint8_t afMode = READ_IF_OK(res, mode.data.u8[0], mAfMode);
+    if (mAfMode != afMode) {
+        ALOGV("AF new mode: %d, old mode %d", afMode, mAfMode);
+        mAfMode = afMode;
+        // Mode changes reset the AF state machine in threadLoop().
+        mAfModeChange = true;
+        mStartAf = false;
+        mCancelAf = false;
+    }
+
+    res = find_camera_metadata_entry(request,
+            ANDROID_CONTROL_AE_MODE,
+            &mode);
+    mAeMode = READ_IF_OK(res, mode.data.u8[0],
+            ANDROID_CONTROL_AE_MODE_OFF);
+
+    res = find_camera_metadata_entry(request,
+            ANDROID_CONTROL_AE_LOCK,
+            &mode);
+    uint8_t aeLockVal = READ_IF_OK(res, mode.data.u8[0],
+            ANDROID_CONTROL_AE_LOCK_ON);
+    bool aeLock = (aeLockVal == ANDROID_CONTROL_AE_LOCK_ON);
+    // Unlocking AE restarts its state machine from INACTIVE.
+    if (mAeLock && !aeLock) {
+        mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+    }
+    mAeLock = aeLock;
+
+    res = find_camera_metadata_entry(request,
+            ANDROID_CONTROL_AWB_MODE,
+            &mode);
+    mAwbMode = READ_IF_OK(res, mode.data.u8[0],
+            ANDROID_CONTROL_AWB_MODE_OFF);
+
+    // TODO: Override more control fields
+
+    // With AE active, replace the request's exposure time in place with the
+    // simulated AE result.
+    if (mAeMode != ANDROID_CONTROL_AE_MODE_OFF) {
+        camera_metadata_entry_t exposureTime;
+        res = find_camera_metadata_entry(request,
+                ANDROID_SENSOR_EXPOSURE_TIME,
+                &exposureTime);
+        if (res == OK) {
+            exposureTime.data.i64[0] = mExposureTime;
+        }
+    }
+
+#undef READ_IF_OK
+
+    return OK;
+}
+
+// Records an asynchronous 3A trigger from the framework. Only sets flags and
+// trigger IDs under the lock; the actual state transitions happen on the
+// next threadLoop() cycle. Returns BAD_VALUE for unknown trigger types.
+status_t EmulatedFakeCamera2::ControlThread::triggerAction(uint32_t msgType,
+        int32_t ext1, int32_t ext2) {
+    ALOGV("%s: Triggering %d (%d, %d)", __FUNCTION__, msgType, ext1, ext2);
+    Mutex::Autolock lock(mInputMutex);
+    switch (msgType) {
+        case CAMERA2_TRIGGER_AUTOFOCUS:
+            // ext1 carries the framework's trigger ID, echoed back in
+            // AF notifications.
+            mAfTriggerId = ext1;
+            mStartAf = true;
+            mCancelAf = false;
+            break;
+        case CAMERA2_TRIGGER_CANCEL_AUTOFOCUS:
+            mAfTriggerId = ext1;
+            mStartAf = false;
+            mCancelAf = true;
+            break;
+        case CAMERA2_TRIGGER_PRECAPTURE_METERING:
+            mPrecaptureTriggerId = ext1;
+            mStartPrecapture = true;
+            break;
+        default:
+            ALOGE("%s: Unknown action triggered: %d (arguments %d %d)",
+                    __FUNCTION__, msgType, ext1, ext2);
+            return BAD_VALUE;
+    }
+    return OK;
+}
+
+// Tuning constants for the simulated 3A loops (durations in nanoseconds,
+// rates as per-cycle probabilities).
+const nsecs_t EmulatedFakeCamera2::ControlThread::kControlCycleDelay = 100 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMinAfDuration = 500 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMaxAfDuration = 900 * MSEC;
+const float EmulatedFakeCamera2::ControlThread::kAfSuccessRate = 0.9;
+    // Once every 5 seconds
+// BUG FIX: was "kControlCycleDelay / 5.0 * SEC", which parses as
+// "(kControlCycleDelay / 5.0) * SEC" — a huge per-cycle probability, so a
+// passive AF scan would start on every cycle instead of about once per
+// 5 seconds. Parenthesizing the denominator matches the comment and mirrors
+// kAeScanStartRate below (kControlCycleDelay divided by 3 seconds).
+const float EmulatedFakeCamera2::ControlThread::kContinuousAfStartRate =
+        kControlCycleDelay / (5.0 * SEC);
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMinAeDuration = 500 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMaxAeDuration = 2 * SEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMinPrecaptureAeDuration = 100 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMaxPrecaptureAeDuration = 400 * MSEC;
+    // Once every 3 seconds
+const float EmulatedFakeCamera2::ControlThread::kAeScanStartRate =
+    kControlCycleDelay / 3000000000.0;
+
+const nsecs_t EmulatedFakeCamera2::ControlThread::kNormalExposureTime = 10 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kExposureJump = 2 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMinExposureTime = 1 * MSEC;
+
+// One 3A simulation cycle: snapshot pending triggers and current state under
+// the input lock, advance the AF and AE state machines outside the lock,
+// sleep out the remainder of the control cycle, then decrement the running
+// scan timers. Always returns true so the Thread framework loops again.
+bool EmulatedFakeCamera2::ControlThread::threadLoop() {
+    bool afModeChange = false;
+    bool afTriggered = false;
+    bool afCancelled = false;
+    uint8_t afState;
+    uint8_t afMode;
+    int32_t afTriggerId;
+    bool precaptureTriggered = false;
+    uint8_t aeState;
+    uint8_t aeMode;
+    bool aeLock;
+    int32_t precaptureTriggerId;
+    nsecs_t nextSleep = kControlCycleDelay;
+
+    // Consume pending triggers and copy shared state to locals so the state
+    // machines below run without holding mInputMutex.
+    {
+        Mutex::Autolock lock(mInputMutex);
+        if (mStartAf) {
+            ALOGD("Starting AF trigger processing");
+            afTriggered = true;
+            mStartAf = false;
+        } else if (mCancelAf) {
+            ALOGD("Starting cancel AF trigger processing");
+            afCancelled = true;
+            mCancelAf = false;
+        }
+        afState = mAfState;
+        afMode = mAfMode;
+        afModeChange = mAfModeChange;
+        mAfModeChange = false;
+
+        afTriggerId = mAfTriggerId;
+
+        if(mStartPrecapture) {
+            ALOGD("Starting precapture trigger processing");
+            precaptureTriggered = true;
+            mStartPrecapture = false;
+        }
+        aeState = mAeState;
+        aeMode = mAeMode;
+        aeLock = mAeLock;
+        precaptureTriggerId = mPrecaptureTriggerId;
+    }
+
+    // Cancel or mode change resets AF to INACTIVE and abandons any scan.
+    if (afCancelled || afModeChange) {
+        ALOGV("Resetting AF state due to cancel/mode change");
+        afState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+        updateAfState(afState, afTriggerId);
+        mAfScanDuration = 0;
+        mLockAfterPassiveScan = false;
+    }
+
+    uint8_t oldAfState = afState;
+
+    if (afTriggered) {
+        afState = processAfTrigger(afMode, afState);
+    }
+
+    // Advance AF: possibly begin a passive scan, then progress/finish any
+    // running scan (which may shorten this cycle's sleep).
+    afState = maybeStartAfScan(afMode, afState);
+    afState = updateAfScan(afMode, afState, &nextSleep);
+    updateAfState(afState, afTriggerId);
+
+    if (precaptureTriggered) {
+        aeState = processPrecaptureTrigger(aeMode, aeState);
+    }
+
+    // Advance AE the same way.
+    aeState = maybeStartAeScan(aeMode, aeLock, aeState);
+    aeState = updateAeScan(aeMode, aeLock, aeState, &nextSleep);
+    updateAeState(aeState, precaptureTriggerId);
+
+    // Sleep the remainder of the cycle, restarting after interruptions;
+    // nanosleep writes the remaining time back into |t|.
+    // NOTE(review): tv_nsec assumes nextSleep < 1 s, which holds since it is
+    // capped at kControlCycleDelay (100 ms) — confirm if the cap changes.
+    int ret;
+    timespec t;
+    t.tv_sec = 0;
+    t.tv_nsec = nextSleep;
+    do {
+        ret = nanosleep(&t, &t);
+    } while (ret != 0);
+
+    // Charge the elapsed sleep against any running scan timers.
+    if (mAfScanDuration > 0) {
+        mAfScanDuration -= nextSleep;
+    }
+    if (mAeScanDuration > 0) {
+        mAeScanDuration -= nextSleep;
+    }
+
+    return true;
+}
+
+// Handles an AF trigger for the given AF mode, returning the new AF state.
+// AUTO/MACRO start a timed active scan; the continuous modes lock focus
+// immediately or after the in-flight passive scan completes.
+int EmulatedFakeCamera2::ControlThread::processAfTrigger(uint8_t afMode,
+        uint8_t afState) {
+    switch (afMode) {
+        case ANDROID_CONTROL_AF_MODE_OFF:
+        case ANDROID_CONTROL_AF_MODE_EDOF:
+            // Do nothing
+            break;
+        case ANDROID_CONTROL_AF_MODE_MACRO:
+        case ANDROID_CONTROL_AF_MODE_AUTO:
+            switch (afState) {
+                case ANDROID_CONTROL_AF_STATE_INACTIVE:
+                case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+                case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+                    // Start new focusing cycle
+                    // Scan length is random within [kMinAfDuration,
+                    // kMaxAfDuration]; updateAfScan() finishes it.
+                    mAfScanDuration =  ((double)rand() / RAND_MAX) *
+                        (kMaxAfDuration - kMinAfDuration) + kMinAfDuration;
+                    afState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
+                    ALOGV("%s: AF scan start, duration %" PRId64 " ms",
+                            __FUNCTION__, mAfScanDuration / 1000000);
+                    break;
+                case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
+                    // Ignore new request, already scanning
+                    break;
+                default:
+                    ALOGE("Unexpected AF state in AUTO/MACRO AF mode: %d",
+                            afState);
+            }
+            break;
+        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+            switch (afState) {
+                // Picture mode waits for passive scan to complete
+                case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+                    mLockAfterPassiveScan = true;
+                    break;
+                case ANDROID_CONTROL_AF_STATE_INACTIVE:
+                    afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+                    break;
+                case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+                    afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+                    break;
+                case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+                case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+                    // Must cancel to get out of these states
+                    break;
+                default:
+                    ALOGE("Unexpected AF state in CONTINUOUS_PICTURE AF mode: %d",
+                            afState);
+            }
+            break;
+        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+            switch (afState) {
+                // Video mode does not wait for passive scan to complete
+                case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+                case ANDROID_CONTROL_AF_STATE_INACTIVE:
+                    afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+                    break;
+                case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+                    afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+                    break;
+                case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+                case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+                    // Must cancel to get out of these states
+                    break;
+                default:
+                    ALOGE("Unexpected AF state in CONTINUOUS_VIDEO AF mode: %d",
+                            afState);
+            }
+            break;
+        default:
+            break;
+    }
+    return afState;
+}
+
+// In the continuous AF modes, randomly begins a passive scan when AF is idle
+// (INACTIVE or PASSIVE_FOCUSED). The per-cycle start probability is
+// kContinuousAfStartRate. Returns the (possibly updated) AF state.
+int EmulatedFakeCamera2::ControlThread::maybeStartAfScan(uint8_t afMode,
+        uint8_t afState) {
+    if ((afMode == ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO ||
+            afMode == ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE) &&
+        (afState == ANDROID_CONTROL_AF_STATE_INACTIVE ||
+            afState == ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED)) {
+
+        bool startScan = ((double)rand() / RAND_MAX) < kContinuousAfStartRate;
+        if (startScan) {
+            // Start new passive focusing cycle
+            // Random duration within [kMinAfDuration, kMaxAfDuration].
+            mAfScanDuration =  ((double)rand() / RAND_MAX) *
+                (kMaxAfDuration - kMinAfDuration) + kMinAfDuration;
+            afState = ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN;
+            ALOGV("%s: AF passive scan start, duration %" PRId64 " ms",
+                __FUNCTION__, mAfScanDuration / 1000000);
+        }
+    }
+    return afState;
+}
+
+// Progresses a running AF scan. When the scan timer has expired the scan is
+// resolved to a terminal state (randomly succeeding at kAfSuccessRate in
+// AUTO/MACRO); otherwise *maxSleep is clamped so threadLoop() wakes up in
+// time to finish the scan. Returns the new AF state.
+int EmulatedFakeCamera2::ControlThread::updateAfScan(uint8_t afMode,
+        uint8_t afState, nsecs_t *maxSleep) {
+    // Only active or passive scans need progressing.
+    if (! (afState == ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN ||
+            afState == ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN ) ) {
+        return afState;
+    }
+
+    if (mAfScanDuration <= 0) {
+        ALOGV("%s: AF scan done", __FUNCTION__);
+        switch (afMode) {
+            case ANDROID_CONTROL_AF_MODE_MACRO:
+            case ANDROID_CONTROL_AF_MODE_AUTO: {
+                bool success = ((double)rand() / RAND_MAX) < kAfSuccessRate;
+                if (success) {
+                    afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+                } else {
+                    afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+                }
+                break;
+            }
+            case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+                // A trigger received mid-scan (processAfTrigger) asked us to
+                // lock once the passive scan finished.
+                if (mLockAfterPassiveScan) {
+                    afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+                    mLockAfterPassiveScan = false;
+                } else {
+                    afState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
+                }
+                break;
+            case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+                afState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
+                break;
+            default:
+                ALOGE("Unexpected AF mode in scan state");
+        }
+    } else {
+        // Scan still running: don't sleep past its completion time.
+        if (mAfScanDuration <= *maxSleep) {
+            *maxSleep = mAfScanDuration;
+        }
+    }
+    return afState;
+}
+
+// Publishes a new AF state to the parent device. A CAMERA2_MSG_AUTOFOCUS
+// notification (carrying the state and trigger ID) is sent only when the
+// state actually changes.
+void EmulatedFakeCamera2::ControlThread::updateAfState(uint8_t newState,
+        int32_t triggerId) {
+    Mutex::Autolock lock(mInputMutex);
+    // Unchanged state: nothing to record or notify.
+    if (mAfState == newState) {
+        return;
+    }
+    ALOGV("%s: Autofocus state now %d, id %d", __FUNCTION__,
+            newState, triggerId);
+    mAfState = newState;
+    mParent->sendNotification(CAMERA2_MSG_AUTOFOCUS,
+            newState, triggerId, 0);
+}
+
+// Handles an AE precapture-metering trigger: in any active AE mode, enters
+// the PRECAPTURE state and starts a randomized precapture scan timer which
+// updateAeScan() runs down. AE_MODE_OFF leaves the state untouched.
+int EmulatedFakeCamera2::ControlThread::processPrecaptureTrigger(uint8_t aeMode,
+        uint8_t aeState) {
+    switch (aeMode) {
+        case ANDROID_CONTROL_AE_MODE_OFF:
+            // Don't do anything for these
+            return aeState;
+        case ANDROID_CONTROL_AE_MODE_ON:
+        case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
+        case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
+        case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE:
+            // Trigger a precapture cycle
+            aeState = ANDROID_CONTROL_AE_STATE_PRECAPTURE;
+            // Random duration within [kMinPrecaptureAeDuration,
+            // kMaxPrecaptureAeDuration].
+            mAeScanDuration = ((double)rand() / RAND_MAX) *
+                    (kMaxPrecaptureAeDuration - kMinPrecaptureAeDuration) +
+                    kMinPrecaptureAeDuration;
+            ALOGD("%s: AE precapture scan start, duration %" PRId64 " ms",
+                    __FUNCTION__, mAeScanDuration / 1000000);
+            // No break needed: last case in the switch. Unknown modes fall
+            // out of the switch with aeState unchanged.
+
+    }
+    return aeState;
+}
+
+// When AE is unlocked, idle (INACTIVE or CONVERGED) and in an active mode,
+// randomly begins an AE search with per-cycle probability kAeScanStartRate.
+// Returns the (possibly updated) AE state.
+int EmulatedFakeCamera2::ControlThread::maybeStartAeScan(uint8_t aeMode,
+        bool aeLocked,
+        uint8_t aeState) {
+    if (aeLocked) return aeState;
+    switch (aeMode) {
+        case ANDROID_CONTROL_AE_MODE_OFF:
+            break;
+        case ANDROID_CONTROL_AE_MODE_ON:
+        case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
+        case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
+        case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE: {
+            // Don't interrupt a search or precapture already in flight.
+            if (aeState != ANDROID_CONTROL_AE_STATE_INACTIVE &&
+                    aeState != ANDROID_CONTROL_AE_STATE_CONVERGED) break;
+
+            bool startScan = ((double)rand() / RAND_MAX) < kAeScanStartRate;
+            if (startScan) {
+                // Random duration within [kMinAeDuration, kMaxAeDuration].
+                mAeScanDuration = ((double)rand() / RAND_MAX) *
+                        (kMaxAeDuration - kMinAeDuration) + kMinAeDuration;
+                aeState = ANDROID_CONTROL_AE_STATE_SEARCHING;
+                ALOGV("%s: AE scan start, duration %" PRId64 " ms",
+                        __FUNCTION__, mAeScanDuration / 1000000);
+            }
+        }
+    }
+
+    return aeState;
+}
+
+// Progresses a running AE search/precapture scan. Locking AE outside
+// precapture aborts the scan; a finished scan converges (or locks) and
+// resets the exposure to normal; an in-flight scan jitters the simulated
+// exposure time and clamps *maxSleep so threadLoop() wakes in time.
+// Returns the new AE state.
+int EmulatedFakeCamera2::ControlThread::updateAeScan(uint8_t aeMode,
+        bool aeLock, uint8_t aeState, nsecs_t *maxSleep) {
+    if (aeLock && aeState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+        // AE lock cancels any plain search immediately.
+        mAeScanDuration = 0;
+        aeState = ANDROID_CONTROL_AE_STATE_LOCKED;
+    } else if ((aeState == ANDROID_CONTROL_AE_STATE_SEARCHING) ||
+            (aeState == ANDROID_CONTROL_AE_STATE_PRECAPTURE ) ) {
+        if (mAeScanDuration <= 0) {
+            ALOGV("%s: AE scan done", __FUNCTION__);
+            // A precapture that finishes while locked ends in LOCKED.
+            aeState = aeLock ?
+                    ANDROID_CONTROL_AE_STATE_LOCKED :ANDROID_CONTROL_AE_STATE_CONVERGED;
+
+            // mExposureTime is shared with processRequest(); guard it.
+            Mutex::Autolock lock(mInputMutex);
+            mExposureTime = kNormalExposureTime;
+        } else {
+            // Scan still running: don't sleep past its completion time.
+            if (mAeScanDuration <= *maxSleep) {
+                *maxSleep = mAeScanDuration;
+            }
+
+            // Random walk of +/- kExposureJump, floored at kMinExposureTime,
+            // to simulate the sensor hunting for exposure.
+            int64_t exposureDelta =
+                    ((double)rand() / RAND_MAX) * 2 * kExposureJump -
+                    kExposureJump;
+            Mutex::Autolock lock(mInputMutex);
+            mExposureTime = mExposureTime + exposureDelta;
+            if (mExposureTime < kMinExposureTime) mExposureTime = kMinExposureTime;
+        }
+    }
+
+    return aeState;
+}
+
+
+// Publishes a new AE state to the parent device. A CAMERA2_MSG_AUTOEXPOSURE
+// notification (carrying the state and trigger ID) is sent only when the
+// state actually changes.
+void EmulatedFakeCamera2::ControlThread::updateAeState(uint8_t newState,
+        int32_t triggerId) {
+    Mutex::Autolock lock(mInputMutex);
+    // Unchanged state: nothing to record or notify.
+    if (mAeState == newState) {
+        return;
+    }
+    ALOGV("%s: Autoexposure state now %d, id %d", __FUNCTION__,
+            newState, triggerId);
+    mAeState = newState;
+    mParent->sendNotification(CAMERA2_MSG_AUTOEXPOSURE,
+            newState, triggerId, 0);
+}
+
+/** Private methods */
+
+// Builds the static camera-characteristics metadata for this fake camera.
+// When |sizeRequest| is true only entry/data counts are accumulated and
+// *info is allocated at the end; otherwise each property is appended to the
+// caller-allocated *info. Returns OK, NO_MEMORY on allocation failure, or
+// the first addOrSize() error.
+status_t EmulatedFakeCamera2::constructStaticInfo(
+        camera_metadata_t **info,
+        bool sizeRequest) const {
+
+    size_t entryCount = 0;
+    size_t dataCount = 0;
+    status_t ret;
+
+#define ADD_OR_SIZE( tag, data, count ) \
+    if ( ( ret = addOrSize(*info, sizeRequest, &entryCount, &dataCount, \
+            tag, data, count) ) != OK ) return ret
+
+    // android.lens
+
+    // 5 cm min focus distance for back camera, infinity (fixed focus) for front
+    const float minFocusDistance = mFacingBack ? 1.0/0.05 : 0.0;
+    ADD_OR_SIZE(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+            &minFocusDistance, 1);
+    // 5 m hyperfocal distance for back camera, infinity (fixed focus) for front
+    const float hyperFocalDistance = mFacingBack ? 1.0/5.0 : 0.0;
+    // BUG FIX: this entry previously published &minFocusDistance, leaving
+    // hyperFocalDistance computed but unused and the hyperfocal tag wrong.
+    ADD_OR_SIZE(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
+            &hyperFocalDistance, 1);
+
+    static const float focalLength = 3.30f; // mm
+    ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+            &focalLength, 1);
+    static const float aperture = 2.8f;
+    ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
+            &aperture, 1);
+    static const float filterDensity = 0;
+    ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES,
+            &filterDensity, 1);
+    static const uint8_t availableOpticalStabilization =
+            ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+    ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+            &availableOpticalStabilization, 1);
+
+    static const int32_t lensShadingMapSize[] = {1, 1};
+    ADD_OR_SIZE(ANDROID_LENS_INFO_SHADING_MAP_SIZE, lensShadingMapSize,
+            sizeof(lensShadingMapSize)/sizeof(int32_t));
+
+    int32_t lensFacing = mFacingBack ?
+            ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT;
+    ADD_OR_SIZE(ANDROID_LENS_FACING, &lensFacing, 1);
+
+    // android.sensor
+
+    ADD_OR_SIZE(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
+            Sensor::kExposureTimeRange, 2);
+
+    ADD_OR_SIZE(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+            &Sensor::kFrameDurationRange[1], 1);
+
+    ADD_OR_SIZE(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
+            Sensor::kSensitivityRange,
+            sizeof(Sensor::kSensitivityRange)
+            /sizeof(int32_t));
+
+    ADD_OR_SIZE(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+            &Sensor::kColorFilterArrangement, 1);
+
+    static const float sensorPhysicalSize[2] = {3.20f, 2.40f}; // mm
+    ADD_OR_SIZE(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
+            sensorPhysicalSize, 2);
+
+    ADD_OR_SIZE(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+            Sensor::kResolution, 2);
+
+    ADD_OR_SIZE(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+            Sensor::kResolution, 2);
+
+    ADD_OR_SIZE(ANDROID_SENSOR_INFO_WHITE_LEVEL,
+            &Sensor::kMaxRawValue, 1);
+
+    // BUG FIX: the static_cast calls below had lost their <int32_t> template
+    // argument (garbled text), which does not compile. Restored the explicit
+    // conversion of the sensor black-level constant to the int32_t entries.
+    static const int32_t blackLevelPattern[4] = {
+        static_cast<int32_t>(Sensor::kBlackLevel),
+        static_cast<int32_t>(Sensor::kBlackLevel),
+        static_cast<int32_t>(Sensor::kBlackLevel),
+        static_cast<int32_t>(Sensor::kBlackLevel)
+    };
+    ADD_OR_SIZE(ANDROID_SENSOR_BLACK_LEVEL_PATTERN,
+            blackLevelPattern, sizeof(blackLevelPattern)/sizeof(int32_t));
+
+    //TODO: sensor color calibration fields
+
+    // android.flash
+    static const uint8_t flashAvailable = 0;
+    ADD_OR_SIZE(ANDROID_FLASH_INFO_AVAILABLE, &flashAvailable, 1);
+
+    static const int64_t flashChargeDuration = 0;
+    ADD_OR_SIZE(ANDROID_FLASH_INFO_CHARGE_DURATION, &flashChargeDuration, 1);
+
+    // android.tonemap
+
+    static const int32_t tonemapCurvePoints = 128;
+    ADD_OR_SIZE(ANDROID_TONEMAP_MAX_CURVE_POINTS, &tonemapCurvePoints, 1);
+
+    // android.scaler
+
+    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_FORMATS,
+            kAvailableFormats,
+            sizeof(kAvailableFormats)/sizeof(uint32_t));
+
+    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_RAW_SIZES,
+            kAvailableRawSizes,
+            sizeof(kAvailableRawSizes)/sizeof(uint32_t));
+
+    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS,
+            kAvailableRawMinDurations,
+            sizeof(kAvailableRawMinDurations)/sizeof(uint64_t));
+
+    // Processed and JPEG size lists differ per facing direction.
+    if (mFacingBack) {
+        ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
+                kAvailableProcessedSizesBack,
+                sizeof(kAvailableProcessedSizesBack)/sizeof(uint32_t));
+    } else {
+        ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
+                kAvailableProcessedSizesFront,
+                sizeof(kAvailableProcessedSizesFront)/sizeof(uint32_t));
+    }
+
+    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS,
+            kAvailableProcessedMinDurations,
+            sizeof(kAvailableProcessedMinDurations)/sizeof(uint64_t));
+
+    if (mFacingBack) {
+        ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
+                kAvailableJpegSizesBack,
+                sizeof(kAvailableJpegSizesBack)/sizeof(uint32_t));
+    } else {
+        ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
+                kAvailableJpegSizesFront,
+                sizeof(kAvailableJpegSizesFront)/sizeof(uint32_t));
+    }
+
+    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS,
+            kAvailableJpegMinDurations,
+            sizeof(kAvailableJpegMinDurations)/sizeof(uint64_t));
+
+    static const float maxZoom = 10;
+    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+            &maxZoom, 1);
+
+    // android.jpeg
+
+    static const int32_t jpegThumbnailSizes[] = {
+            0, 0,
+            160, 120,
+            320, 240
+     };
+    ADD_OR_SIZE(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+            jpegThumbnailSizes, sizeof(jpegThumbnailSizes)/sizeof(int32_t));
+
+    static const int32_t jpegMaxSize = JpegCompressor::kMaxJpegSize;
+    ADD_OR_SIZE(ANDROID_JPEG_MAX_SIZE, &jpegMaxSize, 1);
+
+    // android.stats
+
+    static const uint8_t availableFaceDetectModes[] = {
+        ANDROID_STATISTICS_FACE_DETECT_MODE_OFF,
+        ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE,
+        ANDROID_STATISTICS_FACE_DETECT_MODE_FULL
+    };
+
+    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+            availableFaceDetectModes,
+            sizeof(availableFaceDetectModes));
+
+    static const int32_t maxFaceCount = 8;
+    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
+            &maxFaceCount, 1);
+
+    static const int32_t histogramSize = 64;
+    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT,
+            &histogramSize, 1);
+
+    static const int32_t maxHistogramCount = 1000;
+    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT,
+            &maxHistogramCount, 1);
+
+    static const int32_t sharpnessMapSize[2] = {64, 64};
+    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE,
+            sharpnessMapSize, sizeof(sharpnessMapSize)/sizeof(int32_t));
+
+    static const int32_t maxSharpnessMapValue = 1000;
+    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE,
+            &maxSharpnessMapValue, 1);
+
+    // android.control
+
+    static const uint8_t availableSceneModes[] = {
+            ANDROID_CONTROL_SCENE_MODE_DISABLED
+    };
+    ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
+            availableSceneModes, sizeof(availableSceneModes));
+
+    static const uint8_t availableEffects[] = {
+            ANDROID_CONTROL_EFFECT_MODE_OFF
+    };
+    ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_EFFECTS,
+            availableEffects, sizeof(availableEffects));
+
+    static const int32_t max3aRegions[] = {/*AE*/ 0,/*AWB*/ 0,/*AF*/ 0};
+    ADD_OR_SIZE(ANDROID_CONTROL_MAX_REGIONS,
+            max3aRegions, sizeof(max3aRegions)/sizeof(max3aRegions[0]));
+
+    static const uint8_t availableAeModes[] = {
+            ANDROID_CONTROL_AE_MODE_OFF,
+            ANDROID_CONTROL_AE_MODE_ON
+    };
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_MODES,
+            availableAeModes, sizeof(availableAeModes));
+
+    static const camera_metadata_rational exposureCompensationStep = {
+            1, 3
+    };
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+            &exposureCompensationStep, 1);
+
+    int32_t exposureCompensationRange[] = {-9, 9};
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+            exposureCompensationRange,
+            sizeof(exposureCompensationRange)/sizeof(int32_t));
+
+    static const int32_t availableTargetFpsRanges[] = {
+            5, 30, 15, 30
+    };
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+            availableTargetFpsRanges,
+            sizeof(availableTargetFpsRanges)/sizeof(int32_t));
+
+    static const uint8_t availableAntibandingModes[] = {
+            ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
+            ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO
+    };
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+            availableAntibandingModes, sizeof(availableAntibandingModes));
+
+    static const uint8_t availableAwbModes[] = {
+            ANDROID_CONTROL_AWB_MODE_OFF,
+            ANDROID_CONTROL_AWB_MODE_AUTO,
+            ANDROID_CONTROL_AWB_MODE_INCANDESCENT,
+            ANDROID_CONTROL_AWB_MODE_FLUORESCENT,
+            ANDROID_CONTROL_AWB_MODE_DAYLIGHT,
+            ANDROID_CONTROL_AWB_MODE_SHADE
+    };
+    ADD_OR_SIZE(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+            availableAwbModes, sizeof(availableAwbModes));
+
+    static const uint8_t availableAfModesBack[] = {
+            ANDROID_CONTROL_AF_MODE_OFF,
+            ANDROID_CONTROL_AF_MODE_AUTO,
+            ANDROID_CONTROL_AF_MODE_MACRO,
+            ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,
+            ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE
+    };
+
+    // The front camera is fixed-focus, so only AF_MODE_OFF is offered.
+    static const uint8_t availableAfModesFront[] = {
+            ANDROID_CONTROL_AF_MODE_OFF
+    };
+
+    if (mFacingBack) {
+        ADD_OR_SIZE(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+                    availableAfModesBack, sizeof(availableAfModesBack));
+    } else {
+        ADD_OR_SIZE(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+                    availableAfModesFront, sizeof(availableAfModesFront));
+    }
+
+    static const uint8_t availableVstabModes[] = {
+            ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF
+    };
+    ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+            availableVstabModes, sizeof(availableVstabModes));
+
+#undef ADD_OR_SIZE
+    /** Allocate metadata if sizing */
+    if (sizeRequest) {
+        ALOGV("Allocating %zu entries, %zu extra bytes for "
+                "static camera info",
+                entryCount, dataCount);
+        *info = allocate_camera_metadata(entryCount, dataCount);
+        if (*info == NULL) {
+            ALOGE("Unable to allocate camera static info"
+                    "(%zu entries, %zu bytes extra data)",
+                    entryCount, dataCount);
+            return NO_MEMORY;
+        }
+    }
+    return OK;
+}
+
+// Builds the default capture-request metadata for |request_template|.
+// When |sizeRequest| is true only entry/data counts are accumulated and
+// *request is allocated at the end; otherwise each setting is appended to
+// the caller-allocated *request. Returns OK, NO_MEMORY on allocation
+// failure, or the first addOrSize() error.
+status_t EmulatedFakeCamera2::constructDefaultRequest(
+        int request_template,
+        camera_metadata_t **request,
+        bool sizeRequest) const {
+
+    size_t entryCount = 0;
+    size_t dataCount = 0;
+    status_t ret;
+
+#define ADD_OR_SIZE( tag, data, count ) \
+    if ( ( ret = addOrSize(*request, sizeRequest, &entryCount, &dataCount, \
+            tag, data, count) ) != OK ) return ret
+
+    /** android.request */
+
+    static const uint8_t requestType = ANDROID_REQUEST_TYPE_CAPTURE;
+    ADD_OR_SIZE(ANDROID_REQUEST_TYPE, &requestType, 1);
+
+    static const uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
+    ADD_OR_SIZE(ANDROID_REQUEST_METADATA_MODE, &metadataMode, 1);
+
+    static const int32_t id = 0;
+    ADD_OR_SIZE(ANDROID_REQUEST_ID, &id, 1);
+
+    static const int32_t frameCount = 0;
+    ADD_OR_SIZE(ANDROID_REQUEST_FRAME_COUNT, &frameCount, 1);
+
+    // OUTPUT_STREAMS set by user
+    entryCount += 1;
+    dataCount += 5; // TODO: Should be maximum stream number
+
+    /** android.lens */
+
+    static const float focusDistance = 0;
+    ADD_OR_SIZE(ANDROID_LENS_FOCUS_DISTANCE, &focusDistance, 1);
+
+    static const float aperture = 2.8f;
+    ADD_OR_SIZE(ANDROID_LENS_APERTURE, &aperture, 1);
+
+    static const float focalLength = 5.0f;
+    ADD_OR_SIZE(ANDROID_LENS_FOCAL_LENGTH, &focalLength, 1);
+
+    static const float filterDensity = 0;
+    ADD_OR_SIZE(ANDROID_LENS_FILTER_DENSITY, &filterDensity, 1);
+
+    static const uint8_t opticalStabilizationMode =
+            ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+    ADD_OR_SIZE(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+            &opticalStabilizationMode, 1);
+
+    // FOCUS_RANGE set only in frame
+
+    /** android.sensor */
+
+    static const int64_t exposureTime = 10 * MSEC;
+    ADD_OR_SIZE(ANDROID_SENSOR_EXPOSURE_TIME, &exposureTime, 1);
+
+    static const int64_t frameDuration = 33333333L; // 1/30 s
+    ADD_OR_SIZE(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
+
+    static const int32_t sensitivity = 100;
+    ADD_OR_SIZE(ANDROID_SENSOR_SENSITIVITY, &sensitivity, 1);
+
+    // TIMESTAMP set only in frame
+
+    /** android.flash */
+
+    static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
+    ADD_OR_SIZE(ANDROID_FLASH_MODE, &flashMode, 1);
+
+    static const uint8_t flashPower = 10;
+    ADD_OR_SIZE(ANDROID_FLASH_FIRING_POWER, &flashPower, 1);
+
+    static const int64_t firingTime = 0;
+    ADD_OR_SIZE(ANDROID_FLASH_FIRING_TIME, &firingTime, 1);
+
+    /** Processing block modes */
+    // Still/snapshot/ZSL templates get HIGH_QUALITY processing; preview,
+    // video, and unknown templates get FAST.
+    uint8_t hotPixelMode = 0;
+    uint8_t demosaicMode = 0;
+    uint8_t noiseMode = 0;
+    uint8_t shadingMode = 0;
+    uint8_t colorMode = 0;
+    uint8_t tonemapMode = 0;
+    uint8_t edgeMode = 0;
+    switch (request_template) {
+      case CAMERA2_TEMPLATE_STILL_CAPTURE:
+        // fall-through
+      case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+        // fall-through
+      case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+        hotPixelMode = ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY;
+        demosaicMode = ANDROID_DEMOSAIC_MODE_HIGH_QUALITY;
+        noiseMode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY;
+        shadingMode = ANDROID_SHADING_MODE_HIGH_QUALITY;
+        colorMode = ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY;
+        tonemapMode = ANDROID_TONEMAP_MODE_HIGH_QUALITY;
+        edgeMode = ANDROID_EDGE_MODE_HIGH_QUALITY;
+        break;
+      case CAMERA2_TEMPLATE_PREVIEW:
+        // fall-through
+      case CAMERA2_TEMPLATE_VIDEO_RECORD:
+        // fall-through
+      default:
+        hotPixelMode = ANDROID_HOT_PIXEL_MODE_FAST;
+        demosaicMode = ANDROID_DEMOSAIC_MODE_FAST;
+        noiseMode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+        shadingMode = ANDROID_SHADING_MODE_FAST;
+        colorMode = ANDROID_COLOR_CORRECTION_MODE_FAST;
+        tonemapMode = ANDROID_TONEMAP_MODE_FAST;
+        edgeMode = ANDROID_EDGE_MODE_FAST;
+        break;
+    }
+    ADD_OR_SIZE(ANDROID_HOT_PIXEL_MODE, &hotPixelMode, 1);
+    ADD_OR_SIZE(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1);
+    ADD_OR_SIZE(ANDROID_NOISE_REDUCTION_MODE, &noiseMode, 1);
+    ADD_OR_SIZE(ANDROID_SHADING_MODE, &shadingMode, 1);
+    ADD_OR_SIZE(ANDROID_COLOR_CORRECTION_MODE, &colorMode, 1);
+    ADD_OR_SIZE(ANDROID_TONEMAP_MODE, &tonemapMode, 1);
+    ADD_OR_SIZE(ANDROID_EDGE_MODE, &edgeMode, 1);
+
+    /** android.noise */
+    static const uint8_t noiseStrength = 5;
+    ADD_OR_SIZE(ANDROID_NOISE_REDUCTION_STRENGTH, &noiseStrength, 1);
+
+    /** android.color */
+    // Identity matrix: no color correction applied by default.
+    static const float colorTransform[9] = {
+        1.0f, 0.f, 0.f,
+        0.f, 1.f, 0.f,
+        0.f, 0.f, 1.f
+    };
+    ADD_OR_SIZE(ANDROID_COLOR_CORRECTION_TRANSFORM, colorTransform, 9);
+
+    /** android.tonemap */
+    // Linear tonemap curve, applied to all three channels.
+    static const float tonemapCurve[4] = {
+        0.f, 0.f,
+        1.f, 1.f
+    };
+    ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_RED, tonemapCurve, 4);
+    ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_GREEN, tonemapCurve, 4);
+    ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_BLUE, tonemapCurve, 4);
+
+    /** android.edge */
+    static const uint8_t edgeStrength = 5;
+    ADD_OR_SIZE(ANDROID_EDGE_STRENGTH, &edgeStrength, 1);
+
+    /** android.scaler */
+    // BUG FIX: the static_cast below had lost its <int32_t> template
+    // argument (garbled text), which does not compile.
+    static const int32_t cropRegion[3] = {
+        0, 0, static_cast<int32_t>(Sensor::kResolution[0])
+    };
+    ADD_OR_SIZE(ANDROID_SCALER_CROP_REGION, cropRegion, 3);
+
+    /** android.jpeg */
+    static const int32_t jpegQuality = 80;
+    ADD_OR_SIZE(ANDROID_JPEG_QUALITY, &jpegQuality, 1);
+
+    static const int32_t thumbnailSize[2] = {
+        640, 480
+    };
+    ADD_OR_SIZE(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnailSize, 2);
+
+    static const int32_t thumbnailQuality = 80;
+    ADD_OR_SIZE(ANDROID_JPEG_THUMBNAIL_QUALITY, &thumbnailQuality, 1);
+
+    static const double gpsCoordinates[2] = {
+        0, 0
+    };
+    ADD_OR_SIZE(ANDROID_JPEG_GPS_COORDINATES, gpsCoordinates, 2);
+
+    static const uint8_t gpsProcessingMethod[32] = "None";
+    ADD_OR_SIZE(ANDROID_JPEG_GPS_PROCESSING_METHOD, gpsProcessingMethod, 32);
+
+    static const int64_t gpsTimestamp = 0;
+    ADD_OR_SIZE(ANDROID_JPEG_GPS_TIMESTAMP, &gpsTimestamp, 1);
+
+    static const int32_t jpegOrientation = 0;
+    ADD_OR_SIZE(ANDROID_JPEG_ORIENTATION, &jpegOrientation, 1);
+
+    /** android.stats */
+
+    static const uint8_t faceDetectMode =
+        ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+    ADD_OR_SIZE(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
+
+    static const uint8_t histogramMode = ANDROID_STATISTICS_HISTOGRAM_MODE_OFF;
+    ADD_OR_SIZE(ANDROID_STATISTICS_HISTOGRAM_MODE, &histogramMode, 1);
+
+    static const uint8_t sharpnessMapMode =
+        ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF;
+    ADD_OR_SIZE(ANDROID_STATISTICS_SHARPNESS_MAP_MODE, &sharpnessMapMode, 1);
+
+    // faceRectangles, faceScores, faceLandmarks, faceIds, histogram,
+    // sharpnessMap only in frames
+
+    /** android.control */
+
+    uint8_t controlIntent = 0;
+    switch (request_template) {
+      case CAMERA2_TEMPLATE_PREVIEW:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+        break;
+      case CAMERA2_TEMPLATE_STILL_CAPTURE:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+        break;
+      case CAMERA2_TEMPLATE_VIDEO_RECORD:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+        break;
+      case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
+        break;
+      case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+        break;
+      default:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
+        break;
+    }
+    ADD_OR_SIZE(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
+
+    static const uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
+    ADD_OR_SIZE(ANDROID_CONTROL_MODE, &controlMode, 1);
+
+    static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+    ADD_OR_SIZE(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
+
+    static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+    ADD_OR_SIZE(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+
+    static const uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH;
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
+
+    static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
+
+    // Full-sensor metering region with weight 1000, shared by AE/AWB/AF.
+    // BUG FIX: the static_cast calls below had lost their <int32_t> template
+    // argument (garbled text), which does not compile.
+    static const int32_t controlRegions[5] = {
+        0, 0,
+        static_cast<int32_t>(Sensor::kResolution[0]),
+        static_cast<int32_t>(Sensor::kResolution[1]),
+        1000
+    };
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_REGIONS, controlRegions, 5);
+
+    static const int32_t aeExpCompensation = 0;
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &aeExpCompensation, 1);
+
+    static const int32_t aeTargetFpsRange[2] = {
+        10, 30
+    };
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, aeTargetFpsRange, 2);
+
+    static const uint8_t aeAntibandingMode =
+        ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+    ADD_OR_SIZE(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &aeAntibandingMode, 1);
+
+    static const uint8_t awbMode =
+        ANDROID_CONTROL_AWB_MODE_AUTO;
+    ADD_OR_SIZE(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
+
+    static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
+    ADD_OR_SIZE(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
+
+    ADD_OR_SIZE(ANDROID_CONTROL_AWB_REGIONS, controlRegions, 5);
+
+    // AF mode follows the template: continuous for video/ZSL, auto otherwise.
+    uint8_t afMode = 0;
+    switch (request_template) {
+      case CAMERA2_TEMPLATE_PREVIEW:
+        afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+        break;
+      case CAMERA2_TEMPLATE_STILL_CAPTURE:
+        afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+        break;
+      case CAMERA2_TEMPLATE_VIDEO_RECORD:
+        afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+        break;
+      case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+        afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+        break;
+      case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+        afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+        break;
+      default:
+        afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+        break;
+    }
+    ADD_OR_SIZE(ANDROID_CONTROL_AF_MODE, &afMode, 1);
+
+    ADD_OR_SIZE(ANDROID_CONTROL_AF_REGIONS, controlRegions, 5);
+
+    static const uint8_t vstabMode =
+        ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+    ADD_OR_SIZE(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vstabMode, 1);
+
+    // aeState, awbState, afState only in frame
+
+    /** Allocate metadata if sizing */
+    if (sizeRequest) {
+        ALOGV("Allocating %zu entries, %zu extra bytes for "
+                "request template type %d",
+                entryCount, dataCount, request_template);
+        *request = allocate_camera_metadata(entryCount, dataCount);
+        if (*request == NULL) {
+            ALOGE("Unable to allocate new request template type %d "
+                    "(%zu entries, %zu bytes extra data)", request_template,
+                    entryCount, dataCount);
+            return NO_MEMORY;
+        }
+    }
+    return OK;
+#undef ADD_OR_SIZE
+}
+
+// Adds one metadata entry to |request| when !sizeRequest; when sizing,
+// instead accumulates the entry count and its payload size into
+// |entryCount|/|dataCount| so the caller can allocate a correctly sized
+// metadata buffer. Returns OK, BAD_VALUE for an unknown tag while sizing,
+// or the add_camera_metadata_entry() status.
+status_t EmulatedFakeCamera2::addOrSize(camera_metadata_t *request,
+        bool sizeRequest,
+        size_t *entryCount,
+        size_t *dataCount,
+        uint32_t tag,
+        const void *entryData,
+        size_t entryDataCount) {
+    // Removed an unused local "status_t res;" that was never assigned.
+    if (!sizeRequest) {
+        return add_camera_metadata_entry(request, tag, entryData,
+                entryDataCount);
+    } else {
+        // Unknown tags have no type and therefore no computable size.
+        int type = get_camera_metadata_tag_type(tag);
+        if (type < 0 ) return BAD_VALUE;
+        (*entryCount)++;
+        (*dataCount) += calculate_camera_metadata_entry_data_size(type,
+                entryDataCount);
+        return OK;
+    }
+}
+
+// Returns true when stream |id| is referenced by any in-flight request in
+// the configure, readout, or JPEG-compression stages.
+bool EmulatedFakeCamera2::isStreamInUse(uint32_t id) {
+    // Assumes mMutex is locked; otherwise new requests could enter
+    // configureThread while readoutThread is being checked
+
+    // Order of isStreamInUse calls matters
+    if (mConfigureThread->isStreamInUse(id) ||
+            mReadoutThread->isStreamInUse(id) ||
+            mJpegCompressor->isStreamInUse(id) ) {
+        ALOGE("%s: Stream %d is in use in active requests!",
+                __FUNCTION__, id);
+        return true;
+    }
+    return false;
+}
+
+// Reprocess streams are not yet tracked; always reports not-in-use.
+bool EmulatedFakeCamera2::isReprocessStreamInUse(uint32_t id) {
+    // TODO: implement
+    return false;
+}
+
+// Looks up the output stream registered under |streamId| in mStreams,
+// holding mMutex for the duration of the lookup.
+const Stream& EmulatedFakeCamera2::getStreamInfo(uint32_t streamId) {
+    Mutex::Autolock lock(mMutex);
+
+    return mStreams.valueFor(streamId);
+}
+
+// Looks up the reprocess stream registered under |streamId| in
+// mReprocessStreams, holding mMutex for the duration of the lookup.
+const ReprocessStream& EmulatedFakeCamera2::getReprocessStreamInfo(uint32_t streamId) {
+    Mutex::Autolock lock(mMutex);
+
+    return mReprocessStreams.valueFor(streamId);
+}
+
+}; /* namespace android */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera2.h b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera2.h
new file mode 100644
index 0000000..64c8667
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera2.h
@@ -0,0 +1,429 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA2_H
+#define HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA2_H
+
+/*
+ * Contains declaration of a class EmulatedFakeCamera2 that encapsulates
+ * functionality of a fake camera that implements version 2 of the camera device
+ * interface.
+ */
+
+#include "EmulatedCamera2.h"
+#include "fake-pipeline2/Base.h"
+#include "fake-pipeline2/Sensor.h"
+#include "fake-pipeline2/JpegCompressor.h"
+#include <utils/Condition.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
+#include <utils/Thread.h>
+
+namespace android {
+
+/* Encapsulates functionality of an advanced fake camera. This camera contains
+ * a simple simulation of a scene, sensor, and image processing pipeline.
+ */
+class EmulatedFakeCamera2 : public EmulatedCamera2 {
+public:
+    /* Constructs EmulatedFakeCamera instance. */
+    EmulatedFakeCamera2(int cameraId, bool facingBack, struct hw_module_t* module);
+
+    /* Destructs EmulatedFakeCamera instance. */
+    ~EmulatedFakeCamera2();
+
+    /****************************************************************************
+     * EmulatedCamera2 virtual overrides.
+     ***************************************************************************/
+
+public:
+    /* Initializes EmulatedFakeCamera2 instance. */
+    status_t Initialize();
+
+    /****************************************************************************
+     * Camera Module API and generic hardware device API implementation
+     ***************************************************************************/
+public:
+
+    virtual status_t connectCamera(hw_device_t** device);
+
+    virtual status_t plugCamera();
+    virtual status_t unplugCamera();
+    virtual camera_device_status_t getHotplugStatus();
+
+    virtual status_t closeCamera();
+
+    virtual status_t getCameraInfo(struct camera_info *info);
+
+    /****************************************************************************
+     * EmulatedCamera2 abstract API implementation.
+     ***************************************************************************/
+protected:
+    /** Request input queue */
+
+    virtual int requestQueueNotify();
+
+    /** Count of requests in flight */
+    virtual int getInProgressCount();
+
+    /** Cancel all captures in flight */
+    //virtual int flushCapturesInProgress();
+
+    /** Construct default request */
+    virtual int constructDefaultRequest(
+            int request_template,
+            camera_metadata_t **request);
+
+    virtual int allocateStream(
+            uint32_t width,
+            uint32_t height,
+            int format,
+            const camera2_stream_ops_t *stream_ops,
+            uint32_t *stream_id,
+            uint32_t *format_actual,
+            uint32_t *usage,
+            uint32_t *max_buffers);
+
+    virtual int registerStreamBuffers(
+            uint32_t stream_id,
+            int num_buffers,
+            buffer_handle_t *buffers);
+
+    virtual int releaseStream(uint32_t stream_id);
+
+    // virtual int allocateReprocessStream(
+    //         uint32_t width,
+    //         uint32_t height,
+    //         uint32_t format,
+    //         const camera2_stream_ops_t *stream_ops,
+    //         uint32_t *stream_id,
+    //         uint32_t *format_actual,
+    //         uint32_t *usage,
+    //         uint32_t *max_buffers);
+
+    virtual int allocateReprocessStreamFromStream(
+            uint32_t output_stream_id,
+            const camera2_stream_in_ops_t *stream_ops,
+            uint32_t *stream_id);
+
+    virtual int releaseReprocessStream(uint32_t stream_id);
+
+    virtual int triggerAction(uint32_t trigger_id,
+            int32_t ext1,
+            int32_t ext2);
+
+    /** Debug methods */
+
+    virtual int dump(int fd);
+
+public:
+    /****************************************************************************
+     * Utility methods called by configure/readout threads and pipeline
+     ***************************************************************************/
+
+    // Get information about a given stream. Will lock mMutex
+    const Stream &getStreamInfo(uint32_t streamId);
+    const ReprocessStream &getReprocessStreamInfo(uint32_t streamId);
+
+    // Notifies rest of camera subsystem of serious error
+    void signalError();
+
+private:
+    /****************************************************************************
+     * Utility methods
+     ***************************************************************************/
+    /** Construct static camera metadata, two-pass */
+    status_t constructStaticInfo(
+            camera_metadata_t **info,
+            bool sizeRequest) const;
+
+    /** Two-pass implementation of constructDefaultRequest */
+    status_t constructDefaultRequest(
+            int request_template,
+            camera_metadata_t **request,
+            bool sizeRequest) const;
+    /** Helper function for constructDefaultRequest */
+    static status_t addOrSize( camera_metadata_t *request,
+            bool sizeRequest,
+            size_t *entryCount,
+            size_t *dataCount,
+            uint32_t tag,
+            const void *entry_data,
+            size_t entry_count);
+
+    /** Determine if the stream id is listed in any currently-in-flight
+     * requests. Assumes mMutex is locked */
+    bool isStreamInUse(uint32_t streamId);
+
+    /** Determine if the reprocess stream id is listed in any
+     * currently-in-flight requests. Assumes mMutex is locked */
+    bool isReprocessStreamInUse(uint32_t streamId);
+
+    /****************************************************************************
+     * Pipeline controller threads
+     ***************************************************************************/
+
+    class ConfigureThread: public Thread {
+      public:
+        ConfigureThread(EmulatedFakeCamera2 *parent);
+        ~ConfigureThread();
+
+        status_t waitUntilRunning();
+        status_t newRequestAvailable();
+        status_t readyToRun();
+
+        bool isStreamInUse(uint32_t id);
+        int getInProgressCount();
+      private:
+        EmulatedFakeCamera2 *mParent;
+        static const nsecs_t kWaitPerLoop = 10000000L; // 10 ms
+
+        bool mRunning;
+        bool threadLoop();
+
+        bool setupCapture();
+        bool setupReprocess();
+
+        bool configureNextCapture();
+        bool configureNextReprocess();
+
+        bool getBuffers();
+
+        Mutex mInputMutex; // Protects mActive, mRequestCount
+        Condition mInputSignal;
+        bool mActive; // Whether we're waiting for input requests or actively
+                      // working on them
+        size_t mRequestCount;
+
+        camera_metadata_t *mRequest;
+
+        Mutex mInternalsMutex; // Lock before accessing below members.
+        bool mWaitingForReadout;
+        bool mNextNeedsJpeg;
+        bool mNextIsCapture;
+        int32_t mNextFrameNumber;
+        int64_t mNextExposureTime;
+        int64_t mNextFrameDuration;
+        int32_t mNextSensitivity;
+        Buffers *mNextBuffers;
+    };
+
+    class ReadoutThread: public Thread, private JpegCompressor::JpegListener {
+      public:
+        ReadoutThread(EmulatedFakeCamera2 *parent);
+        ~ReadoutThread();
+
+        status_t readyToRun();
+
+        // Input
+        status_t waitUntilRunning();
+        bool waitForReady(nsecs_t timeout);
+        void setNextOperation(bool isCapture,
+                camera_metadata_t *request,
+                Buffers *buffers);
+        bool isStreamInUse(uint32_t id);
+        int getInProgressCount();
+      private:
+        EmulatedFakeCamera2 *mParent;
+
+        bool mRunning;
+        bool threadLoop();
+
+        bool readyForNextCapture();
+        status_t collectStatisticsMetadata(camera_metadata_t *frame);
+
+        // Inputs
+        Mutex mInputMutex; // Protects mActive, mInFlightQueue, mRequestCount
+        Condition mInputSignal;
+        Condition mReadySignal;
+
+        bool mActive;
+
+        static const int kInFlightQueueSize = 4;
+        struct InFlightQueue {
+            bool isCapture;
+            camera_metadata_t *request;
+            Buffers *buffers;
+        } *mInFlightQueue;
+
+        size_t mInFlightHead;
+        size_t mInFlightTail;
+
+        size_t mRequestCount;
+
+        // Internals
+        Mutex mInternalsMutex;
+
+        bool mIsCapture;
+        camera_metadata_t *mRequest;
+        Buffers *mBuffers;
+
+        // Jpeg completion listeners
+        void onJpegDone(const StreamBuffer &jpegBuffer, bool success);
+        void onJpegInputDone(const StreamBuffer &inputBuffer);
+        nsecs_t mJpegTimestamp;
+    };
+
+    // 3A management thread (auto-exposure, focus, white balance)
+    class ControlThread: public Thread {
+      public:
+        ControlThread(EmulatedFakeCamera2 *parent);
+        ~ControlThread();
+
+        status_t readyToRun();
+
+        status_t waitUntilRunning();
+
+        // Interpret request's control parameters and override
+        // capture settings as needed
+        status_t processRequest(camera_metadata_t *request);
+
+        status_t triggerAction(uint32_t msgType,
+                int32_t ext1, int32_t ext2);
+      private:
+        ControlThread(const ControlThread &t);
+        ControlThread& operator=(const ControlThread &t);
+
+        // Constants controlling fake 3A behavior
+        static const nsecs_t kControlCycleDelay;
+        static const nsecs_t kMinAfDuration;
+        static const nsecs_t kMaxAfDuration;
+        static const float kAfSuccessRate;
+        static const float kContinuousAfStartRate;
+
+        static const float kAeScanStartRate;
+        static const nsecs_t kMinAeDuration;
+        static const nsecs_t kMaxAeDuration;
+        static const nsecs_t kMinPrecaptureAeDuration;
+        static const nsecs_t kMaxPrecaptureAeDuration;
+
+        static const nsecs_t kNormalExposureTime;
+        static const nsecs_t kExposureJump;
+        static const nsecs_t kMinExposureTime;
+
+        EmulatedFakeCamera2 *mParent;
+
+        bool mRunning;
+        bool threadLoop();
+
+        Mutex mInputMutex; // Protects input methods
+        Condition mInputSignal;
+
+        // Trigger notifications
+        bool mStartAf;
+        bool mCancelAf;
+        bool mStartPrecapture;
+
+        // Latest state for 3A request fields
+        uint8_t mControlMode;
+
+        uint8_t mEffectMode;
+        uint8_t mSceneMode;
+
+        uint8_t mAfMode;
+        bool mAfModeChange;
+
+        uint8_t mAwbMode;
+        uint8_t mAeMode;
+
+        // Latest trigger IDs
+        int32_t mAfTriggerId;
+        int32_t mPrecaptureTriggerId;
+
+        // Current state for 3A algorithms
+        uint8_t mAfState;
+        uint8_t mAeState;
+        uint8_t mAwbState;
+        bool mAeLock;
+
+        // Current control parameters
+        nsecs_t mExposureTime;
+
+        // Private to threadLoop and its utility methods
+
+        nsecs_t mAfScanDuration;
+        nsecs_t mAeScanDuration;
+        bool mLockAfterPassiveScan;
+
+        // Utility methods for AF
+        int processAfTrigger(uint8_t afMode, uint8_t afState);
+        int maybeStartAfScan(uint8_t afMode, uint8_t afState);
+        int updateAfScan(uint8_t afMode, uint8_t afState, nsecs_t *maxSleep);
+        void updateAfState(uint8_t newState, int32_t triggerId);
+
+        // Utility methods for precapture trigger
+        int processPrecaptureTrigger(uint8_t aeMode, uint8_t aeState);
+        int maybeStartAeScan(uint8_t aeMode, bool aeLock, uint8_t aeState);
+        int updateAeScan(uint8_t aeMode, bool aeLock, uint8_t aeState,
+                nsecs_t *maxSleep);
+        void updateAeState(uint8_t newState, int32_t triggerId);
+    };
+
+    /****************************************************************************
+     * Static configuration information
+     ***************************************************************************/
+private:
+    static const uint32_t kMaxRawStreamCount = 1;
+    static const uint32_t kMaxProcessedStreamCount = 3;
+    static const uint32_t kMaxJpegStreamCount = 1;
+    static const uint32_t kMaxReprocessStreamCount = 2;
+    static const uint32_t kMaxBufferCount = 4;
+    static const uint32_t kAvailableFormats[];
+    static const uint32_t kAvailableRawSizes[];
+    static const uint64_t kAvailableRawMinDurations[];
+    static const uint32_t kAvailableProcessedSizesBack[];
+    static const uint32_t kAvailableProcessedSizesFront[];
+    static const uint64_t kAvailableProcessedMinDurations[];
+    static const uint32_t kAvailableJpegSizesBack[];
+    static const uint32_t kAvailableJpegSizesFront[];
+    static const uint64_t kAvailableJpegMinDurations[];
+
+    /****************************************************************************
+     * Data members.
+     ***************************************************************************/
+
+protected:
+    /* Facing back (true) or front (false) switch. */
+    bool mFacingBack;
+
+private:
+    bool mIsConnected;
+
+    /** Stream manipulation */
+    uint32_t mNextStreamId;
+    uint32_t mRawStreamCount;
+    uint32_t mProcessedStreamCount;
+    uint32_t mJpegStreamCount;
+
+    uint32_t mNextReprocessStreamId;
+    uint32_t mReprocessStreamCount;
+
+    // NOTE(review): the template arguments below were missing in the patch
+    // (angle-bracket contents stripped); restored to match the upstream AOSP
+    // goldfish camera HAL declarations.
+    KeyedVector<uint32_t, Stream> mStreams;
+    KeyedVector<uint32_t, ReprocessStream> mReprocessStreams;
+
+    /** Simulated hardware interfaces */
+    sp<Sensor> mSensor;
+    sp<JpegCompressor> mJpegCompressor;
+
+    /** Pipeline control threads */
+    sp<ConfigureThread> mConfigureThread;
+    sp<ReadoutThread> mReadoutThread;
+    sp<ControlThread> mControlThread;
+};
+
+}; /* namespace android */
+
+#endif /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA2_H */
diff --git a/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera3.cpp b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera3.cpp
new file mode 100644
index 0000000..50d6096
--- /dev/null
+++ b/src/type3_AndroidCloud/anbox-master/android/camera/EmulatedFakeCamera3.cpp
@@ -0,0 +1,2519 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Contains implementation of a class EmulatedFakeCamera3 that encapsulates
+ * functionality of an advanced fake camera.
+ */
+
+#include
+
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+#define LOG_TAG "EmulatedCamera_FakeCamera3"
+#include
+#include
+
+#include "EmulatedFakeCamera3.h"
+#include "EmulatedCameraFactory.h"
+#include
+#include
+#include
+#include "gralloc_cb.h"
+
+#include "fake-pipeline2/Sensor.h"
+#include "fake-pipeline2/JpegCompressor.h"
+#include
+
+#include
+
+#if defined(LOG_NNDEBUG) && LOG_NNDEBUG == 0
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+namespace android {
+
+/**
+ * Constants for camera capabilities
+ */
+
+// Time-unit multipliers, expressed in nanoseconds (1 us = 1000 ns).
+const int64_t USEC = 1000LL;
+const int64_t MSEC = USEC * 1000LL;
+const int64_t SEC = MSEC * 1000LL;
+
+// Pixel formats this fake HAL accepts in configureStreams().
+const int32_t EmulatedFakeCamera3::kAvailableFormats[] = {
+ HAL_PIXEL_FORMAT_RAW16,
+ HAL_PIXEL_FORMAT_BLOB,
+ HAL_PIXEL_FORMAT_RGBA_8888,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+ // These are handled by YCbCr_420_888
+ // HAL_PIXEL_FORMAT_YV12,
+ // HAL_PIXEL_FORMAT_YCrCb_420_SP,
+ HAL_PIXEL_FORMAT_YCbCr_420_888,
+ HAL_PIXEL_FORMAT_Y16
+};
+
+/**
+ * 3A constants
+ */
+
+// Default exposure and gain targets for different scenarios
+const nsecs_t EmulatedFakeCamera3::kNormalExposureTime = 10 * MSEC;
+const nsecs_t EmulatedFakeCamera3::kFacePriorityExposureTime = 30 * MSEC;
+const int EmulatedFakeCamera3::kNormalSensitivity = 100;
+const int EmulatedFakeCamera3::kFacePrioritySensitivity = 400;
+const float EmulatedFakeCamera3::kExposureTrackRate = 0.1;
+const int EmulatedFakeCamera3::kPrecaptureMinFrames = 10;
+const int EmulatedFakeCamera3::kStableAeMaxFrames = 100;
+const float EmulatedFakeCamera3::kExposureWanderMin = -2;
+const float EmulatedFakeCamera3::kExposureWanderMax = 1;
+
+/**
+ * Camera device lifecycle methods
+ */
+
+EmulatedFakeCamera3::EmulatedFakeCamera3(int cameraId, bool facingBack,
+        struct hw_module_t* module) :
+        EmulatedCamera3(cameraId, module),
+        mFacingBack(facingBack) {
+    ALOGI("Constructing emulated fake camera 3: ID %d, facing %s",
+            mCameraID, facingBack ? "back" : "front");
+
+    // No request templates exist yet; they are built lazily and cached by
+    // constructDefaultRequestSettings().
+    for (size_t t = 0; t < CAMERA3_TEMPLATE_COUNT; t++) {
+        mDefaultTemplates[t] = NULL;
+    }
+}
+
+EmulatedFakeCamera3::~EmulatedFakeCamera3() {
+    // Release any request templates that were built and cached.
+    for (size_t t = 0; t < CAMERA3_TEMPLATE_COUNT; t++) {
+        if (mDefaultTemplates[t] == NULL) continue;
+        free_camera_metadata(mDefaultTemplates[t]);
+    }
+}
+
+status_t EmulatedFakeCamera3::Initialize() {
+    ALOGV("%s: E", __FUNCTION__);
+
+    // Only legal from the initial (ERROR) state.
+    if (mStatus != STATUS_ERROR) {
+        ALOGE("%s: Already initialized!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    // Discover capabilities first, then build the static metadata that
+    // depends on them, then hand off to the base class.
+    status_t res = getCameraCapabilities();
+    if (res != OK) {
+        ALOGE("%s: Unable to get camera capabilities: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    res = constructStaticInfo();
+    if (res != OK) {
+        ALOGE("%s: Unable to allocate static info: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    return EmulatedCamera3::Initialize();
+}
+
+// Opens the device: brings up the simulated sensor, starts the readout
+// thread, and resets the fake 3A state machines before delegating to the
+// base class. Start-up order matters: the sensor must be running before the
+// readout thread is started.
+status_t EmulatedFakeCamera3::connectCamera(hw_device_t** device) {
+ ALOGV("%s: E", __FUNCTION__);
+ Mutex::Autolock l(mLock);
+ status_t res;
+
+ // Connecting is only valid from the fully-closed state.
+ if (mStatus != STATUS_CLOSED) {
+ ALOGE("%s: Can't connect in state %d", __FUNCTION__, mStatus);
+ return INVALID_OPERATION;
+ }
+
+ mSensor = new Sensor();
+ mSensor->setSensorListener(this);
+
+ res = mSensor->startUp();
+ if (res != NO_ERROR) return res;
+
+ mReadoutThread = new ReadoutThread(this);
+ mJpegCompressor = new JpegCompressor();
+
+ res = mReadoutThread->run("EmuCam3::readoutThread");
+ if (res != NO_ERROR) return res;
+
+ // Initialize fake 3A
+
+ mControlMode = ANDROID_CONTROL_MODE_AUTO;
+ mFacePriority = false;
+ mAeMode = ANDROID_CONTROL_AE_MODE_ON;
+ mAfMode = ANDROID_CONTROL_AF_MODE_AUTO;
+ mAwbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+ mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+ mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
+ mAeCounter = 0;
+ mAeTargetExposureTime = kNormalExposureTime;
+ mAeCurrentExposureTime = kNormalExposureTime;
+ mAeCurrentSensitivity = kNormalSensitivity;
+
+ return EmulatedCamera3::connectCamera(device);
+}
+
+// Shuts the device down: stops the sensor, exits and joins the readout
+// thread, then frees per-stream private data. The readout thread is joined
+// OUTSIDE the lock scope so it can finish its current iteration.
+status_t EmulatedFakeCamera3::closeCamera() {
+    ALOGV("%s: E", __FUNCTION__);
+    status_t res;
+    {
+        Mutex::Autolock l(mLock);
+        if (mStatus == STATUS_CLOSED) return OK;
+
+        res = mSensor->shutDown();
+        if (res != NO_ERROR) {
+            ALOGE("%s: Unable to shut down sensor: %d", __FUNCTION__, res);
+            return res;
+        }
+        mSensor.clear();
+
+        mReadoutThread->requestExit();
+    }
+
+    mReadoutThread->join();
+
+    {
+        Mutex::Autolock l(mLock);
+        // Clear out private stream information
+        for (StreamIterator s = mStreams.begin(); s != mStreams.end(); s++) {
+            // Fixed: the patch had lost the static_cast template argument.
+            PrivateStreamInfo *privStream =
+                    static_cast<PrivateStreamInfo*>((*s)->priv);
+            delete privStream;
+            (*s)->priv = NULL;
+        }
+        mStreams.clear();
+        mReadoutThread.clear();
+    }
+
+    return EmulatedCamera3::closeCamera();
+}
+
+status_t EmulatedFakeCamera3::getCameraInfo(struct camera_info *info) {
+    // Fill in static facing/orientation, then defer to the base class.
+    if (mFacingBack) {
+        info->facing = CAMERA_FACING_BACK;
+    } else {
+        info->facing = CAMERA_FACING_FRONT;
+    }
+    info->orientation = gEmulatedCameraFactory.getFakeCameraOrientation();
+    return EmulatedCamera3::getCameraInfo(info);
+}
+
+/**
+ * Camera3 interface methods
+ */
+
+// Validates a proposed stream set, registers new streams, refreshes usage
+// flags and buffer counts on all of them, and deletes streams dropped from
+// the configuration.
+// Fixes: (1) 'streamList' was dereferenced in the entry ALOGV before its
+// NULL check — the checks now run first; (2) the patch had lost the
+// static_cast template arguments on the priv-pointer casts.
+status_t EmulatedFakeCamera3::configureStreams(
+        camera3_stream_configuration *streamList) {
+    Mutex::Autolock l(mLock);
+
+    /**
+     * Sanity-check input list before touching it.
+     */
+    if (streamList == NULL) {
+        ALOGE("%s: NULL stream configuration", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    ALOGV("%s: %d streams", __FUNCTION__, streamList->num_streams);
+
+    if (mStatus != STATUS_OPEN && mStatus != STATUS_READY) {
+        ALOGE("%s: Cannot configure streams in state %d",
+                __FUNCTION__, mStatus);
+        return NO_INIT;
+    }
+
+    if (streamList->streams == NULL) {
+        ALOGE("%s: NULL stream list", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    if (streamList->num_streams < 1) {
+        ALOGE("%s: Bad number of streams requested: %d", __FUNCTION__,
+                streamList->num_streams);
+        return BAD_VALUE;
+    }
+
+    // At most one input (or bidirectional) stream is allowed, and every
+    // stream must use a format from kAvailableFormats.
+    camera3_stream_t *inputStream = NULL;
+    for (size_t i = 0; i < streamList->num_streams; i++) {
+        camera3_stream_t *newStream = streamList->streams[i];
+
+        if (newStream == NULL) {
+            ALOGE("%s: Stream index %zu was NULL",
+                    __FUNCTION__, i);
+            return BAD_VALUE;
+        }
+
+        ALOGV("%s: Stream %p (id %zu), type %d, usage 0x%x, format 0x%x",
+                __FUNCTION__, newStream, i, newStream->stream_type,
+                newStream->usage,
+                newStream->format);
+
+        if (newStream->stream_type == CAMERA3_STREAM_INPUT ||
+            newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) {
+            if (inputStream != NULL) {
+
+                ALOGE("%s: Multiple input streams requested!", __FUNCTION__);
+                return BAD_VALUE;
+            }
+            inputStream = newStream;
+        }
+
+        bool validFormat = false;
+        for (size_t f = 0;
+             f < sizeof(kAvailableFormats)/sizeof(kAvailableFormats[0]);
+             f++) {
+            if (newStream->format == kAvailableFormats[f]) {
+                validFormat = true;
+                break;
+            }
+        }
+        if (!validFormat) {
+            ALOGE("%s: Unsupported stream format 0x%x requested",
+                    __FUNCTION__, newStream->format);
+            return BAD_VALUE;
+        }
+    }
+    mInputStream = inputStream;
+
+    /**
+     * Initially mark all existing streams as not alive
+     */
+    for (StreamIterator s = mStreams.begin(); s != mStreams.end(); ++s) {
+        PrivateStreamInfo *privStream =
+                static_cast<PrivateStreamInfo*>((*s)->priv);
+        privStream->alive = false;
+    }
+
+    /**
+     * Find new streams and mark still-alive ones
+     */
+    for (size_t i = 0; i < streamList->num_streams; i++) {
+        camera3_stream_t *newStream = streamList->streams[i];
+        if (newStream->priv == NULL) {
+            // New stream, construct info
+            PrivateStreamInfo *privStream = new PrivateStreamInfo();
+            privStream->alive = true;
+
+            newStream->max_buffers = kMaxBufferCount;
+            newStream->priv = privStream;
+            mStreams.push_back(newStream);
+        } else {
+            // Existing stream, mark as still alive.
+            PrivateStreamInfo *privStream =
+                    static_cast<PrivateStreamInfo*>(newStream->priv);
+            privStream->alive = true;
+        }
+        // Always update usage and max buffers
+        newStream->max_buffers = kMaxBufferCount;
+        switch (newStream->stream_type) {
+            case CAMERA3_STREAM_OUTPUT:
+                newStream->usage = GRALLOC_USAGE_HW_CAMERA_WRITE;
+                break;
+            case CAMERA3_STREAM_INPUT:
+                newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ;
+                break;
+            case CAMERA3_STREAM_BIDIRECTIONAL:
+                newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ |
+                        GRALLOC_USAGE_HW_CAMERA_WRITE;
+                break;
+        }
+    }
+
+    /**
+     * Reap the dead streams
+     */
+    for (StreamIterator s = mStreams.begin(); s != mStreams.end();) {
+        PrivateStreamInfo *privStream =
+                static_cast<PrivateStreamInfo*>((*s)->priv);
+        if (!privStream->alive) {
+            (*s)->priv = NULL;
+            delete privStream;
+            s = mStreams.erase(s);
+        } else {
+            ++s;
+        }
+    }
+
+    /**
+     * Can't reuse settings across configure call
+     */
+    mPrevSettings.clear();
+
+    return OK;
+}
+
+// Deprecated entry point: explicit buffer registration was removed in
+// camera HAL v3.2, so this always fails loudly with NO_INIT.
+status_t EmulatedFakeCamera3::registerStreamBuffers(
+ const camera3_stream_buffer_set *bufferSet) {
+ ALOGV("%s: E", __FUNCTION__);
+ Mutex::Autolock l(mLock);
+
+ // Should not be called in HAL versions >= 3.2
+
+ ALOGE("%s: Should not be invoked on new HALs!",
+ __FUNCTION__);
+ return NO_INIT;
+}
+
+// Builds (lazily, once per template type) the default request settings for
+// the given CAMERA3_TEMPLATE_* type and caches the result in
+// mDefaultTemplates. The returned pointer is owned by this object and
+// stays valid for the device's lifetime — callers must not free it.
+// Sections gated on hasCapability() are only emitted when the device
+// advertises the corresponding capability.
+const camera_metadata_t* EmulatedFakeCamera3::constructDefaultRequestSettings(
+ int type) {
+ ALOGV("%s: E", __FUNCTION__);
+ Mutex::Autolock l(mLock);
+
+ if (type < 0 || type >= CAMERA3_TEMPLATE_COUNT) {
+ ALOGE("%s: Unknown request settings template: %d",
+ __FUNCTION__, type);
+ return NULL;
+ }
+
+ if (!hasCapability(BACKWARD_COMPATIBLE) && type != CAMERA3_TEMPLATE_PREVIEW) {
+ ALOGE("%s: Template %d not supported w/o BACKWARD_COMPATIBLE capability",
+ __FUNCTION__, type);
+ return NULL;
+ }
+
+ /**
+ * Cache is not just an optimization - pointer returned has to live at
+ * least as long as the camera device instance does.
+ */
+ if (mDefaultTemplates[type] != NULL) {
+ return mDefaultTemplates[type];
+ }
+
+ CameraMetadata settings;
+
+ /** android.request */
+
+ static const uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
+ settings.update(ANDROID_REQUEST_METADATA_MODE, &metadataMode, 1);
+
+ static const int32_t id = 0;
+ settings.update(ANDROID_REQUEST_ID, &id, 1);
+
+ static const int32_t frameCount = 0;
+ settings.update(ANDROID_REQUEST_FRAME_COUNT, &frameCount, 1);
+
+ /** android.lens */
+
+ static const float focalLength = 5.0f;
+ settings.update(ANDROID_LENS_FOCAL_LENGTH, &focalLength, 1);
+
+ if (hasCapability(BACKWARD_COMPATIBLE)) {
+ static const float focusDistance = 0;
+ settings.update(ANDROID_LENS_FOCUS_DISTANCE, &focusDistance, 1);
+
+ static const float aperture = 2.8f;
+ settings.update(ANDROID_LENS_APERTURE, &aperture, 1);
+
+ static const float filterDensity = 0;
+ settings.update(ANDROID_LENS_FILTER_DENSITY, &filterDensity, 1);
+
+ static const uint8_t opticalStabilizationMode =
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+ settings.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ &opticalStabilizationMode, 1);
+
+ // FOCUS_RANGE set only in frame
+ }
+
+ /** android.sensor */
+
+ // Manual exposure controls only make sense with MANUAL_SENSOR.
+ if (hasCapability(MANUAL_SENSOR)) {
+ static const int64_t exposureTime = 10 * MSEC;
+ settings.update(ANDROID_SENSOR_EXPOSURE_TIME, &exposureTime, 1);
+
+ static const int64_t frameDuration = 33333333L; // 1/30 s
+ settings.update(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
+
+ static const int32_t sensitivity = 100;
+ settings.update(ANDROID_SENSOR_SENSITIVITY, &sensitivity, 1);
+ }
+
+ // TIMESTAMP set only in frame
+
+ /** android.flash */
+
+ if (hasCapability(BACKWARD_COMPATIBLE)) {
+ static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
+ settings.update(ANDROID_FLASH_MODE, &flashMode, 1);
+
+ static const uint8_t flashPower = 10;
+ settings.update(ANDROID_FLASH_FIRING_POWER, &flashPower, 1);
+
+ static const int64_t firingTime = 0;
+ settings.update(ANDROID_FLASH_FIRING_TIME, &firingTime, 1);
+ }
+
+ /** Processing block modes */
+ // Still/ZSL templates get HIGH_QUALITY processing; preview/video get FAST.
+ if (hasCapability(MANUAL_POST_PROCESSING)) {
+ uint8_t hotPixelMode = 0;
+ uint8_t demosaicMode = 0;
+ uint8_t noiseMode = 0;
+ uint8_t shadingMode = 0;
+ uint8_t colorMode = 0;
+ uint8_t tonemapMode = 0;
+ uint8_t edgeMode = 0;
+ switch (type) {
+ case CAMERA3_TEMPLATE_STILL_CAPTURE:
+ // fall-through
+ case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+ // fall-through
+ case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+ hotPixelMode = ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY;
+ demosaicMode = ANDROID_DEMOSAIC_MODE_HIGH_QUALITY;
+ noiseMode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY;
+ shadingMode = ANDROID_SHADING_MODE_HIGH_QUALITY;
+ colorMode = ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY;
+ tonemapMode = ANDROID_TONEMAP_MODE_HIGH_QUALITY;
+ edgeMode = ANDROID_EDGE_MODE_HIGH_QUALITY;
+ break;
+ case CAMERA3_TEMPLATE_PREVIEW:
+ // fall-through
+ case CAMERA3_TEMPLATE_VIDEO_RECORD:
+ // fall-through
+ default:
+ hotPixelMode = ANDROID_HOT_PIXEL_MODE_FAST;
+ demosaicMode = ANDROID_DEMOSAIC_MODE_FAST;
+ noiseMode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+ shadingMode = ANDROID_SHADING_MODE_FAST;
+ colorMode = ANDROID_COLOR_CORRECTION_MODE_FAST;
+ tonemapMode = ANDROID_TONEMAP_MODE_FAST;
+ edgeMode = ANDROID_EDGE_MODE_FAST;
+ break;
+ }
+ settings.update(ANDROID_HOT_PIXEL_MODE, &hotPixelMode, 1);
+ settings.update(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1);
+ settings.update(ANDROID_NOISE_REDUCTION_MODE, &noiseMode, 1);
+ settings.update(ANDROID_SHADING_MODE, &shadingMode, 1);
+ settings.update(ANDROID_COLOR_CORRECTION_MODE, &colorMode, 1);
+ settings.update(ANDROID_TONEMAP_MODE, &tonemapMode, 1);
+ settings.update(ANDROID_EDGE_MODE, &edgeMode, 1);
+ }
+
+ /** android.colorCorrection */
+
+ if (hasCapability(MANUAL_POST_PROCESSING)) {
+ // Identity transform and unity gains (no color correction).
+ static const camera_metadata_rational colorTransform[9] = {
+ {1,1}, {0,1}, {0,1},
+ {0,1}, {1,1}, {0,1},
+ {0,1}, {0,1}, {1,1}
+ };
+ settings.update(ANDROID_COLOR_CORRECTION_TRANSFORM, colorTransform, 9);
+
+ static const float colorGains[4] = {
+ 1.0f, 1.0f, 1.0f, 1.0f
+ };
+ settings.update(ANDROID_COLOR_CORRECTION_GAINS, colorGains, 4);
+ }
+
+ /** android.tonemap */
+
+ if (hasCapability(MANUAL_POST_PROCESSING)) {
+ // Linear (identity) tonemap curve for all three channels.
+ static const float tonemapCurve[4] = {
+ 0.f, 0.f,
+ 1.f, 1.f
+ };
+ settings.update(ANDROID_TONEMAP_CURVE_RED, tonemapCurve, 4);
+ settings.update(ANDROID_TONEMAP_CURVE_GREEN, tonemapCurve, 4);
+ settings.update(ANDROID_TONEMAP_CURVE_BLUE, tonemapCurve, 4);
+ }
+
+ /** android.scaler */
+ if (hasCapability(BACKWARD_COMPATIBLE)) {
+ // Default crop: full active sensor array.
+ static const int32_t cropRegion[4] = {
+ 0, 0, (int32_t)Sensor::kResolution[0], (int32_t)Sensor::kResolution[1]
+ };
+ settings.update(ANDROID_SCALER_CROP_REGION, cropRegion, 4);
+ }
+
+ /** android.jpeg */
+ if (hasCapability(BACKWARD_COMPATIBLE)) {
+ static const uint8_t jpegQuality = 80;
+ settings.update(ANDROID_JPEG_QUALITY, &jpegQuality, 1);
+
+ static const int32_t thumbnailSize[2] = {
+ 640, 480
+ };
+ settings.update(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnailSize, 2);
+
+ static const uint8_t thumbnailQuality = 80;
+ settings.update(ANDROID_JPEG_THUMBNAIL_QUALITY, &thumbnailQuality, 1);
+
+ static const double gpsCoordinates[2] = {
+ 0, 0
+ };
+ settings.update(ANDROID_JPEG_GPS_COORDINATES, gpsCoordinates, 2);
+
+ static const uint8_t gpsProcessingMethod[32] = "None";
+ settings.update(ANDROID_JPEG_GPS_PROCESSING_METHOD, gpsProcessingMethod, 32);
+
+ static const int64_t gpsTimestamp = 0;
+ settings.update(ANDROID_JPEG_GPS_TIMESTAMP, &gpsTimestamp, 1);
+
+ static const int32_t jpegOrientation = 0;
+ settings.update(ANDROID_JPEG_ORIENTATION, &jpegOrientation, 1);
+ }
+
+ /** android.stats */
+
+ if (hasCapability(BACKWARD_COMPATIBLE)) {
+ static const uint8_t faceDetectMode =
+ ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ settings.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
+
+ static const uint8_t hotPixelMapMode =
+ ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
+ settings.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1);
+ }
+
+ // faceRectangles, faceScores, faceLandmarks, faceIds, histogram,
+ // sharpnessMap only in frames
+
+ /** android.control */
+
+ // Capture intent follows the template type one-to-one.
+ uint8_t controlIntent = 0;
+ switch (type) {
+ case CAMERA3_TEMPLATE_PREVIEW:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ break;
+ case CAMERA3_TEMPLATE_STILL_CAPTURE:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+ break;
+ case CAMERA3_TEMPLATE_VIDEO_RECORD:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+ break;
+ case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
+ break;
+ case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+ break;
+ case CAMERA3_TEMPLATE_MANUAL:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL;
+ break;
+ default:
+ controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
+ break;
+ }
+ settings.update(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
+
+ // MANUAL template turns all automatic control off.
+ const uint8_t controlMode = (type == CAMERA3_TEMPLATE_MANUAL) ?
+ ANDROID_CONTROL_MODE_OFF :
+ ANDROID_CONTROL_MODE_AUTO;
+ settings.update(ANDROID_CONTROL_MODE, &controlMode, 1);
+
+ // Video templates pin the AE FPS range to a fixed 30 fps.
+ int32_t aeTargetFpsRange[2] = {
+ 5, 30
+ };
+ if (type == CAMERA3_TEMPLATE_VIDEO_RECORD || type == CAMERA3_TEMPLATE_VIDEO_SNAPSHOT) {
+ aeTargetFpsRange[0] = 30;
+ }
+ settings.update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, aeTargetFpsRange, 2);
+
+ if (hasCapability(BACKWARD_COMPATIBLE)) {
+
+ static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+ settings.update(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
+
+ static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+ settings.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+
+ const uint8_t aeMode = (type == CAMERA3_TEMPLATE_MANUAL) ?
+ ANDROID_CONTROL_AE_MODE_OFF :
+ ANDROID_CONTROL_AE_MODE_ON;
+ settings.update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
+
+ static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+ settings.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
+
+ // All-zero region means "no metering region specified".
+ static const int32_t controlRegions[5] = {
+ 0, 0, 0, 0, 0
+ };
+ settings.update(ANDROID_CONTROL_AE_REGIONS, controlRegions, 5);
+
+ static const int32_t aeExpCompensation = 0;
+ settings.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &aeExpCompensation, 1);
+
+
+ static const uint8_t aeAntibandingMode =
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+ settings.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &aeAntibandingMode, 1);
+
+ static const uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
+ settings.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &aePrecaptureTrigger, 1);
+
+ const uint8_t awbMode = (type == CAMERA3_TEMPLATE_MANUAL) ?
+ ANDROID_CONTROL_AWB_MODE_OFF :
+ ANDROID_CONTROL_AWB_MODE_AUTO;
+ settings.update(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
+
+ static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
+ settings.update(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
+
+ // AF mode depends on template: continuous for preview/still/video,
+ // off for manual, plain auto otherwise.
+ uint8_t afMode = 0;
+ switch (type) {
+ case CAMERA3_TEMPLATE_PREVIEW:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+ break;
+ case CAMERA3_TEMPLATE_STILL_CAPTURE:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+ break;
+ case CAMERA3_TEMPLATE_VIDEO_RECORD:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+ break;
+ case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+ break;
+ case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+ afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+ break;
+ case CAMERA3_TEMPLATE_MANUAL:
+ afMode = ANDROID_CONTROL_AF_MODE_OFF;
+ break;
+ default:
+ afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+ break;
+ }
+ settings.update(ANDROID_CONTROL_AF_MODE, &afMode, 1);
+
+ settings.update(ANDROID_CONTROL_AF_REGIONS, controlRegions, 5);
+
+ static const uint8_t afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+ settings.update(ANDROID_CONTROL_AF_TRIGGER, &afTrigger, 1);
+
+ static const uint8_t vstabMode =
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+ settings.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vstabMode, 1);
+
+ static const uint8_t blackLevelLock = ANDROID_BLACK_LEVEL_LOCK_OFF;
+ settings.update(ANDROID_BLACK_LEVEL_LOCK, &blackLevelLock, 1);
+
+ static const uint8_t lensShadingMapMode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
+ settings.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, &lensShadingMapMode, 1);
+
+ static const uint8_t aberrationMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
+ settings.update(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, &aberrationMode, 1);
+
+ static const int32_t testPatternMode = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+ settings.update(ANDROID_SENSOR_TEST_PATTERN_MODE, &testPatternMode, 1);
+ }
+
+ // Transfer ownership of the buffer into the cache; freed in the dtor.
+ mDefaultTemplates[type] = settings.release();
+
+ return mDefaultTemplates[type];
+}
+
+/**
+ * camera3 process_capture_request() entry point.
+ *
+ * Validates the request (device state, settings, input/output buffers),
+ * locks every output buffer for CPU write, programs the fake sensor with
+ * the requested exposure/duration/sensitivity, and queues the request to
+ * the readout thread, which renders the frame and delivers the result.
+ *
+ * @param request capture request from the framework; must be non-NULL and
+ *        carry at least one output buffer.
+ * @return OK on success; BAD_VALUE for malformed requests;
+ *         INVALID_OPERATION if the device is not configured; NO_INIT on
+ *         fence/lock/JPEG/readout/sensor-sync failures.
+ *
+ * NOTE: mLock is held for the whole call, including the lengthy sensor
+ * vsync wait below; the HAL interface spec guarantees the framework makes
+ * no other calls while process_capture_request is in flight.
+ */
+status_t EmulatedFakeCamera3::processCaptureRequest(
+        camera3_capture_request *request) {
+
+    Mutex::Autolock l(mLock);
+    status_t res;
+
+    /** Validation */
+
+    if (mStatus < STATUS_READY) {
+        ALOGE("%s: Can't submit capture requests in state %d", __FUNCTION__,
+                mStatus);
+        return INVALID_OPERATION;
+    }
+
+    if (request == NULL) {
+        ALOGE("%s: NULL request!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    uint32_t frameNumber = request->frame_number;
+
+    // The first request after configure_streams() must carry full settings;
+    // later requests may pass NULL to mean "repeat the previous settings".
+    if (request->settings == NULL && mPrevSettings.isEmpty()) {
+        ALOGE("%s: Request %d: NULL settings for first request after "
+                "configureStreams()", __FUNCTION__, frameNumber);
+        return BAD_VALUE;
+    }
+
+    if (request->input_buffer != NULL &&
+            request->input_buffer->stream != mInputStream) {
+        ALOGE("%s: Request %d: Input buffer not from input stream!",
+                __FUNCTION__, frameNumber);
+        ALOGV("%s: Bad stream %p, expected: %p",
+                __FUNCTION__, request->input_buffer->stream,
+                mInputStream);
+        ALOGV("%s: Bad stream type %d, expected stream type %d",
+                __FUNCTION__, request->input_buffer->stream->stream_type,
+                mInputStream ? mInputStream->stream_type : -1);
+
+        return BAD_VALUE;
+    }
+
+    if (request->num_output_buffers < 1 || request->output_buffers == NULL) {
+        ALOGE("%s: Request %d: No output buffers provided!",
+                __FUNCTION__, frameNumber);
+        return BAD_VALUE;
+    }
+
+    // Validate all buffers, starting with input buffer if it's given.
+    // The input buffer (if any) is logged as index -1, hence signed idx
+    // and the %zd format specifier below.
+    ssize_t idx;
+    const camera3_stream_buffer_t *b;
+    if (request->input_buffer != NULL) {
+        idx = -1;
+        b = request->input_buffer;
+    } else {
+        idx = 0;
+        b = request->output_buffers;
+    }
+    do {
+        // BUGFIX: the template argument of this cast had been lost,
+        // making the statement ill-formed.
+        PrivateStreamInfo *priv =
+                static_cast<PrivateStreamInfo*>(b->stream->priv);
+        if (priv == NULL) {
+            ALOGE("%s: Request %d: Buffer %zd: Unconfigured stream!",
+                    __FUNCTION__, frameNumber, idx);
+            return BAD_VALUE;
+        }
+        if (!priv->alive) {
+            ALOGE("%s: Request %d: Buffer %zd: Dead stream!",
+                    __FUNCTION__, frameNumber, idx);
+            return BAD_VALUE;
+        }
+        if (b->status != CAMERA3_BUFFER_STATUS_OK) {
+            ALOGE("%s: Request %d: Buffer %zd: Status not OK!",
+                    __FUNCTION__, frameNumber, idx);
+            return BAD_VALUE;
+        }
+        if (b->release_fence != -1) {
+            ALOGE("%s: Request %d: Buffer %zd: Has a release fence!",
+                    __FUNCTION__, frameNumber, idx);
+            return BAD_VALUE;
+        }
+        if (b->buffer == NULL) {
+            ALOGE("%s: Request %d: Buffer %zd: NULL buffer handle!",
+                    __FUNCTION__, frameNumber, idx);
+            return BAD_VALUE;
+        }
+        idx++;
+        b = &(request->output_buffers[idx]);
+    } while (idx < (ssize_t)request->num_output_buffers);
+
+    // TODO: Validate settings parameters
+
+    /**
+     * Start processing this request
+     */
+
+    mStatus = STATUS_ACTIVE;
+
+    CameraMetadata settings;
+
+    if (request->settings == NULL) {
+        // Repeating request: reuse the previously supplied settings.
+        settings.acquire(mPrevSettings);
+    } else {
+        settings = request->settings;
+    }
+
+    res = process3A(settings);
+    if (res != OK) {
+        return res;
+    }
+
+    // TODO: Handle reprocessing
+
+    /**
+     * Get ready for sensor config
+     */
+
+    nsecs_t  exposureTime;
+    nsecs_t  frameDuration;
+    uint32_t sensitivity;
+    bool     needJpeg = false;
+    camera_metadata_entry_t entry;
+
+    entry = settings.find(ANDROID_SENSOR_EXPOSURE_TIME);
+    exposureTime = (entry.count > 0) ?
+            entry.data.i64[0] : Sensor::kExposureTimeRange[0];
+    entry = settings.find(ANDROID_SENSOR_FRAME_DURATION);
+    frameDuration = (entry.count > 0) ?
+            entry.data.i64[0] : Sensor::kFrameDurationRange[0];
+    entry = settings.find(ANDROID_SENSOR_SENSITIVITY);
+    sensitivity = (entry.count > 0) ?
+            entry.data.i32[0] : Sensor::kSensitivityRange[0];
+
+    // The frame must be long enough to contain the exposure plus the
+    // minimum vertical blanking interval.
+    if (exposureTime > frameDuration) {
+        frameDuration = exposureTime + Sensor::kMinVerticalBlank;
+        settings.update(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
+    }
+
+    Buffers *sensorBuffers = new Buffers();
+    HalBufferVector *buffers = new HalBufferVector();
+
+    sensorBuffers->setCapacity(request->num_output_buffers);
+    buffers->setCapacity(request->num_output_buffers);
+
+    // Common error-path cleanup once the vectors above exist: unlock every
+    // gralloc buffer locked so far and free the per-request allocations.
+    // (BUGFIX: the original leaked both vectors on all failure paths, and
+    // its unlock loop dereferenced output_buffers[i] instead of [j].)
+    auto abortRequest = [&]() {
+        for (size_t i = 0; i < sensorBuffers->size(); i++) {
+            GraphicBufferMapper::get().unlock(
+                    *((*sensorBuffers)[i].buffer));
+        }
+        delete sensorBuffers;
+        delete buffers;
+    };
+
+    // Process all the buffers we got for output, constructing internal
+    // buffer structures for them, and lock them for writing.
+    for (size_t i = 0; i < request->num_output_buffers; i++) {
+        const camera3_stream_buffer &srcBuf = request->output_buffers[i];
+        const cb_handle_t *privBuffer =
+                static_cast<const cb_handle_t*>(*srcBuf.buffer);
+        StreamBuffer destBuf;
+        destBuf.streamId  = kGenericStreamId;
+        destBuf.width     = srcBuf.stream->width;
+        destBuf.height    = srcBuf.stream->height;
+        destBuf.format    = privBuffer->format; // Use real private format
+        destBuf.stride    = srcBuf.stream->width; // TODO: query from gralloc
+        destBuf.dataSpace = srcBuf.stream->data_space;
+        destBuf.buffer    = srcBuf.buffer;
+
+        if (destBuf.format == HAL_PIXEL_FORMAT_BLOB) {
+            needJpeg = true;
+        }
+
+        // Wait on the acquire fence before touching the buffer.
+        sp<Fence> bufferAcquireFence = new Fence(srcBuf.acquire_fence);
+        res = bufferAcquireFence->wait(kFenceTimeoutMs);
+        if (res == TIMED_OUT) {
+            ALOGE("%s: Request %d: Buffer %zu: Fence timed out after %d ms",
+                    __FUNCTION__, frameNumber, i, kFenceTimeoutMs);
+        }
+        if (res == OK) {
+            // Lock buffer for writing
+            const Rect rect(destBuf.width, destBuf.height);
+            if (srcBuf.stream->format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+                if (privBuffer->format == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
+                    android_ycbcr ycbcr = android_ycbcr();
+                    res = GraphicBufferMapper::get().lockYCbCr(
+                            *(destBuf.buffer),
+                            GRALLOC_USAGE_HW_CAMERA_WRITE, rect,
+                            &ycbcr);
+                    // This is only valid because we know that emulator's
+                    // YCbCr_420_888 is really contiguous NV21 under the hood
+                    destBuf.img = static_cast<uint8_t*>(ycbcr.y);
+                } else {
+                    ALOGE("Unexpected private format for flexible YUV: 0x%x",
+                            privBuffer->format);
+                    res = INVALID_OPERATION;
+                }
+            } else {
+                res = GraphicBufferMapper::get().lock(*(destBuf.buffer),
+                        GRALLOC_USAGE_HW_CAMERA_WRITE, rect,
+                        (void**)&(destBuf.img));
+            }
+            if (res != OK) {
+                ALOGE("%s: Request %d: Buffer %zu: Unable to lock buffer",
+                        __FUNCTION__, frameNumber, i);
+            }
+        }
+
+        if (res != OK) {
+            // Either waiting or locking failed; unlock already-locked
+            // buffers, free the request state, and bail out.
+            abortRequest();
+            return NO_INIT;
+        }
+
+        sensorBuffers->push_back(destBuf);
+        buffers->push_back(srcBuf);
+    }
+
+    /**
+     * Wait for JPEG compressor to not be busy, if needed
+     */
+    if (needJpeg) {
+        bool ready = mJpegCompressor->waitForDone(kFenceTimeoutMs);
+        if (!ready) {
+            ALOGE("%s: Timeout waiting for JPEG compression to complete!",
+                    __FUNCTION__);
+            abortRequest();
+            return NO_INIT;
+        }
+    }
+
+    /**
+     * Wait until the in-flight queue has room
+     */
+    res = mReadoutThread->waitForReadout();
+    if (res != OK) {
+        ALOGE("%s: Timeout waiting for previous requests to complete!",
+                __FUNCTION__);
+        abortRequest();
+        return NO_INIT;
+    }
+
+    /**
+     * Wait until sensor's ready. This waits for lengthy amounts of time with
+     * mLock held, but the interface spec is that no other calls may by done to
+     * the HAL by the framework while process_capture_request is happening.
+     */
+    int syncTimeoutCount = 0;
+    while (!mSensor->waitForVSync(kSyncWaitTimeout)) {
+        if (mStatus == STATUS_ERROR) {
+            abortRequest();
+            return NO_INIT;
+        }
+        if (syncTimeoutCount == kMaxSyncTimeoutCount) {
+            ALOGE("%s: Request %d: Sensor sync timed out after %" PRId64 " ms",
+                    __FUNCTION__, frameNumber,
+                    kSyncWaitTimeout * kMaxSyncTimeoutCount / 1000000);
+            abortRequest();
+            return NO_INIT;
+        }
+        syncTimeoutCount++;
+    }
+
+    /**
+     * Configure sensor and queue up the request to the readout thread
+     */
+    mSensor->setExposureTime(exposureTime);
+    mSensor->setFrameDuration(frameDuration);
+    mSensor->setSensitivity(sensitivity);
+    mSensor->setDestinationBuffers(sensorBuffers);
+    mSensor->setFrameNumber(request->frame_number);
+
+    ReadoutThread::Request r;
+    r.frameNumber = request->frame_number;
+    r.settings = settings;
+    r.sensorBuffers = sensorBuffers;  // ownership passes to readout thread
+    r.buffers = buffers;              // ownership passes to readout thread
+
+    mReadoutThread->queueCaptureRequest(r);
+    ALOGVV("%s: Queued frame %d", __FUNCTION__, request->frame_number);
+
+    // Cache the settings for next time
+    mPrevSettings.acquire(settings);
+
+    return OK;
+}
+
+/**
+ * camera3 flush() entry point. A real HAL must drain all in-flight
+ * requests as quickly as possible before returning; this fake camera
+ * does not implement that, so it only logs a warning and reports
+ * success. NOTE(review): returning OK without actually flushing is
+ * technically out of spec — acceptable only for the emulator.
+ */
+status_t EmulatedFakeCamera3::flush() {
+ ALOGW("%s: Not implemented; ignored", __FUNCTION__);
+ return OK;
+}
+
+/** Debug methods */
+
+/**
+ * camera3 dump() hook (dumpsys support). Intentionally a no-op for the
+ * fake camera: no internal state is written to the supplied fd.
+ *
+ * @param fd file descriptor to which a real HAL would write debug state.
+ */
+void EmulatedFakeCamera3::dump(int fd) {
+    (void)fd;  // unused; suppress -Wunused-parameter
+}
+
+/**
+ * Private methods
+ */
+
+status_t EmulatedFakeCamera3::getCameraCapabilities() {
+
+ const char *key = mFacingBack ? "qemu.sf.back_camera_caps" : "qemu.sf.front_camera_caps";
+
+ /* Defined by 'qemu.sf.*_camera_caps' boot property: if the
+ * property doesn't exist, it is assumed to list FULL. */
+ char prop[PROPERTY_VALUE_MAX];
+ if (property_get(key, prop, NULL) > 0) {
+ char *saveptr = nullptr;
+ char *cap = strtok_r(prop, " ,", &saveptr);
+ while (cap != NULL) {
+ for (int i = 0; i < NUM_CAPABILITIES; i++) {
+ if (!strcasecmp(cap, sAvailableCapabilitiesStrings[i])) {
+ mCapabilities.add(static_cast