Merge "checkpoint tablet status bar"
diff --git a/api/current.xml b/api/current.xml
index ea7804c..7131eb9 100644
--- a/api/current.xml
+++ b/api/current.xml
@@ -13955,7 +13955,7 @@
value="17301636"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -13966,7 +13966,7 @@
value="17301637"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -13977,7 +13977,7 @@
value="17301638"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -14021,7 +14021,7 @@
value="17301671"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -14032,7 +14032,7 @@
value="17301672"
static="true"
final="true"
- deprecated="not deprecated"
+ deprecated="deprecated"
visibility="public"
>
</field>
@@ -15341,6 +15341,17 @@
visibility="public"
>
</field>
+<field name="list_content"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="17367073"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
<field name="preference_category"
type="int"
transient="false"
@@ -19879,6 +19890,17 @@
visibility="public"
>
</method>
+<method name="getCustomView"
+ return="android.view.View"
+ abstract="true"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
<method name="getMenu"
return="android.view.Menu"
abstract="true"
@@ -19890,6 +19912,28 @@
visibility="public"
>
</method>
+<method name="getSubtitle"
+ return="java.lang.CharSequence"
+ abstract="true"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
+<method name="getTitle"
+ return="java.lang.CharSequence"
+ abstract="true"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
<method name="invalidate"
return="void"
abstract="true"
@@ -28465,7 +28509,18 @@
>
<parameter name="shown" type="boolean">
</parameter>
-<parameter name="animate" type="boolean">
+</method>
+<method name="setListShownNoAnimation"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="shown" type="boolean">
</parameter>
</method>
<method name="setSelection"
@@ -28884,7 +28939,9 @@
>
<implements name="android.view.InputQueue.Callback">
</implements>
-<implements name="android.view.SurfaceHolder.Callback">
+<implements name="android.view.SurfaceHolder.Callback2">
+</implements>
+<implements name="android.view.ViewTreeObserver.OnGlobalLayoutListener">
</implements>
<constructor name="NativeActivity"
type="android.app.NativeActivity"
@@ -28894,6 +28951,17 @@
visibility="public"
>
</constructor>
+<method name="onGlobalLayout"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
<method name="onInputQueueCreated"
return="void"
abstract="false"
@@ -28965,6 +29033,19 @@
<parameter name="holder" type="android.view.SurfaceHolder">
</parameter>
</method>
+<method name="surfaceRedrawNeeded"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="holder" type="android.view.SurfaceHolder">
+</parameter>
+</method>
<field name="META_DATA_LIB_NAME"
type="java.lang.String"
transient="false"
@@ -68566,6 +68647,17 @@
visibility="public"
>
</field>
+<field name="YV12"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="842094169"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
</class>
<class name="Interpolator"
extends="java.lang.Object"
@@ -147126,6 +147218,19 @@
<parameter name="holder" type="android.view.SurfaceHolder">
</parameter>
</method>
+<method name="onSurfaceRedrawNeeded"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="holder" type="android.view.SurfaceHolder">
+</parameter>
+</method>
<method name="onTouchEvent"
return="void"
abstract="false"
@@ -178389,6 +178494,171 @@
visibility="public"
>
</field>
+<field name="KEYCODE_BUTTON_A"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="96"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_B"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="97"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_C"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="98"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_L1"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="102"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_L2"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="104"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_MODE"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="110"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_R1"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="103"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_R2"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="105"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_SELECT"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="109"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_START"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="108"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_THUMBL"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="106"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_THUMBR"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="107"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_X"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="99"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_Y"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="100"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
+<field name="KEYCODE_BUTTON_Z"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="101"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
<field name="KEYCODE_C"
type="int"
transient="false"
@@ -183042,6 +183312,29 @@
</parameter>
</method>
</interface>
+<interface name="SurfaceHolder.Callback2"
+ abstract="true"
+ static="true"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<implements name="android.view.SurfaceHolder.Callback">
+</implements>
+<method name="surfaceRedrawNeeded"
+ return="void"
+ abstract="true"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="holder" type="android.view.SurfaceHolder">
+</parameter>
+</method>
+</interface>
<class name="SurfaceView"
extends="android.view.View"
abstract="false"
@@ -183985,6 +184278,17 @@
visibility="public"
>
</method>
+<method name="getAlpha"
+ return="float"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
<method name="getAnimation"
return="android.view.animation.Animation"
abstract="false"
@@ -184418,6 +184722,17 @@
<parameter name="location" type="int[]">
</parameter>
</method>
+<method name="getMatrix"
+ return="android.graphics.Matrix"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
<method name="getMeasuredHeight"
return="int"
abstract="false"
@@ -184550,6 +184865,28 @@
visibility="public"
>
</method>
+<method name="getPivotX"
+ return="float"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
+<method name="getPivotY"
+ return="float"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
<method name="getResources"
return="android.content.res.Resources"
abstract="false"
@@ -184605,6 +184942,39 @@
visibility="public"
>
</method>
+<method name="getRotation"
+ return="float"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
+<method name="getScaleX"
+ return="float"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
+<method name="getScaleY"
+ return="float"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
<method name="getScrollBarStyle"
return="int"
abstract="false"
@@ -184851,6 +185221,28 @@
<parameter name="outRect" type="android.graphics.Rect">
</parameter>
</method>
+<method name="getX"
+ return="int"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
+<method name="getY"
+ return="int"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</method>
<method name="hasFocus"
return="boolean"
abstract="false"
@@ -186181,6 +186573,19 @@
<parameter name="event" type="android.view.accessibility.AccessibilityEvent">
</parameter>
</method>
+<method name="setAlpha"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="alpha" type="float">
+</parameter>
+</method>
<method name="setAnimation"
return="void"
abstract="false"
@@ -186233,6 +186638,19 @@
<parameter name="resid" type="int">
</parameter>
</method>
+<method name="setBottom"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="bottom" type="int">
+</parameter>
+</method>
<method name="setClickable"
return="void"
abstract="false"
@@ -186441,6 +186859,19 @@
<parameter name="params" type="android.view.ViewGroup.LayoutParams">
</parameter>
</method>
+<method name="setLeft"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="left" type="int">
+</parameter>
+</method>
<method name="setLongClickable"
return="void"
abstract="false"
@@ -186644,6 +187075,32 @@
<parameter name="bottom" type="int">
</parameter>
</method>
+<method name="setPivotX"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="pivotX" type="float">
+</parameter>
+</method>
+<method name="setPivotY"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="pivotY" type="float">
+</parameter>
+</method>
<method name="setPressed"
return="void"
abstract="false"
@@ -186657,6 +187114,32 @@
<parameter name="pressed" type="boolean">
</parameter>
</method>
+<method name="setRight"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="right" type="int">
+</parameter>
+</method>
+<method name="setRotation"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="rotation" type="float">
+</parameter>
+</method>
<method name="setSaveEnabled"
return="void"
abstract="false"
@@ -186683,6 +187166,32 @@
<parameter name="enabled" type="boolean">
</parameter>
</method>
+<method name="setScaleX"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="scaleX" type="float">
+</parameter>
+</method>
+<method name="setScaleY"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="scaleY" type="float">
+</parameter>
+</method>
<method name="setScrollBarStyle"
return="void"
abstract="false"
@@ -186776,6 +187285,19 @@
<parameter name="tag" type="java.lang.Object">
</parameter>
</method>
+<method name="setTop"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="top" type="int">
+</parameter>
+</method>
<method name="setTouchDelegate"
return="void"
abstract="false"
@@ -186854,6 +187376,32 @@
<parameter name="willNotDraw" type="boolean">
</parameter>
</method>
+<method name="setX"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="x" type="int">
+</parameter>
+</method>
+<method name="setY"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="y" type="int">
+</parameter>
+</method>
<method name="showContextMenu"
return="boolean"
abstract="false"
@@ -191681,7 +192229,7 @@
deprecated="not deprecated"
visibility="public"
>
-<parameter name="callback" type="android.view.SurfaceHolder.Callback">
+<parameter name="callback" type="android.view.SurfaceHolder.Callback2">
</parameter>
</method>
<method name="togglePanel"
@@ -204199,6 +204747,36 @@
<parameter name="outState" type="android.os.Bundle">
</parameter>
</method>
+<method name="saveWebArchive"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="filename" type="java.lang.String">
+</parameter>
+</method>
+<method name="saveWebArchive"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="basename" type="java.lang.String">
+</parameter>
+<parameter name="autoname" type="boolean">
+</parameter>
+<parameter name="callback" type="android.webkit.ValueCallback<java.lang.String>">
+</parameter>
+</method>
<method name="setCertificate"
return="void"
abstract="false"
@@ -213903,6 +214481,19 @@
<parameter name="d" type="android.graphics.drawable.Drawable">
</parameter>
</method>
+<method name="setContentWidth"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="false"
+ final="false"
+ deprecated="not deprecated"
+ visibility="public"
+>
+<parameter name="width" type="int">
+</parameter>
+</method>
<method name="setHeight"
return="void"
abstract="false"
@@ -222885,7 +223476,7 @@
</parameter>
</method>
<method name="getOffsetX"
- return="int"
+ return="float"
abstract="true"
native="false"
synchronized="false"
@@ -222896,7 +223487,7 @@
>
</method>
<method name="getOffsetY"
- return="int"
+ return="float"
abstract="true"
native="false"
synchronized="false"
@@ -222951,7 +223542,20 @@
deprecated="not deprecated"
visibility="public"
>
+<parameter name="offset" type="int">
+</parameter>
</method>
+<field name="FADE_OUT_DURATION"
+ type="int"
+ transient="false"
+ volatile="false"
+ value="400"
+ static="true"
+ final="true"
+ deprecated="not deprecated"
+ visibility="public"
+>
+</field>
</interface>
<interface name="TextView.OnEditorActionListener"
abstract="true"
diff --git a/cmds/am/src/com/android/commands/am/Am.java b/cmds/am/src/com/android/commands/am/Am.java
index 301883f..fb60fdf 100644
--- a/cmds/am/src/com/android/commands/am/Am.java
+++ b/cmds/am/src/com/android/commands/am/Am.java
@@ -98,6 +98,8 @@
sendBroadcast();
} else if (op.equals("profile")) {
runProfile();
+ } else if (op.equals("dumpheap")) {
+ runDumpHeap();
} else {
throw new IllegalArgumentException("Unknown command: " + op);
}
@@ -424,6 +426,28 @@
}
}
+ private void runDumpHeap() throws Exception {
+ boolean managed = !"-n".equals(nextOption());
+ String process = nextArgRequired();
+ String heapFile = nextArgRequired();
+ ParcelFileDescriptor fd = null;
+
+ try {
+ fd = ParcelFileDescriptor.open(
+ new File(heapFile),
+ ParcelFileDescriptor.MODE_CREATE |
+ ParcelFileDescriptor.MODE_TRUNCATE |
+ ParcelFileDescriptor.MODE_READ_WRITE);
+ } catch (FileNotFoundException e) {
+ System.err.println("Error: Unable to open file: " + heapFile);
+ return;
+ }
+
+ if (!mAm.dumpHeap(process, managed, heapFile, fd)) {
+ throw new AndroidException("HEAP DUMP FAILED on process " + process);
+ }
+ }
+
private class IntentReceiver extends IIntentReceiver.Stub {
private boolean mFinished = false;
@@ -593,6 +617,8 @@
"\n" +
" start profiling: am profile <PROCESS> start <FILE>\n" +
" stop profiling: am profile <PROCESS> stop\n" +
+ " dump heap: am dumpheap [flags] <PROCESS> <FILE>\n" +
+ " -n: dump native heap instead of managed heap\n" +
"\n" +
" <INTENT> specifications include these flags:\n" +
" [-a <ACTION>] [-d <DATA_URI>] [-t <MIME_TYPE>]\n" +
diff --git a/cmds/surfaceflinger/Android.mk b/cmds/surfaceflinger/Android.mk
index bfa58a1..1df32bb 100644
--- a/cmds/surfaceflinger/Android.mk
+++ b/cmds/surfaceflinger/Android.mk
@@ -10,7 +10,7 @@
libutils
LOCAL_C_INCLUDES := \
- $(LOCAL_PATH)/../../libs/surfaceflinger
+ $(LOCAL_PATH)/../../services/surfaceflinger
LOCAL_MODULE:= surfaceflinger
diff --git a/cmds/surfaceflinger/main_surfaceflinger.cpp b/cmds/surfaceflinger/main_surfaceflinger.cpp
index d650721..78b1007 100644
--- a/cmds/surfaceflinger/main_surfaceflinger.cpp
+++ b/cmds/surfaceflinger/main_surfaceflinger.cpp
@@ -1,18 +1,25 @@
-#include <binder/IPCThreadState.h>
-#include <binder/ProcessState.h>
-#include <binder/IServiceManager.h>
-#include <utils/Log.h>
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <binder/BinderService.h>
#include <SurfaceFlinger.h>
using namespace android;
-int main(int argc, char** argv)
-{
- sp<ProcessState> proc(ProcessState::self());
- sp<IServiceManager> sm = defaultServiceManager();
- LOGI("ServiceManager: %p", sm.get());
- SurfaceFlinger::instantiate();
- ProcessState::self()->startThreadPool();
- IPCThreadState::self()->joinThreadPool();
+int main(int argc, char** argv) {
+ SurfaceFlinger::publishAndJoinThreadPool();
+ return 0;
}
diff --git a/cmds/system_server/library/Android.mk b/cmds/system_server/library/Android.mk
index 1813d3e..a880a91 100644
--- a/cmds/system_server/library/Android.mk
+++ b/cmds/system_server/library/Android.mk
@@ -7,9 +7,9 @@
base = $(LOCAL_PATH)/../../..
LOCAL_C_INCLUDES := \
- $(base)/camera/libcameraservice \
- $(base)/libs/audioflinger \
- $(base)/libs/surfaceflinger \
+ $(base)/services/camera/libcameraservice \
+ $(base)/services/audioflinger \
+ $(base)/services/surfaceflinger \
$(base)/media/libmediaplayerservice \
$(JNI_H_INCLUDE)
diff --git a/core/java/android/app/ActionBar.java b/core/java/android/app/ActionBar.java
index d0b3ac4..3cd2b9e 100644
--- a/core/java/android/app/ActionBar.java
+++ b/core/java/android/app/ActionBar.java
@@ -368,6 +368,24 @@
* @return The context mode's menu.
*/
public abstract Menu getMenu();
+
+ /**
+ * Returns the current title of this context mode.
+ * @return Title text
+ */
+ public abstract CharSequence getTitle();
+
+ /**
+ * Returns the current subtitle of this context mode.
+ * @return Subtitle text
+ */
+ public abstract CharSequence getSubtitle();
+
+ /**
+ * Returns the current custom view for this context mode.
+ * @return The current custom view
+ */
+ public abstract View getCustomView();
}
/**
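
A rough sketch of how a client might read the new context-mode accessors once it holds the mode object. The ContextModeInfo shape below is hypothetical and only mirrors the three abstract getters added above; the concrete class is not shown in this checkpoint.

    import android.util.Log;
    import android.view.View;

    /** Hypothetical mirror of the accessors added to the context mode above. */
    interface ContextModeInfo {
        CharSequence getTitle();
        CharSequence getSubtitle();
        View getCustomView();
    }

    final class ContextModeLogger {
        static void log(ContextModeInfo mode) {
            // Title/subtitle may be null when the mode only shows a custom view.
            Log.d("ContextMode", "title=" + mode.getTitle()
                    + ", subtitle=" + mode.getSubtitle()
                    + ", hasCustomView=" + (mode.getCustomView() != null));
        }
    }
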
diff --git a/core/java/android/app/Activity.java b/core/java/android/app/Activity.java
index 20272df..91e4cd5 100644
--- a/core/java/android/app/Activity.java
+++ b/core/java/android/app/Activity.java
@@ -71,6 +71,7 @@
import android.view.ViewGroup.LayoutParams;
import android.view.accessibility.AccessibilityEvent;
import android.widget.AdapterView;
+import android.widget.FrameLayout;
import android.widget.LinearLayout;
import com.android.internal.app.ActionBarImpl;
@@ -1270,19 +1271,37 @@
* @see #onPause
*/
public boolean onCreateThumbnail(Bitmap outBitmap, Canvas canvas) {
- final View view = mDecor;
- if (view == null) {
+ if (mDecor == null) {
return false;
}
- final int vw = view.getWidth();
- final int vh = view.getHeight();
- final int dw = outBitmap.getWidth();
- final int dh = outBitmap.getHeight();
+ int paddingLeft = 0;
+ int paddingRight = 0;
+ int paddingTop = 0;
+ int paddingBottom = 0;
+
+ // Find System window and use padding so we ignore space reserved for decorations
+ // like the status bar and such.
+ final FrameLayout top = (FrameLayout) mDecor;
+ for (int i = 0; i < top.getChildCount(); i++) {
+ View child = top.getChildAt(i);
+ if (child.isFitsSystemWindowsFlagSet()) {
+ paddingLeft = child.getPaddingLeft();
+ paddingRight = child.getPaddingRight();
+ paddingTop = child.getPaddingTop();
+ paddingBottom = child.getPaddingBottom();
+ break;
+ }
+ }
+
+ final int visibleWidth = mDecor.getWidth() - paddingLeft - paddingRight;
+ final int visibleHeight = mDecor.getHeight() - paddingTop - paddingBottom;
canvas.save();
- canvas.scale(((float)dw)/vw, ((float)dh)/vh);
- view.draw(canvas);
+ canvas.scale( (float) outBitmap.getWidth() / visibleWidth,
+ (float) outBitmap.getHeight() / visibleHeight);
+ canvas.translate(-paddingLeft, -paddingTop);
+ mDecor.draw(canvas);
canvas.restore();
return true;
@@ -1567,6 +1586,12 @@
return new BackStackEntry(mFragments);
}
+ void invalidateFragmentIndex(int index) {
+ if (mAllLoaderManagers != null) {
+ mAllLoaderManagers.remove(index);
+ }
+ }
+
/**
* Called when a Fragment is being attached to this activity, immediately
* after the call to its {@link Fragment#onAttach Fragment.onAttach()}
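
Since onCreateThumbnail(Bitmap, Canvas) is a public Activity callback, an application can still substitute its own drawing instead of the decor snapshot built above; a minimal override sketch (the colors and text are arbitrary):

    import android.app.Activity;
    import android.graphics.Bitmap;
    import android.graphics.Canvas;
    import android.graphics.Color;
    import android.graphics.Paint;

    public class BrandedThumbnailActivity extends Activity {
        @Override
        public boolean onCreateThumbnail(Bitmap outBitmap, Canvas canvas) {
            // Paint a simple placeholder instead of the default decor snapshot.
            canvas.drawColor(Color.DKGRAY);
            Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
            paint.setColor(Color.WHITE);
            paint.setTextSize(outBitmap.getHeight() / 8f);
            canvas.drawText("Example", 16f, outBitmap.getHeight() / 2f, paint);
            return true;  // true means the bitmap now holds a valid thumbnail
        }
    }
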
diff --git a/core/java/android/app/ActivityManager.java b/core/java/android/app/ActivityManager.java
index eb7520f..d66e98b 100644
--- a/core/java/android/app/ActivityManager.java
+++ b/core/java/android/app/ActivityManager.java
@@ -285,24 +285,54 @@
* @param maxNum The maximum number of entries to return in the list. The
* actual number returned may be smaller, depending on how many tasks the
* user has started.
- *
+ *
+ * @param flags Optional flags
+ * @param receiver Optional receiver for delayed thumbnails
+ *
* @return Returns a list of RunningTaskInfo records describing each of
* the running tasks.
*
+ * Some thumbnails may not be available at the time of this call. The optional
+ * receiver may be used to receive those thumbnails.
+ *
* @throws SecurityException Throws SecurityException if the caller does
* not hold the {@link android.Manifest.permission#GET_TASKS} permission.
+ *
+ * @hide
*/
- public List<RunningTaskInfo> getRunningTasks(int maxNum)
+ public List<RunningTaskInfo> getRunningTasks(int maxNum, int flags, IThumbnailReceiver receiver)
throws SecurityException {
try {
- return (List<RunningTaskInfo>)ActivityManagerNative.getDefault()
- .getTasks(maxNum, 0, null);
+ return ActivityManagerNative.getDefault().getTasks(maxNum, flags, receiver);
} catch (RemoteException e) {
// System dead, we will be dead too soon!
return null;
}
}
-
+
+ /**
+ * Return a list of the tasks that are currently running, with
+ * the most recent being first and older ones after in order. Note that
+ * "running" does not mean any of the task's code is currently loaded or
+ * activity -- the task may have been frozen by the system, so that it
+ * can be restarted in its previous state when next brought to the
+ * foreground.
+ *
+ * @param maxNum The maximum number of entries to return in the list. The
+ * actual number returned may be smaller, depending on how many tasks the
+ * user has started.
+ *
+ * @return Returns a list of RunningTaskInfo records describing each of
+ * the running tasks.
+ *
+ * @throws SecurityException Throws SecurityException if the caller does
+ * not hold the {@link android.Manifest.permission#GET_TASKS} permission.
+ */
+ public List<RunningTaskInfo> getRunningTasks(int maxNum)
+ throws SecurityException {
+ return getRunningTasks(maxNum, 0, null);
+ }
+
/**
* Information you can retrieve about a particular Service that is
* currently running in the system.
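
The single-argument overload remains the public entry point; a small sketch listing the top running tasks (requires android.permission.GET_TASKS):

    import android.app.ActivityManager;
    import android.app.ActivityManager.RunningTaskInfo;
    import android.content.Context;
    import android.util.Log;
    import java.util.List;

    final class TaskLister {
        static void logTopTasks(Context context) {
            ActivityManager am =
                    (ActivityManager) context.getSystemService(Context.ACTIVITY_SERVICE);
            // Public overload; the (maxNum, flags, receiver) form above is @hide.
            List<RunningTaskInfo> tasks = am.getRunningTasks(5);
            for (RunningTaskInfo task : tasks) {
                Log.d("TaskLister", "task " + task.id + " top=" + task.topActivity);
            }
        }
    }
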
diff --git a/core/java/android/app/ActivityManagerNative.java b/core/java/android/app/ActivityManagerNative.java
index 1fe85e6..43a08b5 100644
--- a/core/java/android/app/ActivityManagerNative.java
+++ b/core/java/android/app/ActivityManagerNative.java
@@ -1294,6 +1294,19 @@
return true;
}
+ case DUMP_HEAP_TRANSACTION: {
+ data.enforceInterface(IActivityManager.descriptor);
+ String process = data.readString();
+ boolean managed = data.readInt() != 0;
+ String path = data.readString();
+ ParcelFileDescriptor fd = data.readInt() != 0
+ ? data.readFileDescriptor() : null;
+ boolean res = dumpHeap(process, managed, path, fd);
+ reply.writeNoException();
+ reply.writeInt(res ? 1 : 0);
+ return true;
+ }
+
}
return super.onTransact(code, data, reply, flags);
@@ -2874,6 +2887,28 @@
data.recycle();
reply.recycle();
}
-
+
+ public boolean dumpHeap(String process, boolean managed,
+ String path, ParcelFileDescriptor fd) throws RemoteException {
+ Parcel data = Parcel.obtain();
+ Parcel reply = Parcel.obtain();
+ data.writeInterfaceToken(IActivityManager.descriptor);
+ data.writeString(process);
+ data.writeInt(managed ? 1 : 0);
+ data.writeString(path);
+ if (fd != null) {
+ data.writeInt(1);
+ fd.writeToParcel(data, Parcelable.PARCELABLE_WRITE_RETURN_VALUE);
+ } else {
+ data.writeInt(0);
+ }
+ mRemote.transact(DUMP_HEAP_TRANSACTION, data, reply, 0);
+ reply.readException();
+ boolean res = reply.readInt() != 0;
+ reply.recycle();
+ data.recycle();
+ return res;
+ }
+
private IBinder mRemote;
}
diff --git a/core/java/android/app/ActivityThread.java b/core/java/android/app/ActivityThread.java
index d788be8..c800fbe 100644
--- a/core/java/android/app/ActivityThread.java
+++ b/core/java/android/app/ActivityThread.java
@@ -116,6 +116,7 @@
*/
public final class ActivityThread {
static final String TAG = "ActivityThread";
+ private static final android.graphics.Bitmap.Config THUMBNAIL_FORMAT = Bitmap.Config.RGB_565;
private static final boolean DEBUG = false;
static final boolean localLOGV = DEBUG ? Config.LOGD : Config.LOGV;
static final boolean DEBUG_BROADCAST = false;
@@ -356,6 +357,11 @@
ParcelFileDescriptor fd;
}
+ private static final class DumpHeapData {
+ String path;
+ ParcelFileDescriptor fd;
+ }
+
private final class ApplicationThread extends ApplicationThreadNative {
private static final String HEAP_COLUMN = "%17s %8s %8s %8s %8s";
private static final String ONE_COUNT_COLUMN = "%17s %8d";
@@ -623,6 +629,13 @@
queueOrSendMessage(H.PROFILER_CONTROL, pcd, start ? 1 : 0);
}
+ public void dumpHeap(boolean managed, String path, ParcelFileDescriptor fd) {
+ DumpHeapData dhd = new DumpHeapData();
+ dhd.path = path;
+ dhd.fd = fd;
+ queueOrSendMessage(H.DUMP_HEAP, dhd, managed ? 1 : 0);
+ }
+
public void setSchedulingGroup(int group) {
// Note: do this immediately, since going into the foreground
// should happen regardless of what pending work we have to do
@@ -874,6 +887,7 @@
public static final int ENABLE_JIT = 132;
public static final int DISPATCH_PACKAGE_BROADCAST = 133;
public static final int SCHEDULE_CRASH = 134;
+ public static final int DUMP_HEAP = 135;
String codeToString(int code) {
if (localLOGV) {
switch (code) {
@@ -912,6 +926,7 @@
case ENABLE_JIT: return "ENABLE_JIT";
case DISPATCH_PACKAGE_BROADCAST: return "DISPATCH_PACKAGE_BROADCAST";
case SCHEDULE_CRASH: return "SCHEDULE_CRASH";
+ case DUMP_HEAP: return "DUMP_HEAP";
}
}
return "(unknown)";
@@ -1037,6 +1052,9 @@
break;
case SCHEDULE_CRASH:
throw new RemoteServiceException((String)msg.obj);
+ case DUMP_HEAP:
+ handleDumpHeap(msg.arg1 != 0, (DumpHeapData)msg.obj);
+ break;
}
}
@@ -2228,13 +2246,24 @@
h = mThumbnailHeight;
}
- // XXX Only set hasAlpha if needed?
- thumbnail = Bitmap.createBitmap(w, h, Bitmap.Config.RGB_565);
- thumbnail.eraseColor(0);
- Canvas cv = new Canvas(thumbnail);
- if (!r.activity.onCreateThumbnail(thumbnail, cv)) {
- thumbnail = null;
+ // On platforms where we don't want thumbnails, set dims to (0,0)
+ if ((w > 0) && (h > 0)) {
+ View topView = r.activity.getWindow().getDecorView();
+
+ // Maximize bitmap by capturing in native aspect.
+ if (topView.getWidth() >= topView.getHeight()) {
+ thumbnail = Bitmap.createBitmap(w, h, THUMBNAIL_FORMAT);
+ } else {
+ thumbnail = Bitmap.createBitmap(h, w, THUMBNAIL_FORMAT);
+ }
+
+ thumbnail.eraseColor(0);
+ Canvas cv = new Canvas(thumbnail);
+ if (!r.activity.onCreateThumbnail(thumbnail, cv)) {
+ thumbnail = null;
+ }
}
+
} catch (Exception e) {
if (!mInstrumentation.onException(r.activity, e)) {
throw new RuntimeException(
@@ -2365,7 +2394,7 @@
if (info != null) {
try {
// First create a thumbnail for the activity...
- //info.thumbnail = createThumbnailBitmap(r);
+ info.thumbnail = createThumbnailBitmap(r);
info.description = r.activity.onCreateDescription();
} catch (Exception e) {
if (!mInstrumentation.onException(r.activity, e)) {
@@ -3015,6 +3044,25 @@
}
}
+ final void handleDumpHeap(boolean managed, DumpHeapData dhd) {
+ if (managed) {
+ try {
+ Debug.dumpHprofData(dhd.path, dhd.fd.getFileDescriptor());
+ } catch (IOException e) {
+ Slog.w(TAG, "Managed heap dump failed on path " + dhd.path
+ + " -- can the process access this path?");
+ } finally {
+ try {
+ dhd.fd.close();
+ } catch (IOException e) {
+ Slog.w(TAG, "Failure closing profile fd", e);
+ }
+ }
+ } else {
+ Debug.dumpNativeHeap(dhd.fd.getFileDescriptor());
+ }
+ }
+
final void handleDispatchPackageBroadcast(int cmd, String[] packages) {
boolean hasPkgInfo = false;
if (packages != null) {
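
handleDumpHeap() ultimately calls Debug.dumpHprofData(); an application can trigger the same managed-heap dump of its own process directly (the path here is illustrative and must be writable by the process):

    import android.os.Debug;
    import android.util.Log;
    import java.io.IOException;

    final class SelfHeapDump {
        static void dumpTo(String path) {
            try {
                // Writes an hprof snapshot of this process's managed heap.
                Debug.dumpHprofData(path);
            } catch (IOException e) {
                Log.w("SelfHeapDump", "heap dump to " + path + " failed", e);
            }
        }
    }
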
diff --git a/core/java/android/app/ApplicationThreadNative.java b/core/java/android/app/ApplicationThreadNative.java
index 1c20062..dc2145f 100644
--- a/core/java/android/app/ApplicationThreadNative.java
+++ b/core/java/android/app/ApplicationThreadNative.java
@@ -403,6 +403,17 @@
scheduleCrash(msg);
return true;
}
+
+ case DUMP_HEAP_TRANSACTION:
+ {
+ data.enforceInterface(IApplicationThread.descriptor);
+ boolean managed = data.readInt() != 0;
+ String path = data.readString();
+ ParcelFileDescriptor fd = data.readInt() != 0
+ ? data.readFileDescriptor() : null;
+ dumpHeap(managed, path, fd);
+ return true;
+ }
}
return super.onTransact(code, data, reply, flags);
@@ -829,5 +840,22 @@
data.recycle();
}
+
+ public void dumpHeap(boolean managed, String path,
+ ParcelFileDescriptor fd) throws RemoteException {
+ Parcel data = Parcel.obtain();
+ data.writeInterfaceToken(IApplicationThread.descriptor);
+ data.writeInt(managed ? 1 : 0);
+ data.writeString(path);
+ if (fd != null) {
+ data.writeInt(1);
+ fd.writeToParcel(data, Parcelable.PARCELABLE_WRITE_RETURN_VALUE);
+ } else {
+ data.writeInt(0);
+ }
+ mRemote.transact(DUMP_HEAP_TRANSACTION, data, null,
+ IBinder.FLAG_ONEWAY);
+ data.recycle();
+ }
}
diff --git a/core/java/android/app/ContextImpl.java b/core/java/android/app/ContextImpl.java
index 8a9a5bb..a2a74f8 100644
--- a/core/java/android/app/ContextImpl.java
+++ b/core/java/android/app/ContextImpl.java
@@ -67,6 +67,7 @@
import android.media.AudioManager;
import android.net.ConnectivityManager;
import android.net.IConnectivityManager;
+import android.net.DownloadManager;
import android.net.ThrottleManager;
import android.net.IThrottleManager;
import android.net.Uri;
@@ -197,6 +198,7 @@
private DropBoxManager mDropBoxManager = null;
private DevicePolicyManager mDevicePolicyManager = null;
private UiModeManager mUiModeManager = null;
+ private DownloadManager mDownloadManager = null;
private final Object mSync = new Object();
@@ -981,6 +983,8 @@
return getDevicePolicyManager();
} else if (UI_MODE_SERVICE.equals(name)) {
return getUiModeManager();
+ } else if (DOWNLOAD_SERVICE.equals(name)) {
+ return getDownloadManager();
}
return null;
@@ -1199,6 +1203,15 @@
return mUiModeManager;
}
+ private DownloadManager getDownloadManager() {
+ synchronized (mSync) {
+ if (mDownloadManager == null) {
+ mDownloadManager = new DownloadManager(getContentResolver());
+ }
+ }
+ return mDownloadManager;
+ }
+
@Override
public int checkPermission(String permission, int pid, int uid) {
if (permission == null) {
@@ -1702,8 +1715,9 @@
if (resolveInfo == null) {
return null;
}
- Intent intent = new Intent(Intent.ACTION_MAIN);
- intent.setClassName(packageName, resolveInfo.activityInfo.name);
+ Intent intent = new Intent(intentToResolve);
+ intent.setClassName(resolveInfo.activityInfo.applicationInfo.packageName,
+ resolveInfo.activityInfo.name);
intent.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
return intent;
}
diff --git a/core/java/android/app/FragmentManager.java b/core/java/android/app/FragmentManager.java
index cb928a7..4f3043c 100644
--- a/core/java/android/app/FragmentManager.java
+++ b/core/java/android/app/FragmentManager.java
@@ -376,6 +376,7 @@
mAvailIndices = new ArrayList<Integer>();
}
mAvailIndices.add(f.mIndex);
+ mActivity.invalidateFragmentIndex(f.mIndex);
f.clearIndex();
}
diff --git a/core/java/android/app/IActivityManager.java b/core/java/android/app/IActivityManager.java
index 20c9a80..8ea59a7 100644
--- a/core/java/android/app/IActivityManager.java
+++ b/core/java/android/app/IActivityManager.java
@@ -316,7 +316,11 @@
public void crashApplication(int uid, int initialPid, String packageName,
String message) throws RemoteException;
-
+
+ // Cause the specified process to dump the specified heap.
+ public boolean dumpHeap(String process, boolean managed, String path,
+ ParcelFileDescriptor fd) throws RemoteException;
+
/*
* Private non-Binder interfaces
*/
@@ -533,4 +537,5 @@
int SET_IMMERSIVE_TRANSACTION = IBinder.FIRST_CALL_TRANSACTION+111;
int IS_TOP_ACTIVITY_IMMERSIVE_TRANSACTION = IBinder.FIRST_CALL_TRANSACTION+112;
int CRASH_APPLICATION_TRANSACTION = IBinder.FIRST_CALL_TRANSACTION+113;
+ int DUMP_HEAP_TRANSACTION = IBinder.FIRST_CALL_TRANSACTION+114;
}
diff --git a/core/java/android/app/IApplicationThread.java b/core/java/android/app/IApplicationThread.java
index c8ef17f..039bcb9 100644
--- a/core/java/android/app/IApplicationThread.java
+++ b/core/java/android/app/IApplicationThread.java
@@ -97,6 +97,8 @@
void scheduleActivityConfigurationChanged(IBinder token) throws RemoteException;
void profilerControl(boolean start, String path, ParcelFileDescriptor fd)
throws RemoteException;
+ void dumpHeap(boolean managed, String path, ParcelFileDescriptor fd)
+ throws RemoteException;
void setSchedulingGroup(int group) throws RemoteException;
void getMemoryInfo(Debug.MemoryInfo outInfo) throws RemoteException;
static final int PACKAGE_REMOVED = 0;
@@ -140,4 +142,5 @@
int SCHEDULE_SUICIDE_TRANSACTION = IBinder.FIRST_CALL_TRANSACTION+32;
int DISPATCH_PACKAGE_BROADCAST_TRANSACTION = IBinder.FIRST_CALL_TRANSACTION+33;
int SCHEDULE_CRASH_TRANSACTION = IBinder.FIRST_CALL_TRANSACTION+34;
+ int DUMP_HEAP_TRANSACTION = IBinder.FIRST_CALL_TRANSACTION+35;
}
diff --git a/core/java/android/app/ListActivity.java b/core/java/android/app/ListActivity.java
index 4bf5518..d49968f 100644
--- a/core/java/android/app/ListActivity.java
+++ b/core/java/android/app/ListActivity.java
@@ -309,7 +309,7 @@
if (mList != null) {
return;
}
- setContentView(com.android.internal.R.layout.list_content);
+ setContentView(com.android.internal.R.layout.list_content_simple);
}
diff --git a/core/java/android/app/ListFragment.java b/core/java/android/app/ListFragment.java
index 96485f7..73ef869 100644
--- a/core/java/android/app/ListFragment.java
+++ b/core/java/android/app/ListFragment.java
@@ -172,11 +172,17 @@
* is {@link android.R.id#list android.R.id.list} and can optionally
* have a sibling view id {@link android.R.id#empty android.R.id.empty}
* that is to be shown when the list is empty.
+ *
+ * <p>If you are overriding this method with your own custom content,
+ * consider including the standard layout {@link android.R.layout#list_content}
+ * in your layout file, so that you continue to retain all of the standard
+ * behavior of ListFragment. In particular, this is currently the only
+ * way to have the built-in indeterminate progress state be shown.
*/
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
- return inflater.inflate(com.android.internal.R.layout.list_content_rich,
+ return inflater.inflate(com.android.internal.R.layout.list_content,
container, false);
}
@@ -217,9 +223,15 @@
* Provide the cursor for the list view.
*/
public void setListAdapter(ListAdapter adapter) {
+ boolean hadAdapter = mAdapter != null;
mAdapter = adapter;
if (mList != null) {
mList.setAdapter(adapter);
+ if (!mListShown && !hadAdapter) {
+ // The list was hidden, and previously didn't have an
+ // adapter. It is now time to show it.
+ setListShown(true, getView().getWindowToken() != null);
+ }
}
}
@@ -276,12 +288,38 @@
* displayed if you are waiting for the initial data to show in it. During
* this time an indeterminant progress indicator will be shown instead.
*
+ * <p>Applications do not normally need to use this themselves. The default
+ * behavior of ListFragment is to start with the list not being shown, only
+ * showing it once an adapter is given with {@link #setListAdapter(ListAdapter)}.
+ * If the list at that point had not been shown, when it does get shown
+ * it will be done without the user ever seeing the hidden state.
+ *
+ * @param shown If true, the list view is shown; if false, the progress
+ * indicator. The initial value is true.
+ */
+ public void setListShown(boolean shown) {
+ setListShown(shown, true);
+ }
+
+ /**
+ * Like {@link #setListShown(boolean)}, but no animation is used when
+ * transitioning from the previous state.
+ */
+ public void setListShownNoAnimation(boolean shown) {
+ setListShown(shown, false);
+ }
+
+ /**
+ * Control whether the list is being displayed. You can make it not
+ * displayed if you are waiting for the initial data to show in it. During
+ * this time an indeterminate progress indicator will be shown instead.
+ *
* @param shown If true, the list view is shown; if false, the progress
* indicator. The initial value is true.
* @param animate If true, an animation will be used to transition to the
* new state.
*/
- public void setListShown(boolean shown, boolean animate) {
+ private void setListShown(boolean shown, boolean animate) {
ensureList();
if (mProgressContainer == null) {
throw new IllegalStateException("Can't be used with a custom content view");
@@ -356,6 +394,12 @@
mList.setOnItemClickListener(mOnClickListener);
if (mAdapter != null) {
setListAdapter(mAdapter);
+ } else {
+ // We are starting without an adapter, so assume we won't
+ // have our data right away and start with the progress indicator.
+ if (mProgressContainer != null) {
+ setListShown(false, false);
+ }
}
mHandler.post(mRequestFocus);
}
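
A sketch of the flow this change enables: start the fragment without an adapter (the progress indicator shows), then install the adapter once data arrives; the delayed runnable here merely simulates an asynchronous source.

    import android.app.ListFragment;
    import android.os.Bundle;
    import android.os.Handler;
    import android.widget.ArrayAdapter;

    public class DelayedListFragment extends ListFragment {
        private final Handler mHandler = new Handler();

        @Override
        public void onActivityCreated(Bundle savedInstanceState) {
            super.onActivityCreated(savedInstanceState);
            // No adapter yet, so the fragment shows its progress indicator.
            mHandler.postDelayed(new Runnable() {
                public void run() {
                    // Installing the adapter flips the fragment to the list view.
                    setListAdapter(new ArrayAdapter<String>(getActivity(),
                            android.R.layout.simple_list_item_1,
                            new String[] { "alpha", "beta", "gamma" }));
                }
            }, 2000);
        }
    }
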
diff --git a/core/java/android/app/LoaderManager.java b/core/java/android/app/LoaderManager.java
index 31e3c40..7600899 100644
--- a/core/java/android/app/LoaderManager.java
+++ b/core/java/android/app/LoaderManager.java
@@ -180,7 +180,7 @@
* will be called as the loader state changes. If at the point of call
* the caller is in its started state, and the requested loader
* already exists and has generated its data, then
- * callback.{@link LoaderCallbacks#onLoadFinished(Loader, Object)} will
+ * callback. {@link LoaderCallbacks#onLoadFinished} will
* be called immediately (inside of this function), so you must be prepared
* for this to happen.
*/
diff --git a/core/java/android/app/NativeActivity.java b/core/java/android/app/NativeActivity.java
index d72dda7..ccc9ae3 100644
--- a/core/java/android/app/NativeActivity.java
+++ b/core/java/android/app/NativeActivity.java
@@ -2,6 +2,7 @@
import dalvik.system.PathClassLoader;
+import android.content.Context;
import android.content.pm.ActivityInfo;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageManager;
@@ -11,12 +12,16 @@
import android.os.Environment;
import android.os.Looper;
import android.os.MessageQueue;
+import android.util.AttributeSet;
import android.view.InputChannel;
import android.view.InputQueue;
import android.view.KeyEvent;
import android.view.Surface;
import android.view.SurfaceHolder;
import android.view.View;
+import android.view.WindowManager;
+import android.view.ViewTreeObserver.OnGlobalLayoutListener;
+import android.view.inputmethod.InputMethodManager;
import java.io.File;
@@ -24,15 +29,26 @@
* Convenience for implementing an activity that will be implemented
* purely in native code. That is, a game (or game-like thing).
*/
-public class NativeActivity extends Activity implements SurfaceHolder.Callback,
- InputQueue.Callback {
+public class NativeActivity extends Activity implements SurfaceHolder.Callback2,
+ InputQueue.Callback, OnGlobalLayoutListener {
public static final String META_DATA_LIB_NAME = "android.app.lib_name";
+ private NativeContentView mNativeContentView;
+ private InputMethodManager mIMM;
+
private int mNativeHandle;
private InputQueue mCurInputQueue;
private SurfaceHolder mCurSurfaceHolder;
+ final int[] mLocation = new int[2];
+ int mLastContentX;
+ int mLastContentY;
+ int mLastContentWidth;
+ int mLastContentHeight;
+
+ private boolean mDispatchingUnhandledKey;
+
private boolean mDestroyed;
private native int loadNativeCode(String path, MessageQueue queue,
@@ -49,18 +65,44 @@
private native void onSurfaceCreatedNative(int handle, Surface surface);
private native void onSurfaceChangedNative(int handle, Surface surface,
int format, int width, int height);
+ private native void onSurfaceRedrawNeededNative(int handle, Surface surface);
private native void onSurfaceDestroyedNative(int handle);
private native void onInputChannelCreatedNative(int handle, InputChannel channel);
private native void onInputChannelDestroyedNative(int handle, InputChannel channel);
+ private native void onContentRectChangedNative(int handle, int x, int y, int w, int h);
+ private native void dispatchKeyEventNative(int handle, KeyEvent event);
+
+ static class NativeContentView extends View {
+ NativeActivity mActivity;
+
+ public NativeContentView(Context context) {
+ super(context);
+ }
+
+ public NativeContentView(Context context, AttributeSet attrs) {
+ super(context, attrs);
+ }
+ }
@Override
protected void onCreate(Bundle savedInstanceState) {
String libname = "main";
ActivityInfo ai;
+ mIMM = (InputMethodManager)getSystemService(Context.INPUT_METHOD_SERVICE);
+
getWindow().takeSurface(this);
getWindow().takeInputQueue(this);
getWindow().setFormat(PixelFormat.RGB_565);
+ getWindow().setSoftInputMode(
+ WindowManager.LayoutParams.SOFT_INPUT_STATE_UNSPECIFIED
+ | WindowManager.LayoutParams.SOFT_INPUT_ADJUST_RESIZE);
+
+ mNativeContentView = new NativeContentView(this);
+ mNativeContentView.mActivity = this;
+ setContentView(mNativeContentView);
+ mNativeContentView.requestFocus();
+ mNativeContentView.getViewTreeObserver().addOnGlobalLayoutListener(this);
try {
ai = getPackageManager().getActivityInfo(
@@ -165,6 +207,18 @@
}
}
+ @Override
+ public boolean dispatchKeyEvent(KeyEvent event) {
+ if (mDispatchingUnhandledKey) {
+ return super.dispatchKeyEvent(event);
+ } else {
+ // Key events from the IME do not go through the input channel;
+ // we need to intercept them here to hand to the application.
+ dispatchKeyEventNative(mNativeHandle, event);
+ return true;
+ }
+ }
+
public void surfaceCreated(SurfaceHolder holder) {
if (!mDestroyed) {
mCurSurfaceHolder = holder;
@@ -179,6 +233,13 @@
}
}
+ public void surfaceRedrawNeeded(SurfaceHolder holder) {
+ if (!mDestroyed) {
+ mCurSurfaceHolder = holder;
+ onSurfaceRedrawNeededNative(mNativeHandle, holder.getSurface());
+ }
+ }
+
public void surfaceDestroyed(SurfaceHolder holder) {
mCurSurfaceHolder = null;
if (!mDestroyed) {
@@ -200,10 +261,32 @@
}
}
+ public void onGlobalLayout() {
+ mNativeContentView.getLocationInWindow(mLocation);
+ int w = mNativeContentView.getWidth();
+ int h = mNativeContentView.getHeight();
+ if (mLocation[0] != mLastContentX || mLocation[1] != mLastContentY
+ || w != mLastContentWidth || h != mLastContentHeight) {
+ mLastContentX = mLocation[0];
+ mLastContentY = mLocation[1];
+ mLastContentWidth = w;
+ mLastContentHeight = h;
+ if (!mDestroyed) {
+ onContentRectChangedNative(mNativeHandle, mLastContentX,
+ mLastContentY, mLastContentWidth, mLastContentHeight);
+ }
+ }
+ }
+
void dispatchUnhandledKeyEvent(KeyEvent event) {
- View decor = getWindow().getDecorView();
- if (decor != null) {
- decor.dispatchKeyEvent(event);
+ try {
+ mDispatchingUnhandledKey = true;
+ View decor = getWindow().getDecorView();
+ if (decor != null) {
+ decor.dispatchKeyEvent(event);
+ }
+ } finally {
+ mDispatchingUnhandledKey = false;
}
}
@@ -214,4 +297,12 @@
void setWindowFormat(int format) {
getWindow().setFormat(format);
}
+
+ void showIme(int mode) {
+ mIMM.showSoftInput(mNativeContentView, mode);
+ }
+
+ void hideIme(int mode) {
+ mIMM.hideSoftInputFromWindow(mNativeContentView.getWindowToken(), mode);
+ }
}
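
NativeActivity now registers itself as a SurfaceHolder.Callback2 on the window (and Window.takeSurface() now expects Callback2, per the api change above). A plain-Java sketch of the same pattern for an activity that draws its own window surface; the rendering here is just a solid color:

    import android.app.Activity;
    import android.graphics.Canvas;
    import android.graphics.Color;
    import android.os.Bundle;
    import android.view.SurfaceHolder;

    public class DirectSurfaceActivity extends Activity implements SurfaceHolder.Callback2 {
        @Override
        protected void onCreate(Bundle savedInstanceState) {
            super.onCreate(savedInstanceState);
            getWindow().takeSurface(this);  // this activity now owns the window surface
        }

        public void surfaceCreated(SurfaceHolder holder) { }

        public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
            drawFrame(holder);
        }

        public void surfaceRedrawNeeded(SurfaceHolder holder) {
            // Called when the window content must be drawn before being shown.
            drawFrame(holder);
        }

        public void surfaceDestroyed(SurfaceHolder holder) { }

        private void drawFrame(SurfaceHolder holder) {
            Canvas canvas = holder.lockCanvas();
            if (canvas != null) {
                canvas.drawColor(Color.BLUE);
                holder.unlockCanvasAndPost(canvas);
            }
        }
    }
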
diff --git a/core/java/android/appwidget/AppWidgetManager.java b/core/java/android/appwidget/AppWidgetManager.java
index d4ce6a1..3f12bf9 100644
--- a/core/java/android/appwidget/AppWidgetManager.java
+++ b/core/java/android/appwidget/AppWidgetManager.java
@@ -22,7 +22,6 @@
import android.os.RemoteException;
import android.os.ServiceManager;
import android.util.DisplayMetrics;
-import android.util.Log;
import android.util.TypedValue;
import android.widget.RemoteViews;
@@ -149,7 +148,7 @@
* instances as possible.</td>
* </tr>
* </table>
- *
+ *
* @see AppWidgetProvider#onUpdate AppWidgetProvider.onUpdate(Context context, AppWidgetManager appWidgetManager, int[] appWidgetIds)
*/
public static final String ACTION_APPWIDGET_UPDATE = "android.appwidget.action.APPWIDGET_UPDATE";
@@ -163,7 +162,7 @@
/**
* Sent when an instance of an AppWidget is removed from the last host.
- *
+ *
* @see AppWidgetProvider#onEnabled AppWidgetProvider.onEnabled(Context context)
*/
public static final String ACTION_APPWIDGET_DISABLED = "android.appwidget.action.APPWIDGET_DISABLED";
@@ -172,7 +171,7 @@
* Sent when an instance of an AppWidget is added to a host for the first time.
* This broadcast is sent at boot time if there is a AppWidgetHost installed with
* an instance for this provider.
- *
+ *
* @see AppWidgetProvider#onEnabled AppWidgetProvider.onEnabled(Context context)
*/
public static final String ACTION_APPWIDGET_ENABLED = "android.appwidget.action.APPWIDGET_ENABLED";
@@ -183,20 +182,21 @@
* @see AppWidgetProviderInfo
*/
public static final String META_DATA_APPWIDGET_PROVIDER = "android.appwidget.provider";
-
+
/**
* Field for the manifest meta-data tag used to indicate any previous name for the
* app widget receiver.
*
* @see AppWidgetProviderInfo
- *
+ *
* @hide Pending API approval
*/
public static final String META_DATA_APPWIDGET_OLD_NAME = "android.appwidget.oldName";
- static WeakHashMap<Context, WeakReference<AppWidgetManager>> sManagerCache = new WeakHashMap();
+ static WeakHashMap<Context, WeakReference<AppWidgetManager>> sManagerCache =
+ new WeakHashMap<Context, WeakReference<AppWidgetManager>>();
static IAppWidgetService sService;
-
+
Context mContext;
private DisplayMetrics mDisplayMetrics;
@@ -219,7 +219,7 @@
}
if (result == null) {
result = new AppWidgetManager(context);
- sManagerCache.put(context, new WeakReference(result));
+ sManagerCache.put(context, new WeakReference<AppWidgetManager>(result));
}
return result;
}
@@ -292,7 +292,15 @@
*/
public List<AppWidgetProviderInfo> getInstalledProviders() {
try {
- return sService.getInstalledProviders();
+ List<AppWidgetProviderInfo> providers = sService.getInstalledProviders();
+ for (AppWidgetProviderInfo info : providers) {
+ // Converting complex to dp.
+ info.minWidth =
+ TypedValue.complexToDimensionPixelSize(info.minWidth, mDisplayMetrics);
+ info.minHeight =
+ TypedValue.complexToDimensionPixelSize(info.minHeight, mDisplayMetrics);
+ }
+ return providers;
}
catch (RemoteException e) {
throw new RuntimeException("system server dead?", e);
@@ -310,7 +318,7 @@
AppWidgetProviderInfo info = sService.getAppWidgetInfo(appWidgetId);
if (info != null) {
// Converting complex to dp.
- info.minWidth =
+ info.minWidth =
TypedValue.complexToDimensionPixelSize(info.minWidth, mDisplayMetrics);
info.minHeight =
TypedValue.complexToDimensionPixelSize(info.minHeight, mDisplayMetrics);
@@ -344,7 +352,7 @@
/**
* Get the list of appWidgetIds that have been bound to the given AppWidget
* provider.
- *
+ *
* @param provider The {@link android.content.BroadcastReceiver} that is the
* AppWidget provider to find appWidgetIds for.
*/
diff --git a/core/java/android/content/Context.java b/core/java/android/content/Context.java
index 86ddee4..b49d801 100644
--- a/core/java/android/content/Context.java
+++ b/core/java/android/content/Context.java
@@ -1561,6 +1561,15 @@
public static final String UI_MODE_SERVICE = "uimode";
/**
+ * Use with {@link #getSystemService} to retrieve a
+ * {@link android.net.DownloadManager} for requesting HTTP downloads.
+ *
+ * @see #getSystemService
+ * @hide (TODO) for now
+ */
+ public static final String DOWNLOAD_SERVICE = "download";
+
+ /**
* Determine whether the given permission is allowed for a particular
* process and user ID running in the system.
*
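
A sketch of the lookup this constant enables; note that DOWNLOAD_SERVICE and android.net.DownloadManager are still @hide at this checkpoint, so this only compiles against the platform source:

    import android.content.Context;
    import android.net.DownloadManager;

    final class Downloads {
        static DownloadManager from(Context context) {
            // ContextImpl lazily constructs the manager around the ContentResolver.
            return (DownloadManager) context.getSystemService(Context.DOWNLOAD_SERVICE);
        }
    }
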
diff --git a/core/java/android/database/sqlite/SQLiteDatabase.java b/core/java/android/database/sqlite/SQLiteDatabase.java
index 441370a..c0226f8 100644
--- a/core/java/android/database/sqlite/SQLiteDatabase.java
+++ b/core/java/android/database/sqlite/SQLiteDatabase.java
@@ -1047,6 +1047,7 @@
closeClosable();
// finalize ALL statements queued up so far
closePendingStatements();
+ releaseCustomFunctions();
// close this database instance - regardless of its reference count value
dbclose();
if (mConnectionPool != null) {
@@ -1083,6 +1084,54 @@
private native void dbclose();
/**
+ * A callback interface for a custom sqlite3 function.
+ * This can be used to create a function that can be called from
+ * sqlite3 database triggers.
+ * @hide
+ */
+ public interface CustomFunction {
+ public void callback(String[] args);
+ }
+
+ /**
+ * Registers a CustomFunction callback as a function that can be called from
+ * sqlite3 database triggers.
+ * @param name the name of the sqlite3 function
+ * @param numArgs the number of arguments for the function
+ * @param function callback to call when the function is executed
+ * @hide
+ */
+ public void addCustomFunction(String name, int numArgs, CustomFunction function) {
+ verifyDbIsOpen();
+ synchronized (mCustomFunctions) {
+ int ref = native_addCustomFunction(name, numArgs, function);
+ if (ref != 0) {
+ // save a reference to the function for cleanup later
+ mCustomFunctions.add(new Integer(ref));
+ } else {
+ throw new SQLiteException("failed to add custom function " + name);
+ }
+ }
+ }
+
+ private void releaseCustomFunctions() {
+ synchronized (mCustomFunctions) {
+ for (int i = 0; i < mCustomFunctions.size(); i++) {
+ Integer function = mCustomFunctions.get(i);
+ native_releaseCustomFunction(function.intValue());
+ }
+ mCustomFunctions.clear();
+ }
+ }
+
+ // list of CustomFunction references so we can clean up when the database closes
+ private final ArrayList<Integer> mCustomFunctions =
+ new ArrayList<Integer>();
+
+ private native int native_addCustomFunction(String name, int numArgs, CustomFunction function);
+ private native void native_releaseCustomFunction(int function);
+
+ /**
* Gets the database version.
*
* @return the database version
@@ -1959,12 +2008,17 @@
}
@Override
- protected void finalize() {
- if (isOpen()) {
- Log.e(TAG, "close() was never explicitly called on database '" +
- mPath + "' ", mStackTrace);
- closeClosable();
- onAllReferencesReleased();
+ protected void finalize() throws Throwable {
+ try {
+ if (isOpen()) {
+ Log.e(TAG, "close() was never explicitly called on database '" +
+ mPath + "' ", mStackTrace);
+ closeClosable();
+ onAllReferencesReleased();
+ releaseCustomFunctions();
+ }
+ } finally {
+ super.finalize();
}
}
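
A hedged sketch of how the new @hide hook might be used: register a callback, then reference the function from a trigger body so sqlite calls back into Java. The table, trigger, and function names below are illustrative.

    import android.database.sqlite.SQLiteDatabase;
    import android.util.Log;

    final class ChangeLogger {
        static void install(SQLiteDatabase db) {
            db.addCustomFunction("_note_change", 1, new SQLiteDatabase.CustomFunction() {
                public void callback(String[] args) {
                    Log.d("ChangeLogger", "row changed: " + args[0]);
                }
            });
            db.execSQL("CREATE TRIGGER IF NOT EXISTS notes_changed "
                    + "AFTER UPDATE ON notes "
                    + "BEGIN SELECT _note_change(new._id); END;");
        }
    }
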
diff --git a/core/java/android/net/DownloadManager.java b/core/java/android/net/DownloadManager.java
new file mode 100644
index 0000000..02b6210
--- /dev/null
+++ b/core/java/android/net/DownloadManager.java
@@ -0,0 +1,721 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.net;
+
+import android.content.ContentResolver;
+import android.content.ContentValues;
+import android.database.Cursor;
+import android.database.CursorWrapper;
+import android.os.ParcelFileDescriptor;
+import android.provider.Downloads;
+import android.util.Log;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * The download manager is a system service that handles long-running HTTP downloads. Clients may
+ * request that a URI be downloaded to a particular destination file. The download manager will
+ * conduct the download in the background, taking care of HTTP interactions and retrying downloads
+ * after failures or across connectivity changes and system reboots.
+ *
+ * Instances of this class should be obtained through
+ * {@link android.content.Context#getSystemService(String)} by passing
+ * {@link android.content.Context#DOWNLOAD_SERVICE}.
+ *
+ * @hide
+ */
+public class DownloadManager {
+ /**
+ * An identifier for a particular download, unique across the system. Clients use this ID to
+ * make subsequent calls related to the download.
+ */
+ public final static String COLUMN_ID = "id";
+
+ /**
+ * The client-supplied title for this download. This will be displayed in system notifications,
+ * if enabled.
+ */
+ public final static String COLUMN_TITLE = "title";
+
+ /**
+ * The client-supplied description of this download. This will be displayed in system
+ * notifications, if enabled.
+ */
+ public final static String COLUMN_DESCRIPTION = "description";
+
+ /**
+ * URI to be downloaded.
+ */
+ public final static String COLUMN_URI = "uri";
+
+ /**
+ * Internet Media Type of the downloaded file. This will be filled in based on the server's
+ * response once the download has started.
+ *
+ * @see <a href="http://www.ietf.org/rfc/rfc1590.txt">RFC 1590, defining Media Types</a>
+ */
+ public final static String COLUMN_MEDIA_TYPE = "media_type";
+
+ /**
+ * Total size of the download in bytes. This will be filled in once the download starts.
+ */
+ public final static String COLUMN_TOTAL_SIZE_BYTES = "total_size";
+
+ /**
+ * URI where the downloaded file will be stored. If a destination is supplied by the client, that URI
+ * will be used here. Otherwise, the value will be filled in with a generated URI once the
+ * download has started.
+ */
+ public final static String COLUMN_LOCAL_URI = "local_uri";
+
+ /**
+ * Current status of the download, as one of the STATUS_* constants.
+ */
+ public final static String COLUMN_STATUS = "status";
+
+ /**
+ * Indicates the type of error that occurred, when {@link #COLUMN_STATUS} is
+ * {@link #STATUS_FAILED}. If an HTTP error occurred, this will hold the HTTP status code as
+ * defined in RFC 2616. Otherwise, it will hold one of the ERROR_* constants.
+ *
+ * If {@link #COLUMN_STATUS} is not {@link #STATUS_FAILED}, this column's value is undefined.
+ *
+ * @see <a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec6.html#sec6.1.1">RFC 2616
+ * status codes</a>
+ */
+ public final static String COLUMN_ERROR_CODE = "error_code";
+
+ /**
+ * Number of bytes downloaded so far.
+ */
+ public final static String COLUMN_BYTES_DOWNLOADED_SO_FAR = "bytes_so_far";
+
+ /**
+ * Timestamp when the download was last modified, in {@link System#currentTimeMillis
+ * System.currentTimeMillis()} (wall clock time in UTC).
+ */
+ public final static String COLUMN_LAST_MODIFIED_TIMESTAMP = "last_modified_timestamp";
+
+
+ /**
+ * Value of {@link #COLUMN_STATUS} when the download is waiting to start.
+ */
+ public final static int STATUS_PENDING = 1 << 0;
+
+ /**
+ * Value of {@link #COLUMN_STATUS} when the download is currently running.
+ */
+ public final static int STATUS_RUNNING = 1 << 1;
+
+ /**
+ * Value of {@link #COLUMN_STATUS} when the download is waiting to retry or resume.
+ */
+ public final static int STATUS_PAUSED = 1 << 2;
+
+ /**
+ * Value of {@link #COLUMN_STATUS} when the download has successfully completed.
+ */
+ public final static int STATUS_SUCCESSFUL = 1 << 3;
+
+ /**
+ * Value of {@link #COLUMN_STATUS} when the download has failed (and will not be retried).
+ */
+ public final static int STATUS_FAILED = 1 << 4;
+
+
+ /**
+ * Value of {@link #COLUMN_ERROR_CODE} when the download has completed with an error that doesn't fit
+ * under any other error code.
+ */
+ public final static int ERROR_UNKNOWN = 1000;
+
+ /**
+ * Value of {@link #COLUMN_ERROR_CODE} when a storage issue arises which doesn't fit under any
+ * other error code. Use the more specific {@link #ERROR_INSUFFICIENT_SPACE} and
+ * {@link #ERROR_DEVICE_NOT_FOUND} when appropriate.
+ */
+ public final static int ERROR_FILE_ERROR = 1001;
+
+ /**
+ * Value of {@link #COLUMN_ERROR_CODE} when an HTTP code was received that the download manager
+ * can't handle.
+ */
+ public final static int ERROR_UNHANDLED_HTTP_CODE = 1002;
+
+ /**
+ * Value of {@link #COLUMN_ERROR_CODE} when an error receiving or processing data occurred at
+ * the HTTP level.
+ */
+ public final static int ERROR_HTTP_DATA_ERROR = 1004;
+
+ /**
+ * Value of {@link #COLUMN_ERROR_CODE} when there were too many redirects.
+ */
+ public final static int ERROR_TOO_MANY_REDIRECTS = 1005;
+
+ /**
+ * Value of {@link #COLUMN_ERROR_CODE} when there was insufficient storage space. Typically,
+ * this is because the SD card is full.
+ */
+ public final static int ERROR_INSUFFICIENT_SPACE = 1006;
+
+ /**
+ * Value of {@link #COLUMN_ERROR_CODE} when no external storage device was found. Typically,
+ * this is because the SD card is not mounted.
+ */
+ public final static int ERROR_DEVICE_NOT_FOUND = 1007;
+
+
+ // this array must contain all public columns
+ private static final String[] COLUMNS = new String[] {
+ COLUMN_ID,
+ COLUMN_TITLE,
+ COLUMN_DESCRIPTION,
+ COLUMN_URI,
+ COLUMN_MEDIA_TYPE,
+ COLUMN_TOTAL_SIZE_BYTES,
+ COLUMN_LOCAL_URI,
+ COLUMN_STATUS,
+ COLUMN_ERROR_CODE,
+ COLUMN_BYTES_DOWNLOADED_SO_FAR,
+ COLUMN_LAST_MODIFIED_TIMESTAMP
+ };
+
+ // columns to request from DownloadProvider
+ private static final String[] UNDERLYING_COLUMNS = new String[] {
+ Downloads.Impl._ID,
+ Downloads.COLUMN_TITLE,
+ Downloads.COLUMN_DESCRIPTION,
+ Downloads.COLUMN_URI,
+ Downloads.COLUMN_MIME_TYPE,
+ Downloads.COLUMN_TOTAL_BYTES,
+ Downloads._DATA,
+ Downloads.COLUMN_STATUS,
+ Downloads.COLUMN_CURRENT_BYTES,
+ Downloads.COLUMN_LAST_MODIFICATION,
+ };
+
+ private static final Set<String> LONG_COLUMNS = new HashSet<String>(
+ Arrays.asList(COLUMN_ID, COLUMN_TOTAL_SIZE_BYTES, COLUMN_STATUS, COLUMN_ERROR_CODE,
+ COLUMN_BYTES_DOWNLOADED_SO_FAR, COLUMN_LAST_MODIFIED_TIMESTAMP));
+
+ /**
+ * This class contains all the information necessary to request a new download. The URI is the
+ * only required parameter.
+ */
+ public static class Request {
+ /**
+ * Bit flag for setShowNotification indicating that a notification should be created while the
+ * download is running.
+ */
+ private static final int NOTIFICATION_WHEN_RUNNING = 1;
+
+ Uri mUri;
+ Uri mDestinationUri;
+ Map<String, String> mRequestHeaders = new HashMap<String, String>();
+ String mTitle;
+ String mDescription;
+ int mNotificationFlags;
+
+ private String mMediaType;
+
+ /**
+ * @param uri the HTTP URI to download.
+ */
+ public Request(Uri uri) {
+ if (uri == null) {
+ throw new NullPointerException();
+ }
+ String scheme = uri.getScheme();
+ if (scheme == null || !scheme.equals("http")) {
+ throw new IllegalArgumentException("Can only download HTTP URIs: " + uri);
+ }
+ mUri = uri;
+ }
+
+ /**
+ * Set the local destination for the downloaded data. Must be a file URI to a path on
+ * external storage, and the calling application must have the WRITE_EXTERNAL_STORAGE
+ * permission.
+ *
+ * By default, downloads are saved to a generated file in the download cache and may be
+ * deleted by the download manager at any time.
+ *
+ * @return this object
+ */
+ public Request setDestinationUri(Uri uri) {
+ mDestinationUri = uri;
+ return this;
+ }
+
+ /**
+ * Set an HTTP header to be included with the download request.
+ * @param header HTTP header name
+ * @param value header value
+ * @return this object
+ */
+ public Request setRequestHeader(String header, String value) {
+ mRequestHeaders.put(header, value);
+ return this;
+ }
+
+ /**
+ * Set the title of this download, to be displayed in notifications (if enabled).
+ * @return this object
+ */
+ public Request setTitle(String title) {
+ mTitle = title;
+ return this;
+ }
+
+ /**
+ * Set a description of this download, to be displayed in notifications (if enabled).
+ * @return this object
+ */
+ public Request setDescription(String description) {
+ mDescription = description;
+ return this;
+ }
+
+ /**
+ * Set the Internet Media Type of this download. This will override the media type declared
+ * in the server's response.
+ * @see <a href="http://www.ietf.org/rfc/rfc1590.txt">RFC 1590, defining Media Types</a>
+ * @return this object
+ */
+ public Request setMediaType(String mediaType) {
+ mMediaType = mediaType;
+ return this;
+ }
+
+ /**
+ * Control system notifications posted by the download manager for this download. If
+ * enabled, the download manager posts notifications about downloads through the system
+ * {@link android.app.NotificationManager}.
+ *
+ * @param flags any combination of the NOTIFICATION_* bit flags
+ * @return this object
+ */
+ public Request setShowNotification(int flags) {
+ mNotificationFlags = flags;
+ return this;
+ }
+
+ public Request setAllowedNetworkTypes(int flags) {
+ // TODO allowed networks support
+ throw new UnsupportedOperationException();
+ }
+
+ public Request setAllowedOverRoaming(boolean allowed) {
+ // TODO roaming support
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * @return ContentValues to be passed to DownloadProvider.insert()
+ */
+ ContentValues toContentValues() {
+ ContentValues values = new ContentValues();
+ assert mUri != null;
+ values.put(Downloads.COLUMN_URI, mUri.toString());
+
+ if (mDestinationUri != null) {
+ values.put(Downloads.COLUMN_DESTINATION, Downloads.Impl.DESTINATION_FILE_URI);
+ values.put(Downloads.COLUMN_FILE_NAME_HINT, mDestinationUri.toString());
+ } else {
+ values.put(Downloads.COLUMN_DESTINATION,
+ Downloads.DESTINATION_CACHE_PARTITION_PURGEABLE);
+ }
+
+ if (!mRequestHeaders.isEmpty()) {
+ // TODO request headers support
+ throw new UnsupportedOperationException();
+ }
+
+ putIfNonNull(values, Downloads.COLUMN_TITLE, mTitle);
+ putIfNonNull(values, Downloads.COLUMN_DESCRIPTION, mDescription);
+ putIfNonNull(values, Downloads.COLUMN_MIME_TYPE, mMediaType);
+
+ int visibility = Downloads.VISIBILITY_HIDDEN;
+ if ((mNotificationFlags & NOTIFICATION_WHEN_RUNNING) != 0) {
+ visibility = Downloads.VISIBILITY_VISIBLE;
+ }
+ values.put(Downloads.COLUMN_VISIBILITY, visibility);
+
+ return values;
+ }
+
+ private void putIfNonNull(ContentValues contentValues, String key, String value) {
+ if (value != null) {
+ contentValues.put(key, value);
+ }
+ }
+ }
+
+ /**
+ * This class may be used to filter download manager queries.
+ */
+ public static class Query {
+ private Long mId;
+ private Integer mStatusFlags = null;
+
+ /**
+ * Include only the download with the given ID.
+ * @return this object
+ */
+ public Query setFilterById(long id) {
+ mId = id;
+ return this;
+ }
+
+ /**
+ * Include only downloads with status matching any of the given status flags.
+ * @param flags any combination of the STATUS_* bit flags
+ * @return this object
+ */
+ public Query setFilterByStatus(int flags) {
+ mStatusFlags = flags;
+ return this;
+ }
+
+ /**
+ * Run this query using the given ContentResolver.
+ * @param projection the projection to pass to ContentResolver.query()
+ * @return the Cursor returned by ContentResolver.query()
+ */
+ Cursor runQuery(ContentResolver resolver, String[] projection) {
+ Uri uri = Downloads.CONTENT_URI;
+ String selection = null;
+
+ if (mId != null) {
+ uri = Uri.withAppendedPath(uri, mId.toString());
+ }
+
+ if (mStatusFlags != null) {
+ List<String> parts = new ArrayList<String>();
+ if ((mStatusFlags & STATUS_PENDING) != 0) {
+ parts.add(statusClause("=", Downloads.STATUS_PENDING));
+ }
+ if ((mStatusFlags & STATUS_RUNNING) != 0) {
+ parts.add(statusClause("=", Downloads.STATUS_RUNNING));
+ }
+ if ((mStatusFlags & STATUS_PAUSED) != 0) {
+ parts.add(statusClause("=", Downloads.STATUS_PENDING_PAUSED));
+ parts.add(statusClause("=", Downloads.STATUS_RUNNING_PAUSED));
+ }
+ if ((mStatusFlags & STATUS_SUCCESSFUL) != 0) {
+ parts.add(statusClause("=", Downloads.STATUS_SUCCESS));
+ }
+ if ((mStatusFlags & STATUS_FAILED) != 0) {
+ parts.add("(" + statusClause(">=", 400)
+ + " AND " + statusClause("<", 600) + ")");
+ }
+ selection = joinStrings(" OR ", parts);
+ Log.w("DownloadManagerPublic", selection);
+ }
+ String orderBy = Downloads.COLUMN_LAST_MODIFICATION + " DESC";
+ return resolver.query(uri, projection, selection, null, orderBy);
+ }
+
+ private String joinStrings(String joiner, Iterable<String> parts) {
+ StringBuilder builder = new StringBuilder();
+ boolean first = true;
+ for (String part : parts) {
+ if (!first) {
+ builder.append(joiner);
+ }
+ builder.append(part);
+ first = false;
+ }
+ return builder.toString();
+ }
+
+ private String statusClause(String operator, int value) {
+ return Downloads.COLUMN_STATUS + operator + "'" + value + "'";
+ }
+ }
+
+ private ContentResolver mResolver;
+
+ /**
+ * @hide
+ */
+ public DownloadManager(ContentResolver resolver) {
+ mResolver = resolver;
+ }
+
+ /**
+ * Enqueue a new download. The download will start automatically once the download manager is
+ * ready to execute it and connectivity is available.
+ *
+ * @param request the parameters specifying this download
+ * @return an ID for the download, unique across the system. This ID is used to make future
+ * calls related to this download.
+ */
+ public long enqueue(Request request) {
+ ContentValues values = request.toContentValues();
+ Uri downloadUri = mResolver.insert(Downloads.CONTENT_URI, values);
+ long id = Long.parseLong(downloadUri.getLastPathSegment());
+ return id;
+ }
+
+ /**
+ * Cancel a download and remove it from the download manager. The download will be stopped if
+ * it was running, and it will no longer be accessible through the download manager. If a file
+ * was already downloaded, it will not be deleted.
+ *
+ * @param id the ID of the download
+ */
+ public void remove(long id) {
+ int numDeleted = mResolver.delete(getDownloadUri(id), null, null);
+ if (numDeleted == 0) {
+ throw new IllegalArgumentException("Download " + id + " does not exist");
+ }
+ }
+
+ /**
+ * Query the download manager about downloads that have been requested.
+ * @param query parameters specifying filters for this query
+ * @return a Cursor over the result set of downloads, with columns consisting of all the
+ * COLUMN_* constants.
+ */
+ public Cursor query(Query query) {
+ Cursor underlyingCursor = query.runQuery(mResolver, UNDERLYING_COLUMNS);
+ return new CursorTranslator(underlyingCursor);
+ }
+
+ /**
+ * Open a downloaded file for reading. The download must have completed.
+ * @param id the ID of the download
+ * @return a read-only {@link ParcelFileDescriptor}
+ * @throws FileNotFoundException if the destination file does not already exist
+ */
+ public ParcelFileDescriptor openDownloadedFile(long id) throws FileNotFoundException {
+ return mResolver.openFileDescriptor(getDownloadUri(id), "r");
+ }
+
+ /**
+ * Get the DownloadProvider URI for the download with the given ID.
+ */
+ private Uri getDownloadUri(long id) {
+ Uri downloadUri = Uri.withAppendedPath(Downloads.CONTENT_URI, Long.toString(id));
+ return downloadUri;
+ }
+
+ /**
+ * This class wraps a cursor returned by DownloadProvider -- the "underlying cursor" -- and
+ * presents a different set of columns, those defined in the DownloadManager.COLUMN_* constants.
+ * Some columns correspond directly to underlying values while others are computed from
+ * underlying data.
+ */
+ private static class CursorTranslator extends CursorWrapper {
+ public CursorTranslator(Cursor cursor) {
+ super(cursor);
+ }
+
+ @Override
+ public int getColumnIndex(String columnName) {
+ return Arrays.asList(COLUMNS).indexOf(columnName);
+ }
+
+ @Override
+ public int getColumnIndexOrThrow(String columnName) throws IllegalArgumentException {
+ int index = getColumnIndex(columnName);
+ if (index == -1) {
+ throw new IllegalArgumentException();
+ }
+ return index;
+ }
+
+ @Override
+ public String getColumnName(int columnIndex) {
+ int numColumns = COLUMNS.length;
+ if (columnIndex < 0 || columnIndex >= numColumns) {
+ throw new IllegalArgumentException("Invalid column index " + columnIndex + ", "
+ + numColumns + " columns exist");
+ }
+ return COLUMNS[columnIndex];
+ }
+
+ @Override
+ public String[] getColumnNames() {
+ String[] returnColumns = new String[COLUMNS.length];
+ System.arraycopy(COLUMNS, 0, returnColumns, 0, COLUMNS.length);
+ return returnColumns;
+ }
+
+ @Override
+ public int getColumnCount() {
+ return COLUMNS.length;
+ }
+
+ @Override
+ public byte[] getBlob(int columnIndex) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public double getDouble(int columnIndex) {
+ return getLong(columnIndex);
+ }
+
+ private boolean isLongColumn(String column) {
+ return LONG_COLUMNS.contains(column);
+ }
+
+ @Override
+ public float getFloat(int columnIndex) {
+ return (float) getDouble(columnIndex);
+ }
+
+ @Override
+ public int getInt(int columnIndex) {
+ return (int) getLong(columnIndex);
+ }
+
+ @Override
+ public long getLong(int columnIndex) {
+ return translateLong(getColumnName(columnIndex));
+ }
+
+ @Override
+ public short getShort(int columnIndex) {
+ return (short) getLong(columnIndex);
+ }
+
+ @Override
+ public String getString(int columnIndex) {
+ return translateString(getColumnName(columnIndex));
+ }
+
+ private String translateString(String column) {
+ if (isLongColumn(column)) {
+ return Long.toString(translateLong(column));
+ }
+ if (column.equals(COLUMN_TITLE)) {
+ return getUnderlyingString(Downloads.COLUMN_TITLE);
+ }
+ if (column.equals(COLUMN_DESCRIPTION)) {
+ return getUnderlyingString(Downloads.COLUMN_DESCRIPTION);
+ }
+ if (column.equals(COLUMN_URI)) {
+ return getUnderlyingString(Downloads.COLUMN_URI);
+ }
+ if (column.equals(COLUMN_MEDIA_TYPE)) {
+ return getUnderlyingString(Downloads.COLUMN_MIME_TYPE);
+ }
+ assert column.equals(COLUMN_LOCAL_URI);
+ return Uri.fromFile(new File(getUnderlyingString(Downloads._DATA))).toString();
+ }
+
+ private long translateLong(String column) {
+ if (!isLongColumn(column)) {
+ // mimic behavior of underlying cursor -- most likely, throw NumberFormatException
+ return Long.valueOf(translateString(column));
+ }
+
+ if (column.equals(COLUMN_ID)) {
+ return getUnderlyingLong(Downloads.Impl._ID);
+ }
+ if (column.equals(COLUMN_TOTAL_SIZE_BYTES)) {
+ return getUnderlyingLong(Downloads.COLUMN_TOTAL_BYTES);
+ }
+ if (column.equals(COLUMN_STATUS)) {
+ return translateStatus((int) getUnderlyingLong(Downloads.COLUMN_STATUS));
+ }
+ if (column.equals(COLUMN_ERROR_CODE)) {
+ return translateErrorCode((int) getUnderlyingLong(Downloads.COLUMN_STATUS));
+ }
+ if (column.equals(COLUMN_BYTES_DOWNLOADED_SO_FAR)) {
+ return getUnderlyingLong(Downloads.COLUMN_CURRENT_BYTES);
+ }
+ assert column.equals(COLUMN_LAST_MODIFIED_TIMESTAMP);
+ return getUnderlyingLong(Downloads.COLUMN_LAST_MODIFICATION);
+ }
+
+ private long translateErrorCode(int status) {
+ if (translateStatus(status) != STATUS_FAILED) {
+ return 0; // arbitrary value when status is not an error
+ }
+ if ((400 <= status && status < 490) || (500 <= status && status < 600)) {
+ // HTTP status code
+ return status;
+ }
+
+ switch (status) {
+ case Downloads.STATUS_FILE_ERROR:
+ return ERROR_FILE_ERROR;
+
+ case Downloads.STATUS_UNHANDLED_HTTP_CODE:
+ case Downloads.STATUS_UNHANDLED_REDIRECT:
+ return ERROR_UNHANDLED_HTTP_CODE;
+
+ case Downloads.STATUS_HTTP_DATA_ERROR:
+ return ERROR_HTTP_DATA_ERROR;
+
+ case Downloads.STATUS_TOO_MANY_REDIRECTS:
+ return ERROR_TOO_MANY_REDIRECTS;
+
+ case Downloads.STATUS_INSUFFICIENT_SPACE_ERROR:
+ return ERROR_INSUFFICIENT_SPACE;
+
+ case Downloads.STATUS_DEVICE_NOT_FOUND_ERROR:
+ return ERROR_DEVICE_NOT_FOUND;
+
+ default:
+ return ERROR_UNKNOWN;
+ }
+ }
+
+ private long getUnderlyingLong(String column) {
+ return super.getLong(super.getColumnIndex(column));
+ }
+
+ private String getUnderlyingString(String column) {
+ return super.getString(super.getColumnIndex(column));
+ }
+
+ private long translateStatus(int status) {
+ switch (status) {
+ case Downloads.STATUS_PENDING:
+ return STATUS_PENDING;
+
+ case Downloads.STATUS_RUNNING:
+ return STATUS_RUNNING;
+
+ case Downloads.STATUS_PENDING_PAUSED:
+ case Downloads.STATUS_RUNNING_PAUSED:
+ return STATUS_PAUSED;
+
+ case Downloads.STATUS_SUCCESS:
+ return STATUS_SUCCESSFUL;
+
+ default:
+ assert Downloads.isStatusError(status);
+ return STATUS_FAILED;
+ }
+ }
+ }
+}
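
A minimal usage sketch of the API added above, assuming access to the (still @hide) class; the URI, destination path, and the direct DownloadManager(ContentResolver) construction are placeholders, not part of the patch:

    import android.content.ContentResolver;
    import android.database.Cursor;
    import android.net.DownloadManager;
    import android.net.Uri;

    class DownloadManagerSketch {
        // Enqueue a download and read back its status through the public columns.
        static void enqueueAndCheck(ContentResolver resolver) {
            // The class javadoc points at Context.DOWNLOAD_SERVICE; the direct
            // constructor is used here only because the service hookup is hidden.
            DownloadManager dm = new DownloadManager(resolver);

            DownloadManager.Request request =
                    new DownloadManager.Request(Uri.parse("http://example.com/file.bin"))
                            .setTitle("file.bin")
                            .setDescription("Example download")
                            .setDestinationUri(Uri.parse("file:///sdcard/file.bin"));
            long id = dm.enqueue(request);

            Cursor cursor = dm.query(new DownloadManager.Query().setFilterById(id));
            try {
                if (cursor.moveToFirst()) {
                    int status = cursor.getInt(
                            cursor.getColumnIndexOrThrow(DownloadManager.COLUMN_STATUS));
                    long soFar = cursor.getLong(
                            cursor.getColumnIndexOrThrow(DownloadManager.COLUMN_BYTES_DOWNLOADED_SO_FAR));
                    boolean finished = (status == DownloadManager.STATUS_SUCCESSFUL);
                }
            } finally {
                cursor.close();
            }
        }
    }
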
diff --git a/core/java/android/net/MobileDataStateTracker.java b/core/java/android/net/MobileDataStateTracker.java
index 5fd5315..e74db67 100644
--- a/core/java/android/net/MobileDataStateTracker.java
+++ b/core/java/android/net/MobileDataStateTracker.java
@@ -29,6 +29,7 @@
import com.android.internal.telephony.TelephonyIntents;
import android.net.NetworkInfo.DetailedState;
import android.net.NetworkInfo;
+import android.net.NetworkProperties;
import android.telephony.TelephonyManager;
import android.util.Log;
import android.text.TextUtils;
@@ -55,7 +56,7 @@
private boolean mTeardownRequested = false;
private Handler mTarget;
private Context mContext;
- private String mInterfaceName;
+ private NetworkProperties mNetworkProperties;
private boolean mPrivateDnsRouteSet = false;
private int mDefaultGatewayAddr = 0;
private boolean mDefaultRouteSet = false;
@@ -101,14 +102,6 @@
return sDnsPropNames;
}
- /**
- * Return the name of our network interface.
- * @return the name of our interface.
- */
- public String getInterfaceName() {
- return mInterfaceName;
- }
-
public boolean isPrivateDnsRouteSet() {
return mPrivateDnsRouteSet;
}
@@ -211,9 +204,11 @@
}
setDetailedState(DetailedState.DISCONNECTED, reason, apnName);
- if (mInterfaceName != null) {
- NetworkUtils.resetConnections(mInterfaceName);
+ if (mNetworkProperties != null) {
+ NetworkUtils.resetConnections(mNetworkProperties.getInterface().
+ getName());
}
+ // TODO - check this
// can't do this here - ConnectivityService needs it to clear stuff
// it's ok though - just leave it to be refreshed next time
// we connect.
@@ -229,9 +224,11 @@
setDetailedState(DetailedState.SUSPENDED, reason, apnName);
break;
case CONNECTED:
- mInterfaceName = intent.getStringExtra(Phone.DATA_IFACE_NAME_KEY);
- if (mInterfaceName == null) {
- Log.d(TAG, "CONNECTED event did not supply interface name.");
+ mNetworkProperties = intent.getParcelableExtra(
+ Phone.DATA_NETWORK_PROPERTIES_KEY);
+ if (mNetworkProperties == null) {
+ Log.d(TAG,
+ "CONNECTED event did not supply network properties.");
}
setDetailedState(DetailedState.CONNECTED, reason, apnName);
break;
@@ -565,4 +562,8 @@
return null;
}
}
+
+ public NetworkProperties getNetworkProperties() {
+ return mNetworkProperties;
+ }
}
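
With getInterfaceName() removed from the tracker, callers read the name through the properties object instead; a rough sketch (the null check mirrors the CONNECTED handling above):

    import android.net.MobileDataStateTracker;
    import android.net.NetworkProperties;

    class TrackerInterfaceName {
        // Replacement for the old tracker.getInterfaceName() pattern.
        static String interfaceNameOf(MobileDataStateTracker tracker) {
            NetworkProperties props = tracker.getNetworkProperties();
            return (props == null) ? null : props.getInterfaceName();
        }
    }
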
diff --git a/core/java/android/net/NetworkProperties.aidl b/core/java/android/net/NetworkProperties.aidl
new file mode 100644
index 0000000..07aac6e
--- /dev/null
+++ b/core/java/android/net/NetworkProperties.aidl
@@ -0,0 +1,22 @@
+/*
+**
+** Copyright (C) 2009 Qualcomm Innovation Center, Inc. All Rights Reserved.
+** Copyright (C) 2009 The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+package android.net;
+
+parcelable NetworkProperties;
+
diff --git a/core/java/android/net/NetworkProperties.java b/core/java/android/net/NetworkProperties.java
new file mode 100644
index 0000000..56e1f1a
--- /dev/null
+++ b/core/java/android/net/NetworkProperties.java
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.net;
+
+import android.os.Parcelable;
+import android.os.Parcel;
+import android.util.Log;
+
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+/**
+ * Describes the properties of a network interface or a single address
+ * of an interface.
+ * TODO - consider adding optional fields like Apn and ApnType
+ * @hide
+ */
+public class NetworkProperties implements Parcelable {
+
+ private NetworkInterface mIface;
+ private Collection<InetAddress> mAddresses;
+ private Collection<InetAddress> mDnses;
+ private InetAddress mGateway;
+ private ProxyProperties mHttpProxy;
+
+ public NetworkProperties() {
+ clear();
+ }
+
+ public synchronized void setInterface(NetworkInterface iface) {
+ mIface = iface;
+ }
+ public synchronized NetworkInterface getInterface() {
+ return mIface;
+ }
+ public synchronized String getInterfaceName() {
+ return (mIface == null ? null : mIface.getName());
+ }
+
+ public synchronized void addAddress(InetAddress address) {
+ mAddresses.add(address);
+ }
+ public synchronized Collection<InetAddress> getAddresses() {
+ return mAddresses;
+ }
+
+ public synchronized void addDns(InetAddress dns) {
+ mDnses.add(dns);
+ }
+ public synchronized Collection<InetAddress> getDnses() {
+ return mDnses;
+ }
+
+ public synchronized void setGateway(InetAddress gateway) {
+ mGateway = gateway;
+ }
+ public synchronized InetAddress getGateway() {
+ return mGateway;
+ }
+
+ public synchronized void setHttpProxy(ProxyProperties proxy) {
+ mHttpProxy = proxy;
+ }
+ public synchronized ProxyProperties getHttpProxy() {
+ return mHttpProxy;
+ }
+
+ public synchronized void clear() {
+ mIface = null;
+ mAddresses = new ArrayList<InetAddress>();
+ mDnses = new ArrayList<InetAddress>();
+ mGateway = null;
+ mHttpProxy = null;
+ }
+
+ /**
+ * Implement the Parcelable interface
+ * @hide
+ */
+ public int describeContents() {
+ return 0;
+ }
+
+ public synchronized String toString() {
+ String ifaceName = (mIface == null ? "" : "InterfaceName: " + mIface.getName() + " ");
+
+ String ip = "IpAddresses: [";
+ for (InetAddress addr : mAddresses) ip += addr.toString() + ",";
+ ip += "] ";
+
+ String dns = "DnsAddresses: [";
+ for (InetAddress addr : mDnses) dns += addr.toString() + ",";
+ dns += "] ";
+
+ String proxy = (mHttpProxy == null ? "" : "HttpProxy: " + mHttpProxy.toString() + " ");
+ String gateway = (mGateway == null ? "" : "Gateway: " + mGateway.toString() + " ");
+
+ return ifaceName + ip + gateway + dns + proxy;
+ }
+
+ /**
+ * Implement the Parcelable interface.
+ * @hide
+ */
+ public synchronized void writeToParcel(Parcel dest, int flags) {
+ dest.writeString(getInterfaceName());
+ dest.writeInt(mAddresses.size());
+ for(InetAddress a : mAddresses) {
+ dest.writeString(a.getHostName());
+ dest.writeByteArray(a.getAddress());
+ }
+ dest.writeInt(mDnses.size());
+ for(InetAddress d : mDnses) {
+ dest.writeString(d.getHostName());
+ dest.writeByteArray(d.getAddress());
+ }
+ if (mGateway != null) {
+ dest.writeByte((byte)1);
+ dest.writeString(mGateway.getHostName());
+ dest.writeByteArray(mGateway.getAddress());
+ } else {
+ dest.writeByte((byte)0);
+ }
+ if (mHttpProxy != null) {
+ dest.writeByte((byte)1);
+ dest.writeParcelable(mHttpProxy, flags);
+ } else {
+ dest.writeByte((byte)0);
+ }
+ }
+
+ /**
+ * Implement the Parcelable interface.
+ * @hide
+ */
+ public static final Creator<NetworkProperties> CREATOR =
+ new Creator<NetworkProperties>() {
+ public NetworkProperties createFromParcel(Parcel in) {
+ NetworkProperties netProp = new NetworkProperties();
+ String iface = in.readString();
+ if (iface != null) {
+ try {
+ netProp.setInterface(NetworkInterface.getByName(iface));
+ } catch (Exception e) {
+ return null;
+ }
+ }
+ int addressCount = in.readInt();
+ for (int i=0; i<addressCount; i++) {
+ try {
+ netProp.addAddress(InetAddress.getByAddress(in.readString(),
+ in.createByteArray()));
+ } catch (UnknownHostException e) { }
+ }
+ addressCount = in.readInt();
+ for (int i=0; i<addressCount; i++) {
+ try {
+ netProp.addDns(InetAddress.getByAddress(in.readString(),
+ in.createByteArray()));
+ } catch (UnknownHostException e) { }
+ }
+ if (in.readByte() == 1) {
+ try {
+ netProp.setGateway(InetAddress.getByAddress(in.readString(),
+ in.createByteArray()));
+ } catch (UnknownHostException e) {}
+ }
+ if (in.readByte() == 1) {
+ netProp.setHttpProxy((ProxyProperties)in.readParcelable(null));
+ }
+ return netProp;
+ }
+
+ public NetworkProperties[] newArray(int size) {
+ return new NetworkProperties[size];
+ }
+ };
+}
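
A short sketch of populating one of these objects the way a state tracker might; the interface name and address literals are made-up examples:

    import android.net.NetworkProperties;
    import java.net.InetAddress;
    import java.net.NetworkInterface;

    class NetworkPropertiesSketch {
        static NetworkProperties build() throws Exception {
            NetworkProperties props = new NetworkProperties();
            // "rmnet0" is only an example; getByName() returns null if no such interface exists.
            props.setInterface(NetworkInterface.getByName("rmnet0"));
            props.addAddress(InetAddress.getByName("10.0.0.2"));
            props.setGateway(InetAddress.getByName("10.0.0.1"));
            props.addDns(InetAddress.getByName("8.8.8.8"));
            // Consumers read the values back, e.g. for resetConnections():
            String iface = props.getInterfaceName();   // "rmnet0", or null if unset
            return props;
        }
    }
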
diff --git a/core/java/android/net/NetworkStateTracker.java b/core/java/android/net/NetworkStateTracker.java
index cd8e7f1..44215e7 100644
--- a/core/java/android/net/NetworkStateTracker.java
+++ b/core/java/android/net/NetworkStateTracker.java
@@ -46,22 +46,17 @@
public NetworkInfo getNetworkInfo();
/**
+ * Fetch NetworkProperties for the network
+ */
+ public NetworkProperties getNetworkProperties();
+
+ /**
* Return the system properties name associated with the tcp buffer sizes
* for this network.
*/
public String getTcpBufferSizesPropName();
/**
- * Return the DNS property names for this network.
- */
- public String[] getDnsPropNames();
-
- /**
- * Fetch interface name of the interface
- */
- public String getInterfaceName();
-
- /**
* Check if private DNS route is set for the network
*/
public boolean isPrivateDnsRouteSet();
diff --git a/core/java/android/net/NetworkUtils.java b/core/java/android/net/NetworkUtils.java
index a3ae01b..564bc1f 100644
--- a/core/java/android/net/NetworkUtils.java
+++ b/core/java/android/net/NetworkUtils.java
@@ -32,13 +32,37 @@
public native static int disableInterface(String interfaceName);
/** Add a route to the specified host via the named interface. */
- public native static int addHostRoute(String interfaceName, int hostaddr);
+ public static int addHostRoute(String interfaceName, InetAddress hostaddr) {
+ int v4Int = v4StringToInt(hostaddr.getHostAddress());
+ if (v4Int != 0) {
+ return addHostRouteNative(interfaceName, v4Int);
+ } else {
+ return -1;
+ }
+ }
+ private native static int addHostRouteNative(String interfaceName, int hostaddr);
/** Add a default route for the named interface. */
- public native static int setDefaultRoute(String interfaceName, int gwayAddr);
+ public static int setDefaultRoute(String interfaceName, InetAddress gwayAddr) {
+ int v4Int = v4StringToInt(gwayAddr.getHostAddress());
+ if (v4Int != 0) {
+ return setDefaultRouteNative(interfaceName, v4Int);
+ } else {
+ return -1;
+ }
+ }
+ private native static int setDefaultRouteNative(String interfaceName, int hostaddr);
/** Return the gateway address for the default route for the named interface. */
- public native static int getDefaultRoute(String interfaceName);
+ public static InetAddress getDefaultRoute(String interfaceName) {
+ int addr = getDefaultRouteNative(interfaceName);
+ try {
+ return InetAddress.getByAddress(v4IntToArray(addr));
+ } catch (UnknownHostException e) {
+ return null;
+ }
+ }
+ private native static int getDefaultRouteNative(String interfaceName);
/** Remove host routes that use the named interface. */
public native static int removeHostRoutes(String interfaceName);
@@ -105,27 +129,30 @@
private native static boolean configureNative(
String interfaceName, int ipAddress, int netmask, int gateway, int dns1, int dns2);
- /**
- * Look up a host name and return the result as an int. Works if the argument
- * is an IP address in dot notation. Obviously, this can only be used for IPv4
- * addresses.
- * @param hostname the name of the host (or the IP address)
- * @return the IP address as an {@code int} in network byte order
- */
- public static int lookupHost(String hostname) {
- InetAddress inetAddress;
+ // The following two functions are glue to tie the old int-based address scheme
+ // to the new InetAddress scheme. They should go away when we go fully to InetAddress
+ // TODO - remove when we switch fully to InetAddress
+ public static byte[] v4IntToArray(int addr) {
+ byte[] addrBytes = new byte[4];
+ addrBytes[0] = (byte)(addr & 0xff);
+ addrBytes[1] = (byte)((addr >> 8) & 0xff);
+ addrBytes[2] = (byte)((addr >> 16) & 0xff);
+ addrBytes[3] = (byte)((addr >> 24) & 0xff);
+ return addrBytes;
+ }
+
+ public static int v4StringToInt(String str) {
+ int result = 0;
+ String[] array = str.split("\\.");
+ if (array.length != 4) return 0;
try {
- inetAddress = InetAddress.getByName(hostname);
- } catch (UnknownHostException e) {
- return -1;
+ result = Integer.parseInt(array[3]);
+ result = (result << 8) + Integer.parseInt(array[2]);
+ result = (result << 8) + Integer.parseInt(array[1]);
+ result = (result << 8) + Integer.parseInt(array[0]);
+ } catch (NumberFormatException e) {
+ return 0;
}
- byte[] addrBytes;
- int addr;
- addrBytes = inetAddress.getAddress();
- addr = ((addrBytes[3] & 0xff) << 24)
- | ((addrBytes[2] & 0xff) << 16)
- | ((addrBytes[1] & 0xff) << 8)
- | (addrBytes[0] & 0xff);
- return addr;
+ return result;
}
}
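
To make the byte ordering of the new glue helpers concrete, a small worked example (the arithmetic follows the code above):

    import android.net.NetworkUtils;

    class V4GlueSketch {
        static void demo() {
            // v4StringToInt builds the int from the last octet down, so the first
            // octet lands in the low byte:
            //   4 -> (4<<8)+3 = 1027 -> (1027<<8)+2 = 262914 -> (262914<<8)+1 = 67305985 = 0x04030201
            int addr = NetworkUtils.v4StringToInt("1.2.3.4");   // 0x04030201
            byte[] bytes = NetworkUtils.v4IntToArray(addr);     // {1, 2, 3, 4}
            // Malformed input ("1.2.3" or "1.2.x.4") returns 0, which addHostRoute()
            // and setDefaultRoute() above treat as failure and map to -1.
        }
    }
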
diff --git a/core/java/android/net/ProxyProperties.java b/core/java/android/net/ProxyProperties.java
new file mode 100644
index 0000000..6828dd4
--- /dev/null
+++ b/core/java/android/net/ProxyProperties.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.net;
+
+
+import android.os.Parcel;
+import android.os.Parcelable;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+/**
+ * A container class for HTTP proxy information.
+ * @hide
+ */
+public class ProxyProperties implements Parcelable {
+
+ private InetAddress mProxy;
+ private int mPort;
+ private String mExclusionList;
+
+ public ProxyProperties() {
+ }
+
+ public synchronized InetAddress getAddress() {
+ return mProxy;
+ }
+ public synchronized void setAddress(InetAddress proxy) {
+ mProxy = proxy;
+ }
+
+ public synchronized int getPort() {
+ return mPort;
+ }
+ public synchronized void setPort(int port) {
+ mPort = port;
+ }
+
+ public synchronized String getExclusionList() {
+ return mExclusionList;
+ }
+ public synchronized void setExclusionList(String exclusionList) {
+ mExclusionList = exclusionList;
+ }
+
+ /**
+ * Implement the Parcelable interface
+ * @hide
+ */
+ public int describeContents() {
+ return 0;
+ }
+
+ /**
+ * Implement the Parcelable interface.
+ * @hide
+ */
+ public synchronized void writeToParcel(Parcel dest, int flags) {
+ if (mProxy != null) {
+ dest.writeByte((byte)1);
+ dest.writeString(mProxy.getHostName());
+ dest.writeByteArray(mProxy.getAddress());
+ } else {
+ dest.writeByte((byte)0);
+ }
+ dest.writeInt(mPort);
+ dest.writeString(mExclusionList);
+ }
+
+ /**
+ * Implement the Parcelable interface.
+ * @hide
+ */
+ public static final Creator<ProxyProperties> CREATOR =
+ new Creator<ProxyProperties>() {
+ public ProxyProperties createFromParcel(Parcel in) {
+ ProxyProperties proxyProperties = new ProxyProperties();
+ if (in.readByte() == 1) {
+ try {
+ proxyProperties.setAddress(InetAddress.getByAddress(in.readString(),
+ in.createByteArray()));
+ } catch (UnknownHostException e) {}
+ }
+ proxyProperties.setPort(in.readInt());
+ proxyProperties.setExclusionList(in.readString());
+ return proxyProperties;
+ }
+
+ public ProxyProperties[] newArray(int size) {
+ return new ProxyProperties[size];
+ }
+ };
+
+};
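
A minimal sketch of filling in the container; the host, port, and the comma-separated exclusion-list format are assumptions for illustration only:

    import android.net.ProxyProperties;
    import java.net.InetAddress;
    import java.net.UnknownHostException;

    class ProxyPropertiesSketch {
        static ProxyProperties build() throws UnknownHostException {
            ProxyProperties proxy = new ProxyProperties();
            proxy.setAddress(InetAddress.getByName("192.168.0.10")); // example proxy host
            proxy.setPort(8080);
            proxy.setExclusionList("localhost,127.0.0.1");           // format assumed, not defined here
            return proxy;
        }
    }
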
diff --git a/core/java/android/net/SSLCertificateSocketFactory.java b/core/java/android/net/SSLCertificateSocketFactory.java
index 9ad125b..31acb5b 100644
--- a/core/java/android/net/SSLCertificateSocketFactory.java
+++ b/core/java/android/net/SSLCertificateSocketFactory.java
@@ -247,13 +247,16 @@
/**
* {@inheritDoc}
*
- * <p>This method verifies the peer's certificate hostname after connecting.
+ * <p>This method verifies the peer's certificate hostname after connecting
+ * (unless created with {@link #getInsecure(int, SSLSessionCache)}).
*/
@Override
public Socket createSocket(Socket k, String host, int port, boolean close) throws IOException {
OpenSSLSocketImpl s = (OpenSSLSocketImpl) getDelegate().createSocket(k, host, port, close);
s.setHandshakeTimeout(mHandshakeTimeoutMillis);
- verifyHostname(s, host);
+ if (mSecure) {
+ verifyHostname(s, host);
+ }
return s;
}
@@ -305,7 +308,8 @@
/**
* {@inheritDoc}
*
- * <p>This method verifies the peer's certificate hostname after connecting.
+ * <p>This method verifies the peer's certificate hostname after connecting
+ * (unless created with {@link #getInsecure(int, SSLSessionCache)}).
*/
@Override
public Socket createSocket(String host, int port, InetAddress localAddr, int localPort)
@@ -313,20 +317,25 @@
OpenSSLSocketImpl s = (OpenSSLSocketImpl) getDelegate().createSocket(
host, port, localAddr, localPort);
s.setHandshakeTimeout(mHandshakeTimeoutMillis);
- verifyHostname(s, host);
+ if (mSecure) {
+ verifyHostname(s, host);
+ }
return s;
}
/**
* {@inheritDoc}
*
- * <p>This method verifies the peer's certificate hostname after connecting.
+ * <p>This method verifies the peer's certificate hostname after connecting
+ * (unless created with {@link #getInsecure(int, SSLSessionCache)}).
*/
@Override
public Socket createSocket(String host, int port) throws IOException {
OpenSSLSocketImpl s = (OpenSSLSocketImpl) getDelegate().createSocket(host, port);
s.setHandshakeTimeout(mHandshakeTimeoutMillis);
- verifyHostname(s, host);
+ if (mSecure) {
+ verifyHostname(s, host);
+ }
return s;
}
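
The practical effect of the mSecure guard, sketched with the two existing factory methods the javadoc now references (host and timeout values are placeholders):

    import android.net.SSLCertificateSocketFactory;
    import android.net.SSLSessionCache;
    import java.io.IOException;
    import javax.net.ssl.SSLSocketFactory;

    class SocketFactorySketch {
        static void demo(SSLSessionCache cache) throws IOException {
            // Secure factory: createSocket() still verifies the peer's hostname.
            SSLSocketFactory secure = SSLCertificateSocketFactory.getDefault(30 * 1000, cache);
            secure.createSocket("www.example.com", 443);

            // Insecure factory (testing only): with this change, hostname verification
            // is skipped along with certificate checks.
            SSLSocketFactory insecure = SSLCertificateSocketFactory.getInsecure(30 * 1000, cache);
            insecure.createSocket("www.example.com", 443);
        }
    }
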
diff --git a/core/java/android/os/Debug.java b/core/java/android/os/Debug.java
index 2e14667..d23b161 100644
--- a/core/java/android/os/Debug.java
+++ b/core/java/android/os/Debug.java
@@ -730,7 +730,7 @@
}
/**
- * Dump "hprof" data to the specified file. This will cause a GC.
+ * Dump "hprof" data to the specified file. This may cause a GC.
*
* @param fileName Full pathname of output file (e.g. "/sdcard/dump.hprof").
* @throws UnsupportedOperationException if the VM was built without
@@ -742,11 +742,24 @@
}
/**
- * Collect "hprof" and send it to DDMS. This will cause a GC.
+ * Like dumpHprofData(String), but takes an already-opened
+ * FileDescriptor to which the trace is written. The file name is also
+ * supplied simply for logging. Makes a dup of the file descriptor.
+ *
+ * Primarily for use by the "am" shell command.
+ *
+ * @hide
+ */
+ public static void dumpHprofData(String fileName, FileDescriptor fd)
+ throws IOException {
+ VMDebug.dumpHprofData(fileName, fd);
+ }
+
+ /**
+ * Collect "hprof" and send it to DDMS. This may cause a GC.
*
* @throws UnsupportedOperationException if the VM was built without
* HPROF support.
- *
* @hide
*/
public static void dumpHprofDataDdms() {
@@ -754,6 +767,13 @@
}
/**
+ * Writes native heap data to the specified file descriptor.
+ *
+ * @hide
+ */
+ public static native void dumpNativeHeap(FileDescriptor fd);
+
+ /**
* Returns the number of sent transactions from this process.
* @return The number of sent transactions or -1 if it could not read t.
*/
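
For comparison, the existing public string overload is used as below (the path is an example only); the new FileDescriptor variant and dumpNativeHeap() are @hide and aimed at tooling such as the "am" command:

    import android.os.Debug;
    import java.io.IOException;

    class HprofSketch {
        static void dump() {
            try {
                // Public overload: opens the file itself. Note the softened javadoc --
                // this may (not will) trigger a GC.
                Debug.dumpHprofData("/sdcard/dump.hprof");
            } catch (IOException e) {
                // Handle or log as appropriate for the caller.
            }
        }
    }
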
diff --git a/core/java/android/provider/Calendar.java b/core/java/android/provider/Calendar.java
index a23a5a7..10f1d2b 100644
--- a/core/java/android/provider/Calendar.java
+++ b/core/java/android/provider/Calendar.java
@@ -289,6 +289,7 @@
DatabaseUtils.cursorStringToContentValuesIfPresent(cursor, cv, Calendars.SYNC1);
DatabaseUtils.cursorStringToContentValuesIfPresent(cursor, cv, Calendars.SYNC2);
DatabaseUtils.cursorStringToContentValuesIfPresent(cursor, cv, Calendars.SYNC3);
+ DatabaseUtils.cursorStringToContentValuesIfPresent(cursor, cv, Calendars.SYNC4);
DatabaseUtils.cursorStringToContentValuesIfPresent(cursor, cv, Calendars.NAME);
DatabaseUtils.cursorStringToContentValuesIfPresent(cursor, cv,
diff --git a/core/java/android/provider/Downloads.java b/core/java/android/provider/Downloads.java
index 348e9e8..1a4f8c0 100644
--- a/core/java/android/provider/Downloads.java
+++ b/core/java/android/provider/Downloads.java
@@ -899,6 +899,12 @@
public static final int DESTINATION_CACHE_PARTITION_NOROAMING = 3;
/**
+ * This download will be saved to the location given by the file URI in
+ * {@link #COLUMN_FILE_NAME_HINT}.
+ */
+ public static final int DESTINATION_FILE_URI = 4;
+
+ /**
* This download is allowed to run.
*/
public static final int CONTROL_RUN = 0;
diff --git a/core/java/android/provider/MediaStore.java b/core/java/android/provider/MediaStore.java
index 2c4a9f5..293d31c 100644
--- a/core/java/android/provider/MediaStore.java
+++ b/core/java/android/provider/MediaStore.java
@@ -237,6 +237,14 @@
* <P>Type: TEXT</P>
*/
public static final String MIME_TYPE = "mime_type";
+
+ /**
+ * The MTP object handle of a newly transferred file.
+ * Used internally by the MediaScanner.
+ * <P>Type: INTEGER</P>
+ * @hide
+ */
+ public static final String MTP_OBJECT_HANDLE = "mtp_object_handle";
}
@@ -274,6 +282,19 @@
* <P>Type: INTEGER</P>
*/
public static final String PARENT = "parent";
+
+ /**
+ * Identifier for the media table containing the object.
+ * Used internally by MediaProvider.
+ * <P>Type: INTEGER</P>
+ */
+ public static final String MEDIA_TABLE = "media_table";
+
+ /**
+ * The ID of the object in its media table.
+ * <P>Type: INTEGER</P>
+ */
+ public static final String MEDIA_ID = "media_id";
}
}
diff --git a/core/java/android/service/wallpaper/WallpaperService.java b/core/java/android/service/wallpaper/WallpaperService.java
index 6f12f19..e26a090 100644
--- a/core/java/android/service/wallpaper/WallpaperService.java
+++ b/core/java/android/service/wallpaper/WallpaperService.java
@@ -336,7 +336,7 @@
? (mWindowFlags&~WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE)
: (mWindowFlags|WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE);
if (mCreated) {
- updateSurface(false, false);
+ updateSurface(false, false, false);
}
}
@@ -421,6 +421,13 @@
}
/**
+ * Convenience for {@link SurfaceHolder.Callback2#surfaceRedrawNeeded
+ * SurfaceHolder.Callback2.surfaceRedrawNeeded()}.
+ */
+ public void onSurfaceRedrawNeeded(SurfaceHolder holder) {
+ }
+
+ /**
* Convenience for {@link SurfaceHolder.Callback#surfaceCreated
* SurfaceHolder.Callback.surfaceCreated()}.
*/
@@ -450,7 +457,7 @@
}
}
- void updateSurface(boolean forceRelayout, boolean forceReport) {
+ void updateSurface(boolean forceRelayout, boolean forceReport, boolean redrawNeeded) {
if (mDestroyed) {
Log.w(TAG, "Ignoring updateSurface: destroyed");
}
@@ -467,7 +474,7 @@
final boolean typeChanged = mType != mSurfaceHolder.getRequestedType();
final boolean flagsChanged = mCurWindowFlags != mWindowFlags;
if (forceRelayout || creating || surfaceCreating || formatChanged || sizeChanged
- || typeChanged || flagsChanged) {
+ || typeChanged || flagsChanged || redrawNeeded) {
if (DEBUG) Log.v(TAG, "Changes: creating=" + creating
+ " format=" + formatChanged + " size=" + sizeChanged);
@@ -555,6 +562,10 @@
}
}
}
+
+ redrawNeeded |= creating
+ || (relayoutResult&WindowManagerImpl.RELAYOUT_FIRST_TIME) != 0;
+
if (forceReport || creating || surfaceCreating
|| formatChanged || sizeChanged) {
if (DEBUG) {
@@ -578,10 +589,24 @@
}
}
}
+
+ if (redrawNeeded) {
+ onSurfaceRedrawNeeded(mSurfaceHolder);
+ SurfaceHolder.Callback callbacks[] = mSurfaceHolder.getCallbacks();
+ if (callbacks != null) {
+ for (SurfaceHolder.Callback c : callbacks) {
+ if (c instanceof SurfaceHolder.Callback2) {
+ ((SurfaceHolder.Callback2)c).surfaceRedrawNeeded(
+ mSurfaceHolder);
+ }
+ }
+ }
+ }
+
} finally {
mIsCreating = false;
mSurfaceCreated = true;
- if (creating || (relayoutResult&WindowManagerImpl.RELAYOUT_FIRST_TIME) != 0) {
+ if (redrawNeeded) {
mSession.finishDrawing(mWindow);
}
}
@@ -618,7 +643,7 @@
onCreate(mSurfaceHolder);
mInitializing = false;
- updateSurface(false, false);
+ updateSurface(false, false, false);
}
void doDesiredSizeChanged(int desiredWidth, int desiredHeight) {
@@ -647,7 +672,7 @@
// If becoming visible, in preview mode the surface
// may have been destroyed so now we need to make
// sure it is re-created.
- updateSurface(false, false);
+ updateSurface(false, false, false);
}
onVisibilityChanged(visible);
}
@@ -852,7 +877,7 @@
return;
}
case MSG_UPDATE_SURFACE:
- mEngine.updateSurface(true, false);
+ mEngine.updateSurface(true, false, false);
break;
case MSG_VISIBILITY_CHANGED:
if (DEBUG) Log.v(TAG, "Visibility change in " + mEngine
@@ -868,14 +893,8 @@
} break;
case MSG_WINDOW_RESIZED: {
final boolean reportDraw = message.arg1 != 0;
- mEngine.updateSurface(true, false);
+ mEngine.updateSurface(true, false, reportDraw);
mEngine.doOffsetsChanged();
- if (reportDraw) {
- try {
- mEngine.mSession.finishDrawing(mEngine.mWindow);
- } catch (RemoteException e) {
- }
- }
} break;
case MSG_TOUCH_EVENT: {
MotionEvent ev = (MotionEvent)message.obj;
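
An engine that wants to draw synchronously during the new redraw handshake would override the added callback, roughly like this (drawFrame() is a hypothetical helper, not part of the patch):

    import android.graphics.Canvas;
    import android.graphics.Color;
    import android.service.wallpaper.WallpaperService;
    import android.view.SurfaceHolder;

    public class ExampleWallpaper extends WallpaperService {
        @Override
        public Engine onCreateEngine() {
            return new Engine() {
                @Override
                public void onSurfaceRedrawNeeded(SurfaceHolder holder) {
                    // Called before the window manager treats the frame as finished
                    // (e.g. after a resize), so draw here synchronously.
                    drawFrame(holder);
                }

                private void drawFrame(SurfaceHolder holder) {
                    Canvas canvas = holder.lockCanvas();
                    if (canvas != null) {
                        try {
                            canvas.drawColor(Color.BLACK);
                        } finally {
                            holder.unlockCanvasAndPost(canvas);
                        }
                    }
                }
            };
        }
    }
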
diff --git a/core/java/android/text/Selection.java b/core/java/android/text/Selection.java
index bb98bce..13cb5e6 100644
--- a/core/java/android/text/Selection.java
+++ b/core/java/android/text/Selection.java
@@ -417,8 +417,8 @@
}
}
- private static final class START implements NoCopySpan { };
- private static final class END implements NoCopySpan { };
+ private static final class START implements NoCopySpan { }
+ private static final class END implements NoCopySpan { }
/*
* Public constants
diff --git a/core/java/android/text/format/Time.java b/core/java/android/text/format/Time.java
index 8eae111..c05a8fe 100644
--- a/core/java/android/text/format/Time.java
+++ b/core/java/android/text/format/Time.java
@@ -32,7 +32,7 @@
private static final String Y_M_D_T_H_M_S_000 = "%Y-%m-%dT%H:%M:%S.000";
private static final String Y_M_D_T_H_M_S_000_Z = "%Y-%m-%dT%H:%M:%S.000Z";
private static final String Y_M_D = "%Y-%m-%d";
-
+
public static final String TIMEZONE_UTC = "UTC";
/**
@@ -170,11 +170,11 @@
public Time() {
this(TimeZone.getDefault().getID());
}
-
+
/**
* A copy constructor. Construct a Time object by copying the given
* Time object. No normalization occurs.
- *
+ *
* @param other
*/
public Time(Time other) {
@@ -185,17 +185,17 @@
* Ensures the values in each field are in range. For example if the
* current value of this calendar is March 32, normalize() will convert it
* to April 1. It also fills in weekDay, yearDay, isDst and gmtoff.
- *
+ *
* <p>
* If "ignoreDst" is true, then this method sets the "isDst" field to -1
* (the "unknown" value) before normalizing. It then computes the
* correct value for "isDst".
- *
+ *
* <p>
* See {@link #toMillis(boolean)} for more information about when to
* use <tt>true</tt> or <tt>false</tt> for "ignoreDst".
- *
- * @return the UTC milliseconds since the epoch
+ *
+ * @return the UTC milliseconds since the epoch
*/
native public long normalize(boolean ignoreDst);
@@ -379,13 +379,13 @@
* Parses a date-time string in either the RFC 2445 format or an abbreviated
* format that does not include the "time" field. For example, all of the
* following strings are valid:
- *
+ *
* <ul>
* <li>"20081013T160000Z"</li>
* <li>"20081013T160000"</li>
* <li>"20081013"</li>
* </ul>
- *
+ *
* Returns whether or not the time is in UTC (ends with Z). If the string
* ends with "Z" then the timezone is set to UTC. If the date-time string
* included only a date and no time field, then the <code>allDay</code>
@@ -396,10 +396,10 @@
* <code>yearDay</code>, and <code>gmtoff</code> are always set to zero,
* and the field <code>isDst</code> is set to -1 (unknown). To set those
* fields, call {@link #normalize(boolean)} after parsing.
- *
+ *
* To parse a date-time string and convert it to UTC milliseconds, do
* something like this:
- *
+ *
* <pre>
* Time time = new Time();
* String date = "20081013T160000Z";
@@ -428,25 +428,25 @@
* Parse a time in RFC 3339 format. This method also parses simple dates
* (that is, strings that contain no time or time offset). For example,
* all of the following strings are valid:
- *
+ *
* <ul>
* <li>"2008-10-13T16:00:00.000Z"</li>
* <li>"2008-10-13T16:00:00.000+07:00"</li>
* <li>"2008-10-13T16:00:00.000-07:00"</li>
* <li>"2008-10-13"</li>
* </ul>
- *
+ *
* <p>
* If the string contains a time and time offset, then the time offset will
* be used to convert the time value to UTC.
* </p>
- *
+ *
* <p>
* If the given string contains just a date (with no time field), then
* the {@link #allDay} field is set to true and the {@link #hour},
* {@link #minute}, and {@link #second} fields are set to zero.
* </p>
- *
+ *
* <p>
* Returns true if the resulting time value is in UTC time.
* </p>
@@ -462,7 +462,7 @@
}
return false;
}
-
+
native private boolean nativeParse3339(String s);
/**
@@ -484,13 +484,13 @@
* <em>not</em> change any of the fields in this Time object. If you want
* to normalize the fields in this Time object and also get the milliseconds
* then use {@link #normalize(boolean)}.
- *
+ *
* <p>
* If "ignoreDst" is false, then this method uses the current setting of the
* "isDst" field and will adjust the returned time if the "isDst" field is
* wrong for the given time. See the sample code below for an example of
* this.
- *
+ *
* <p>
* If "ignoreDst" is true, then this method ignores the current setting of
* the "isDst" field in this Time object and will instead figure out the
@@ -499,27 +499,27 @@
* correct value of the "isDst" field is when the time is inherently
* ambiguous because it falls in the hour that is repeated when switching
* from Daylight-Saving Time to Standard Time.
- *
+ *
* <p>
* Here is an example where <tt>toMillis(true)</tt> adjusts the time,
* assuming that DST changes at 2am on Sunday, Nov 4, 2007.
- *
+ *
* <pre>
* Time time = new Time();
- * time.set(2007, 10, 4); // set the date to Nov 4, 2007, 12am
+ * time.set(4, 10, 2007); // set the date to Nov 4, 2007, 12am
* time.normalize(); // this sets isDst = 1
* time.monthDay += 1; // changes the date to Nov 5, 2007, 12am
* millis = time.toMillis(false); // millis is Nov 4, 2007, 11pm
* millis = time.toMillis(true); // millis is Nov 5, 2007, 12am
* </pre>
- *
+ *
* <p>
* To avoid this problem, use <tt>toMillis(true)</tt>
* after adding or subtracting days or explicitly setting the "monthDay"
* field. On the other hand, if you are adding
* or subtracting hours or minutes, then you should use
* <tt>toMillis(false)</tt>.
- *
+ *
* <p>
* You should also use <tt>toMillis(false)</tt> if you want
* to read back the same milliseconds that you set with {@link #set(long)}
@@ -531,14 +531,14 @@
* Sets the fields in this Time object given the UTC milliseconds. After
* this method returns, all the fields are normalized.
* This also sets the "isDst" field to the correct value.
- *
+ *
* @param millis the time in UTC milliseconds since the epoch.
*/
native public void set(long millis);
/**
* Format according to RFC 2445 DATETIME type.
- *
+ *
* <p>
* The same as format("%Y%m%dT%H%M%S").
*/
@@ -584,7 +584,7 @@
* Sets the date from the given fields. Also sets allDay to true.
* Sets weekDay, yearDay and gmtoff to 0, and isDst to -1.
* Call {@link #normalize(boolean)} if you need those.
- *
+ *
* @param monthDay the day of the month (in the range [1,31])
* @param month the zero-based month number (in the range [0,11])
* @param year the year
@@ -606,7 +606,7 @@
/**
* Returns true if the time represented by this Time object occurs before
* the given time.
- *
+ *
* @param that a given Time object to compare against
* @return true if this time is less than the given time
*/
@@ -618,7 +618,7 @@
/**
* Returns true if the time represented by this Time object occurs after
* the given time.
- *
+ *
* @param that a given Time object to compare against
* @return true if this time is greater than the given time
*/
@@ -632,12 +632,12 @@
* closest Thursday yearDay.
*/
private static final int[] sThursdayOffset = { -3, 3, 2, 1, 0, -1, -2 };
-
+
/**
* Computes the week number according to ISO 8601. The current Time
* object must already be normalized because this method uses the
* yearDay and weekDay fields.
- *
+ *
* <p>
* In IS0 8601, weeks start on Monday.
* The first week of the year (week 1) is defined by ISO 8601 as the
@@ -645,12 +645,12 @@
* Or equivalently, the week containing January 4. Or equivalently,
* the week with the year's first Thursday in it.
* </p>
- *
+ *
* <p>
* The week number can be calculated by counting Thursdays. Week N
* contains the Nth Thursday of the year.
* </p>
- *
+ *
* @return the ISO week number.
*/
public int getWeekNumber() {
@@ -661,7 +661,7 @@
if (closestThursday >= 0 && closestThursday <= 364) {
return closestThursday / 7 + 1;
}
-
+
// The week crosses a year boundary.
Time temp = new Time(this);
temp.monthDay += sThursdayOffset[weekDay];
@@ -670,7 +670,7 @@
}
/**
- * Return a string in the RFC 3339 format.
+ * Return a string in the RFC 3339 format.
* <p>
* If allDay is true, expresses the time as Y-M-D</p>
* <p>
@@ -691,13 +691,13 @@
int offset = (int)Math.abs(gmtoff);
int minutes = (offset % 3600) / 60;
int hours = offset / 3600;
-
+
return String.format("%s%s%02d:%02d", base, sign, hours, minutes);
}
}
-
+
/**
- * Returns true if the day of the given time is the epoch on the Julian Calendar
+ * Returns true if the day of the given time is the epoch on the Julian Calendar
* (January 1, 1970 on the Gregorian calendar).
*
* @param time the time to test
@@ -707,7 +707,7 @@
long millis = time.toMillis(true);
return getJulianDay(millis, 0) == EPOCH_JULIAN_DAY;
}
-
+
/**
* Computes the Julian day number, given the UTC milliseconds
* and the offset (in seconds) from UTC. The Julian day for a given
@@ -716,10 +716,10 @@
* what timezone is being used. The Julian day is useful for testing
* if two events occur on the same day and for determining the relative
* time of an event from the present ("yesterday", "3 days ago", etc.).
- *
+ *
* <p>
* Use {@link #toMillis(boolean)} to get the milliseconds.
- *
+ *
* @param millis the time in UTC milliseconds
* @param gmtoff the offset from UTC in seconds
* @return the Julian day
@@ -729,7 +729,7 @@
long julianDay = (millis + offsetMillis) / DateUtils.DAY_IN_MILLIS;
return (int) julianDay + EPOCH_JULIAN_DAY;
}
-
+
/**
* <p>Sets the time from the given Julian day number, which must be based on
* the same timezone that is set in this Time object. The "gmtoff" field
@@ -738,7 +738,7 @@
* After this method returns all the fields will be normalized and the time
* will be set to 12am at the beginning of the given Julian day.
* </p>
- *
+ *
* <p>
* The only exception to this is if 12am does not exist for that day because
* of daylight saving time. For example, Cairo, Egypt moves time ahead one
@@ -746,7 +746,7 @@
* also change daylight saving time at 12am. In those cases, the time
* will be set to 1am.
* </p>
- *
+ *
* @param julianDay the Julian day in the timezone for this Time object
* @return the UTC milliseconds for the beginning of the Julian day
*/
@@ -756,13 +756,13 @@
// the day.
long millis = (julianDay - EPOCH_JULIAN_DAY) * DateUtils.DAY_IN_MILLIS;
set(millis);
-
+
// Figure out how close we are to the requested Julian day.
// We can't be off by more than a day.
int approximateDay = getJulianDay(millis, gmtoff);
int diff = julianDay - approximateDay;
monthDay += diff;
-
+
// Set the time to 12am and re-normalize.
hour = 0;
minute = 0;
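
A worked example tying the corrected set(monthDay, month, year) sample to the ISO week rule described above (a sketch; normalize() fills in the weekDay and yearDay fields that getWeekNumber() relies on):

    import android.text.format.Time;

    class TimeSketch {
        static void demo() {
            Time t = new Time(Time.TIMEZONE_UTC);
            t.set(4, 0, 2008);             // Jan 4, 2008 (monthDay, month, year), 12am
            t.normalize(true);             // fills in weekDay and yearDay
            int week = t.getWeekNumber();  // 1 -- ISO 8601: the week containing Jan 4 is week 1
        }
    }
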
diff --git a/core/java/android/text/method/ArrowKeyMovementMethod.java b/core/java/android/text/method/ArrowKeyMovementMethod.java
index 9df63a9..79a0c37 100644
--- a/core/java/android/text/method/ArrowKeyMovementMethod.java
+++ b/core/java/android/text/method/ArrowKeyMovementMethod.java
@@ -17,13 +17,11 @@
package android.text.method;
import android.text.Layout;
-import android.text.NoCopySpan;
import android.text.Selection;
import android.text.Spannable;
import android.view.KeyEvent;
import android.view.MotionEvent;
import android.view.View;
-import android.view.ViewConfiguration;
import android.widget.TextView;
import android.widget.TextView.CursorController;
@@ -38,13 +36,18 @@
*/
protected CursorController mCursorController;
+ private boolean isCap(Spannable buffer) {
+ return ((MetaKeyKeyListener.getMetaState(buffer, KeyEvent.META_SHIFT_ON) == 1) ||
+ (MetaKeyKeyListener.getMetaState(buffer, MetaKeyKeyListener.META_SELECTING) != 0));
+ }
+
+ private boolean isAlt(Spannable buffer) {
+ return MetaKeyKeyListener.getMetaState(buffer, KeyEvent.META_ALT_ON) == 1;
+ }
+
private boolean up(TextView widget, Spannable buffer) {
- boolean cap = (MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_SHIFT_ON) == 1) ||
- (MetaKeyKeyListener.getMetaState(buffer,
- MetaKeyKeyListener.META_SELECTING) != 0);
- boolean alt = MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_ALT_ON) == 1;
+ boolean cap = isCap(buffer);
+ boolean alt = isAlt(buffer);
Layout layout = widget.getLayout();
if (cap) {
@@ -65,12 +68,8 @@
}
private boolean down(TextView widget, Spannable buffer) {
- boolean cap = (MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_SHIFT_ON) == 1) ||
- (MetaKeyKeyListener.getMetaState(buffer,
- MetaKeyKeyListener.META_SELECTING) != 0);
- boolean alt = MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_ALT_ON) == 1;
+ boolean cap = isCap(buffer);
+ boolean alt = isAlt(buffer);
Layout layout = widget.getLayout();
if (cap) {
@@ -91,12 +90,8 @@
}
private boolean left(TextView widget, Spannable buffer) {
- boolean cap = (MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_SHIFT_ON) == 1) ||
- (MetaKeyKeyListener.getMetaState(buffer,
- MetaKeyKeyListener.META_SELECTING) != 0);
- boolean alt = MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_ALT_ON) == 1;
+ boolean cap = isCap(buffer);
+ boolean alt = isAlt(buffer);
Layout layout = widget.getLayout();
if (cap) {
@@ -115,12 +110,8 @@
}
private boolean right(TextView widget, Spannable buffer) {
- boolean cap = (MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_SHIFT_ON) == 1) ||
- (MetaKeyKeyListener.getMetaState(buffer,
- MetaKeyKeyListener.META_SELECTING) != 0);
- boolean alt = MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_ALT_ON) == 1;
+ boolean cap = isCap(buffer);
+ boolean alt = isAlt(buffer);
Layout layout = widget.getLayout();
if (cap) {
@@ -138,35 +129,6 @@
}
}
- private int getOffset(int x, int y, TextView widget){
- // Converts the absolute X,Y coordinates to the character offset for the
- // character whose position is closest to the specified
- // horizontal position.
- x -= widget.getTotalPaddingLeft();
- y -= widget.getTotalPaddingTop();
-
- // Clamp the position to inside of the view.
- if (x < 0) {
- x = 0;
- } else if (x >= (widget.getWidth()-widget.getTotalPaddingRight())) {
- x = widget.getWidth()-widget.getTotalPaddingRight() - 1;
- }
- if (y < 0) {
- y = 0;
- } else if (y >= (widget.getHeight()-widget.getTotalPaddingBottom())) {
- y = widget.getHeight()-widget.getTotalPaddingBottom() - 1;
- }
-
- x += widget.getScrollX();
- y += widget.getScrollY();
-
- Layout layout = widget.getLayout();
- int line = layout.getLineForVertical(y);
-
- int offset = layout.getOffsetForHorizontal(line, x);
- return offset;
- }
-
public boolean onKeyDown(TextView widget, Spannable buffer, int keyCode, KeyEvent event) {
if (executeDown(widget, buffer, keyCode)) {
MetaKeyKeyListener.adjustMetaAfterKeypress(buffer);
@@ -198,10 +160,9 @@
break;
case KeyEvent.KEYCODE_DPAD_CENTER:
- if (MetaKeyKeyListener.getMetaState(buffer, MetaKeyKeyListener.META_SELECTING) != 0) {
- if (widget.showContextMenu()) {
+ if ((MetaKeyKeyListener.getMetaState(buffer, MetaKeyKeyListener.META_SELECTING) != 0) &&
+ (widget.showContextMenu())) {
handled = true;
- }
}
}
@@ -219,8 +180,7 @@
public boolean onKeyOther(TextView view, Spannable text, KeyEvent event) {
int code = event.getKeyCode();
- if (code != KeyEvent.KEYCODE_UNKNOWN
- && event.getAction() == KeyEvent.ACTION_MULTIPLE) {
+ if (code != KeyEvent.KEYCODE_UNKNOWN && event.getAction() == KeyEvent.ACTION_MULTIPLE) {
int repeat = event.getRepeatCount();
boolean handled = false;
while ((--repeat) > 0) {
@@ -257,54 +217,20 @@
if (widget.isFocused() && !widget.didTouchFocusSelect()) {
if (event.getAction() == MotionEvent.ACTION_DOWN) {
- boolean cap = (MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_SHIFT_ON) == 1) ||
- (MetaKeyKeyListener.getMetaState(buffer,
- MetaKeyKeyListener.META_SELECTING) != 0);
- int x = (int) event.getX();
- int y = (int) event.getY();
- int offset = getOffset(x, y, widget);
-
+ boolean cap = isCap(buffer);
if (cap) {
- buffer.setSpan(LAST_TAP_DOWN, offset, offset,
- Spannable.SPAN_POINT_POINT);
+ int offset = widget.getOffset((int) event.getX(), (int) event.getY());
+
+ buffer.setSpan(LAST_TAP_DOWN, offset, offset, Spannable.SPAN_POINT_POINT);
// Disallow intercepting of the touch events, so that
// users can scroll and select at the same time.
// without this, users would get booted out of select
// mode once the view detected it needed to scroll.
widget.getParent().requestDisallowInterceptTouchEvent(true);
- } else {
- OnePointFiveTapState[] tap = buffer.getSpans(0, buffer.length(),
- OnePointFiveTapState.class);
-
- if (tap.length > 0) {
- if (event.getEventTime() - tap[0].mWhen <=
- ViewConfiguration.getDoubleTapTimeout() &&
- sameWord(buffer, offset, Selection.getSelectionEnd(buffer))) {
-
- tap[0].active = true;
- MetaKeyKeyListener.startSelecting(widget, buffer);
- widget.getParent().requestDisallowInterceptTouchEvent(true);
- buffer.setSpan(LAST_TAP_DOWN, offset, offset,
- Spannable.SPAN_POINT_POINT);
- }
-
- tap[0].mWhen = event.getEventTime();
- } else {
- OnePointFiveTapState newtap = new OnePointFiveTapState();
- newtap.mWhen = event.getEventTime();
- newtap.active = false;
- buffer.setSpan(newtap, 0, buffer.length(),
- Spannable.SPAN_INCLUSIVE_INCLUSIVE);
- }
}
} else if (event.getAction() == MotionEvent.ACTION_MOVE) {
- boolean cap = (MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_SHIFT_ON) == 1) ||
- (MetaKeyKeyListener.getMetaState(buffer,
- MetaKeyKeyListener.META_SELECTING) != 0);
-
+ boolean cap = isCap(buffer);
if (cap && handled) {
// Before selecting, make sure we've moved out of the "slop".
@@ -318,39 +244,9 @@
// Update selection as we're moving the selection area.
// Get the current touch position
- int x = (int) event.getX();
- int y = (int) event.getY();
- int offset = getOffset(x, y, widget);
+ int offset = widget.getOffset((int) event.getX(), (int) event.getY());
- final OnePointFiveTapState[] tap = buffer.getSpans(0, buffer.length(),
- OnePointFiveTapState.class);
-
- if (tap.length > 0 && tap[0].active) {
- // Get the last down touch position (the position at which the
- // user started the selection)
- int lastDownOffset = buffer.getSpanStart(LAST_TAP_DOWN);
-
- // Compute the selection boundaries
- int spanstart;
- int spanend;
- if (offset >= lastDownOffset) {
- // Expand from word start of the original tap to new word
- // end, since we are selecting "forwards"
- spanstart = findWordStart(buffer, lastDownOffset);
- spanend = findWordEnd(buffer, offset);
- } else {
- // Expand to from new word start to word end of the original
- // tap since we are selecting "backwards".
- // The spanend will always need to be associated with the touch
- // up position, so that refining the selection with the
- // trackball will work as expected.
- spanstart = findWordEnd(buffer, lastDownOffset);
- spanend = findWordStart(buffer, offset);
- }
- Selection.setSelection(buffer, spanstart, spanend);
- } else {
- Selection.extendSelection(buffer, offset);
- }
+ Selection.extendSelection(buffer, offset);
return true;
}
} else if (event.getAction() == MotionEvent.ACTION_UP) {
@@ -359,70 +255,17 @@
// the current scroll offset to avoid the scroll jumping later
// to show it.
if ((initialScrollY >= 0 && initialScrollY != widget.getScrollY()) ||
- (initialScrollX >= 0 && initialScrollX != widget.getScrollX())) {
+ (initialScrollX >= 0 && initialScrollX != widget.getScrollX())) {
widget.moveCursorToVisibleOffset();
return true;
}
- int x = (int) event.getX();
- int y = (int) event.getY();
- int off = getOffset(x, y, widget);
-
- // XXX should do the same adjust for x as we do for the line.
-
- OnePointFiveTapState[] onepointfivetap = buffer.getSpans(0, buffer.length(),
- OnePointFiveTapState.class);
- if (onepointfivetap.length > 0 && onepointfivetap[0].active &&
- Selection.getSelectionStart(buffer) == Selection.getSelectionEnd(buffer)) {
- // If we've set select mode, because there was a onepointfivetap,
- // but there was no ensuing swipe gesture, undo the select mode
- // and remove reference to the last onepointfivetap.
- MetaKeyKeyListener.stopSelecting(widget, buffer);
- for (int i=0; i < onepointfivetap.length; i++) {
- buffer.removeSpan(onepointfivetap[i]);
- }
+ int offset = widget.getOffset((int) event.getX(), (int) event.getY());
+ if (isCap(buffer)) {
buffer.removeSpan(LAST_TAP_DOWN);
- }
- boolean cap = (MetaKeyKeyListener.getMetaState(buffer,
- KeyEvent.META_SHIFT_ON) == 1) ||
- (MetaKeyKeyListener.getMetaState(buffer,
- MetaKeyKeyListener.META_SELECTING) != 0);
-
- DoubleTapState[] tap = buffer.getSpans(0, buffer.length(),
- DoubleTapState.class);
- boolean doubletap = false;
-
- if (tap.length > 0) {
- if (event.getEventTime() - tap[0].mWhen <=
- ViewConfiguration.getDoubleTapTimeout() &&
- sameWord(buffer, off, Selection.getSelectionEnd(buffer))) {
-
- doubletap = true;
- }
-
- tap[0].mWhen = event.getEventTime();
+ Selection.extendSelection(buffer, offset);
} else {
- DoubleTapState newtap = new DoubleTapState();
- newtap.mWhen = event.getEventTime();
- buffer.setSpan(newtap, 0, buffer.length(),
- Spannable.SPAN_INCLUSIVE_INCLUSIVE);
- }
-
- if (cap) {
- buffer.removeSpan(LAST_TAP_DOWN);
- if (onepointfivetap.length > 0 && onepointfivetap[0].active) {
- // If we selecting something with the onepointfivetap-and
- // swipe gesture, stop it on finger up.
- MetaKeyKeyListener.stopSelecting(widget, buffer);
- } else {
- Selection.extendSelection(buffer, off);
- }
- } else if (doubletap) {
- Selection.setSelection(buffer,
- findWordStart(buffer, off),
- findWordEnd(buffer, off));
- } else {
- Selection.setSelection(buffer, off);
+ Selection.setSelection(buffer, offset);
}
MetaKeyKeyListener.adjustMetaAfterKeypress(buffer);
@@ -442,11 +285,10 @@
widget.cancelLongPress();
// Offset the current touch position (from controller to cursor)
- final int x = (int) event.getX() + mCursorController.getOffsetX();
- final int y = (int) event.getY() + mCursorController.getOffsetY();
- int offset = getOffset(x, y, widget);
- Selection.setSelection(buffer, offset);
- mCursorController.updatePosition();
+ final float x = event.getX() + mCursorController.getOffsetX();
+ final float y = event.getY() + mCursorController.getOffsetY();
+ int offset = widget.getOffset((int) x, (int) y);
+ mCursorController.updatePosition(offset);
return true;
case MotionEvent.ACTION_UP:
@@ -468,75 +310,6 @@
mCursorController = cursorController;
}
- private static class DoubleTapState implements NoCopySpan {
- long mWhen;
- }
-
- /* We check for a onepointfive tap. This is similar to
- * doubletap gesture (where a finger goes down, up, down, up, in a short
- * time period), except in the onepointfive tap, a users finger only needs
- * to go down, up, down in a short time period. We detect this type of tap
- * to implement the onepointfivetap-and-swipe selection gesture.
- * This gesture allows users to select a segment of text without going
- * through the "select text" option in the context menu.
- */
- private static class OnePointFiveTapState implements NoCopySpan {
- long mWhen;
- boolean active;
- }
-
- private static boolean sameWord(CharSequence text, int one, int two) {
- int start = findWordStart(text, one);
- int end = findWordEnd(text, one);
-
- if (end == start) {
- return false;
- }
-
- return start == findWordStart(text, two) &&
- end == findWordEnd(text, two);
- }
-
- // TODO: Unify with TextView.getWordForDictionary()
- private static int findWordStart(CharSequence text, int start) {
- for (; start > 0; start--) {
- char c = text.charAt(start - 1);
- int type = Character.getType(c);
-
- if (c != '\'' &&
- type != Character.UPPERCASE_LETTER &&
- type != Character.LOWERCASE_LETTER &&
- type != Character.TITLECASE_LETTER &&
- type != Character.MODIFIER_LETTER &&
- type != Character.DECIMAL_DIGIT_NUMBER) {
- break;
- }
- }
-
- return start;
- }
-
- // TODO: Unify with TextView.getWordForDictionary()
- private static int findWordEnd(CharSequence text, int end) {
- int len = text.length();
-
- for (; end < len; end++) {
- char c = text.charAt(end);
- int type = Character.getType(c);
-
- if (c != '\'' &&
- type != Character.UPPERCASE_LETTER &&
- type != Character.LOWERCASE_LETTER &&
- type != Character.TITLECASE_LETTER &&
- type != Character.MODIFIER_LETTER &&
- type != Character.DECIMAL_DIGIT_NUMBER) {
- break;
- }
- }
-
- return end;
- }
-
public boolean canSelectArbitrarily() {
return true;
}
@@ -573,8 +346,9 @@
}
public static MovementMethod getInstance() {
- if (sInstance == null)
+ if (sInstance == null) {
sInstance = new ArrowKeyMovementMethod();
+ }
return sInstance;
}
diff --git a/core/java/android/view/AbsSavedState.java b/core/java/android/view/AbsSavedState.java
index 840d7c1..6ad33dd 100644
--- a/core/java/android/view/AbsSavedState.java
+++ b/core/java/android/view/AbsSavedState.java
@@ -54,7 +54,7 @@
*/
protected AbsSavedState(Parcel source) {
// FIXME need class loader
- Parcelable superState = (Parcelable) source.readParcelable(null);
+ Parcelable superState = source.readParcelable(null);
mSuperState = superState != null ? superState : EMPTY_STATE;
}
@@ -75,7 +75,7 @@
= new Parcelable.Creator<AbsSavedState>() {
public AbsSavedState createFromParcel(Parcel in) {
- Parcelable superState = (Parcelable) in.readParcelable(null);
+ Parcelable superState = in.readParcelable(null);
if (superState != null) {
throw new IllegalStateException("superState must be null");
}
diff --git a/core/java/android/view/GLES20Canvas.java b/core/java/android/view/GLES20Canvas.java
index 49ef8dc..0ad3c0b 100644
--- a/core/java/android/view/GLES20Canvas.java
+++ b/core/java/android/view/GLES20Canvas.java
@@ -45,6 +45,10 @@
private final float[] mPoint = new float[2];
private final float[] mLine = new float[4];
+
+ private final Rect mClipBounds = new Rect();
+
+ private DrawFilter mFilter;
///////////////////////////////////////////////////////////////////////////
// Constructors
@@ -164,6 +168,7 @@
@Override
public boolean clipRect(Rect rect, Region.Op op) {
+ // TODO: Implement
throw new UnsupportedOperationException();
}
@@ -174,6 +179,7 @@
@Override
public boolean clipRect(RectF rect, Region.Op op) {
+ // TODO: Implement
throw new UnsupportedOperationException();
}
@@ -336,12 +342,14 @@
@Override
public void setDrawFilter(DrawFilter filter) {
- throw new UnsupportedOperationException();
+ // Don't crash, but ignore the draw filter
+ // TODO: Implement PaintDrawFilter
+ mFilter = filter;
}
@Override
public DrawFilter getDrawFilter() {
- throw new UnsupportedOperationException();
+ return mFilter;
}
///////////////////////////////////////////////////////////////////////////
@@ -408,7 +416,11 @@
@Override
public void drawBitmap(int[] colors, int offset, int stride, float x, float y,
int width, int height, boolean hasAlpha, Paint paint) {
- // TODO: Implement
+ final Bitmap.Config config = hasAlpha ? Bitmap.Config.ARGB_8888 : Bitmap.Config.RGB_565;
+ final Bitmap b = Bitmap.createBitmap(colors, offset, stride, width, height, config);
+ final int nativePaint = paint == null ? 0 : paint.mNativePaint;
+ nDrawBitmap(mRenderer, b.mNativeBitmap, x, y, nativePaint);
+ b.recycle();
}
@Override
@@ -420,7 +432,7 @@
@Override
public void drawBitmapMesh(Bitmap bitmap, int meshWidth, int meshHeight, float[] verts,
int vertOffset, int[] colors, int colorOffset, Paint paint) {
- throw new UnsupportedOperationException();
+ // TODO: Implement
}
@Override
@@ -466,7 +478,9 @@
@Override
public void drawPaint(Paint paint) {
- // TODO: Implement
+ final Rect r = mClipBounds;
+ nGetClipBounds(mRenderer, r);
+ drawRect(r.left, r.top, r.right, r.bottom, paint);
}
@Override
@@ -591,6 +605,6 @@
public void drawVertices(VertexMode mode, int vertexCount, float[] verts, int vertOffset,
float[] texs, int texOffset, int[] colors, int colorOffset, short[] indices,
int indexOffset, int indexCount, Paint paint) {
- throw new UnsupportedOperationException();
+ // TODO: Implement
}
}
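
The colors[] overload above is now implemented by wrapping the pixels in a temporary Bitmap. From application code, the corresponding Canvas call looks like this sketch (the wrapper class, pixel buffer, and paint flags are illustrative):

import android.graphics.Canvas;
import android.graphics.Paint;

class PixelBufferDrawing {
    // Draws a raw ARGB pixel buffer via the Canvas overload implemented above.
    static void drawPixels(Canvas canvas, int[] argbPixels, int width, int height) {
        Paint paint = new Paint(Paint.FILTER_BITMAP_FLAG);
        // offset = 0, stride = width, position (0, 0), hasAlpha = true
        canvas.drawBitmap(argbPixels, 0, width, 0f, 0f, width, height, true, paint);
    }
}
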
diff --git a/core/java/android/view/KeyEvent.java b/core/java/android/view/KeyEvent.java
index 0bfb6d6..9c05008 100755
--- a/core/java/android/view/KeyEvent.java
+++ b/core/java/android/view/KeyEvent.java
@@ -124,11 +124,27 @@
public static final int KEYCODE_PAGE_DOWN = 93;
public static final int KEYCODE_PICTSYMBOLS = 94; // switch symbol-sets (Emoji,Kao-moji)
public static final int KEYCODE_SWITCH_CHARSET = 95; // switch char-sets (Kanji,Katakana)
+ public static final int KEYCODE_BUTTON_A = 96;
+ public static final int KEYCODE_BUTTON_B = 97;
+ public static final int KEYCODE_BUTTON_C = 98;
+ public static final int KEYCODE_BUTTON_X = 99;
+ public static final int KEYCODE_BUTTON_Y = 100;
+ public static final int KEYCODE_BUTTON_Z = 101;
+ public static final int KEYCODE_BUTTON_L1 = 102;
+ public static final int KEYCODE_BUTTON_R1 = 103;
+ public static final int KEYCODE_BUTTON_L2 = 104;
+ public static final int KEYCODE_BUTTON_R2 = 105;
+ public static final int KEYCODE_BUTTON_THUMBL = 106;
+ public static final int KEYCODE_BUTTON_THUMBR = 107;
+ public static final int KEYCODE_BUTTON_START = 108;
+ public static final int KEYCODE_BUTTON_SELECT = 109;
+ public static final int KEYCODE_BUTTON_MODE = 110;
// NOTE: If you add a new keycode here you must also add it to:
// isSystem()
// native/include/android/keycodes.h
// frameworks/base/include/ui/KeycodeLabels.h
+ // external/webkit/WebKit/android/plugins/ANPKeyCodes.h
// tools/puppet_master/PuppetMaster/nav_keys.py
// frameworks/base/core/res/res/values/attrs.xml
// commands/monkey/Monkey.java
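
A hedged sketch of how an application might consume the new gamepad keycodes; handleFireButton() and handlePause() are hypothetical game hooks, not part of this change:

import android.app.Activity;
import android.view.KeyEvent;

class GamepadActivity extends Activity {
    @Override
    public boolean onKeyDown(int keyCode, KeyEvent event) {
        switch (keyCode) {
            case KeyEvent.KEYCODE_BUTTON_A:
                handleFireButton();   // hypothetical game action
                return true;
            case KeyEvent.KEYCODE_BUTTON_START:
                handlePause();        // hypothetical pause hook
                return true;
            default:
                return super.onKeyDown(keyCode, event);
        }
    }

    private void handleFireButton() { /* game-specific */ }
    private void handlePause() { /* game-specific */ }
}
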
diff --git a/core/java/android/view/MotionEvent.java b/core/java/android/view/MotionEvent.java
index ae8c21d..35e229a 100644
--- a/core/java/android/view/MotionEvent.java
+++ b/core/java/android/view/MotionEvent.java
@@ -722,7 +722,7 @@
*
* @param pointerId The identifier of the pointer to be found.
* @return Returns either the index of the pointer (for use with
- * {@link #getX(int) et al.), or -1 if there is no data available for
+ * {@link #getX(int)} et al.), or -1 if there is no data available for
* that pointer identifier.
*/
public final int findPointerIndex(int pointerId) {
diff --git a/core/java/android/view/SurfaceHolder.java b/core/java/android/view/SurfaceHolder.java
index 34e4638..0d38f7b 100644
--- a/core/java/android/view/SurfaceHolder.java
+++ b/core/java/android/view/SurfaceHolder.java
@@ -119,6 +119,23 @@
}
/**
+ * Additional callbacks that can be received for {@link Callback}.
+ */
+ public interface Callback2 extends Callback {
+ /**
+ * Called when the application needs to redraw the content of its
+ * surface, after it is resized or for some other reason. By not
+ * returning here until the redraw is complete, you can ensure that
+ * the user will not see your surface in a bad state (at its new
+ * size before it has been correctly drawn that way). This will
+     * typically be preceded by a call to {@link #surfaceChanged}.
+ *
+ * @param holder The SurfaceHolder whose surface has changed.
+ */
+ public void surfaceRedrawNeeded(SurfaceHolder holder);
+ }
+
+ /**
     * Add a Callback interface for this holder. There can be several Callback
     * interfaces associated with a holder.
*
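
A minimal sketch of implementing the new Callback2 interface; drawFrame() is a hypothetical helper that draws synchronously so that surfaceRedrawNeeded() does not return before the surface content is valid:

import android.graphics.Canvas;
import android.view.SurfaceHolder;

class RedrawAwareCallback implements SurfaceHolder.Callback2 {
    public void surfaceCreated(SurfaceHolder holder) { /* allocate drawing state */ }

    public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
        /* remember the new size for the next frame */
    }

    public void surfaceDestroyed(SurfaceHolder holder) { /* stop drawing */ }

    public void surfaceRedrawNeeded(SurfaceHolder holder) {
        // Block until the surface shows valid content at its new size.
        drawFrame(holder);
    }

    private void drawFrame(SurfaceHolder holder) {
        Canvas canvas = holder.lockCanvas();
        if (canvas != null) {
            try {
                // draw the frame here
            } finally {
                holder.unlockCanvasAndPost(canvas);
            }
        }
    }
}
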
diff --git a/core/java/android/view/SurfaceView.java b/core/java/android/view/SurfaceView.java
index e4d1ae1..54cb4ca 100644
--- a/core/java/android/view/SurfaceView.java
+++ b/core/java/android/view/SurfaceView.java
@@ -123,7 +123,7 @@
handleGetNewSurface();
} break;
case UPDATE_WINDOW_MSG: {
- updateWindow(false);
+ updateWindow(false, false);
} break;
}
}
@@ -132,7 +132,7 @@
final ViewTreeObserver.OnScrollChangedListener mScrollChangedListener
= new ViewTreeObserver.OnScrollChangedListener() {
public void onScrollChanged() {
- updateWindow(false);
+ updateWindow(false, false);
}
};
@@ -210,7 +210,7 @@
super.onWindowVisibilityChanged(visibility);
mWindowVisibility = visibility == VISIBLE;
mRequestedVisible = mWindowVisibility && mViewVisibility;
- updateWindow(false);
+ updateWindow(false, false);
}
@Override
@@ -218,7 +218,7 @@
super.setVisibility(visibility);
mViewVisibility = visibility == VISIBLE;
mRequestedVisible = mWindowVisibility && mViewVisibility;
- updateWindow(false);
+ updateWindow(false, false);
}
/**
@@ -232,7 +232,7 @@
*/
protected void showSurface() {
if (mSession != null) {
- updateWindow(true);
+ updateWindow(true, false);
}
}
@@ -265,7 +265,7 @@
protected void onDetachedFromWindow() {
getViewTreeObserver().removeOnScrollChangedListener(mScrollChangedListener);
mRequestedVisible = false;
- updateWindow(false);
+ updateWindow(false, false);
mHaveFrame = false;
if (mWindow != null) {
try {
@@ -290,7 +290,7 @@
@Override
protected void onSizeChanged(int w, int h, int oldw, int oldh) {
super.onSizeChanged(w, h, oldw, oldh);
- updateWindow(false);
+ updateWindow(false, false);
}
@Override
@@ -343,7 +343,7 @@
}
// reposition ourselves where the surface is
mHaveFrame = true;
- updateWindow(false);
+ updateWindow(false, false);
super.dispatchDraw(canvas);
}
@@ -397,7 +397,7 @@
mWindowType = type;
}
- private void updateWindow(boolean force) {
+ private void updateWindow(boolean force, boolean redrawNeeded) {
if (!mHaveFrame) {
return;
}
@@ -425,7 +425,7 @@
final boolean typeChanged = mType != mRequestedType;
if (force || creating || formatChanged || sizeChanged || visibleChanged
|| typeChanged || mLeft != mLocation[0] || mTop != mLocation[1]
- || mUpdateWindowNeeded || mReportDrawNeeded) {
+ || mUpdateWindowNeeded || mReportDrawNeeded || redrawNeeded) {
if (localLOGV) Log.i(TAG, "Changes: creating=" + creating
+ " format=" + formatChanged + " size=" + sizeChanged
@@ -524,6 +524,8 @@
}
try {
+ redrawNeeded |= creating | reportDrawNeeded;
+
if (visible) {
mDestroyReportNeeded = true;
@@ -545,12 +547,20 @@
c.surfaceChanged(mSurfaceHolder, mFormat, myWidth, myHeight);
}
}
+ if (redrawNeeded) {
+ for (SurfaceHolder.Callback c : callbacks) {
+ if (c instanceof SurfaceHolder.Callback2) {
+ ((SurfaceHolder.Callback2)c).surfaceRedrawNeeded(
+ mSurfaceHolder);
+ }
+ }
+ }
} else {
mSurface.release();
}
} finally {
mIsCreating = false;
- if (creating || reportDrawNeeded) {
+ if (redrawNeeded) {
mSession.finishDrawing(mWindow);
}
}
@@ -580,7 +590,7 @@
void handleGetNewSurface() {
mNewSurfaceNeeded = true;
- updateWindow(false);
+ updateWindow(false, false);
}
/**
@@ -696,7 +706,7 @@
mRequestedFormat = format;
if (mWindow != null) {
- updateWindow(false);
+ updateWindow(false, false);
}
}
@@ -713,7 +723,7 @@
case SURFACE_TYPE_PUSH_BUFFERS:
mRequestedType = type;
if (mWindow != null) {
- updateWindow(false);
+ updateWindow(false, false);
}
break;
}
diff --git a/core/java/android/view/View.java b/core/java/android/view/View.java
index 05d380e..a529aea 100644
--- a/core/java/android/view/View.java
+++ b/core/java/android/view/View.java
@@ -16,6 +16,7 @@
package android.view;
+import android.graphics.RectF;
import com.android.internal.R;
import com.android.internal.view.menu.MenuBuilder;
@@ -1585,6 +1586,87 @@
int mViewFlags;
/**
+ * The transform matrix for the View. This transform is calculated internally
+ * based on the rotation, scaleX, and scaleY properties. The identity matrix
+ * is used by default. Do *not* use this variable directly; instead call
+ * getMatrix(), which will automatically recalculate the matrix if necessary
+ * to get the correct matrix based on the latest rotation and scale properties.
+ */
+ private final Matrix mMatrix = new Matrix();
+
+ /**
+     * A cache of the inverse of mMatrix, used to map points from parent
+     * coordinates back into this view's local coordinates. Do *not* use this
+     * variable directly; instead call getInverseMatrix(), which recalculates
+     * the cached inverse whenever the transform properties have changed.
+ */
+ private Matrix mInverseMatrix;
+
+ /**
+ * An internal variable that tracks whether we need to recalculate the
+ * transform matrix, based on whether the rotation or scaleX/Y properties
+ * have changed since the matrix was last calculated.
+ */
+ private boolean mMatrixDirty = false;
+
+ /**
+     * An internal variable that tracks whether we need to recalculate the
+     * cached inverse matrix, because the transform matrix has changed since
+     * the inverse was last computed.
+ */
+ private boolean mInverseMatrixDirty = true;
+
+ /**
+     * A variable that tracks whether the current transform matrix is the
+     * identity matrix. This variable is only valid after a call to getMatrix().
+ */
+ boolean mMatrixIsIdentity = true;
+
+ /**
+ * The degrees rotation around the pivot point.
+ */
+ @ViewDebug.ExportedProperty
+ private float mRotation = 0f;
+
+ /**
+ * The amount of scale in the x direction around the pivot point. A
+ * value of 1 means no scaling is applied.
+ */
+ @ViewDebug.ExportedProperty
+ private float mScaleX = 1f;
+
+ /**
+ * The amount of scale in the y direction around the pivot point. A
+ * value of 1 means no scaling is applied.
+ */
+ @ViewDebug.ExportedProperty
+ private float mScaleY = 1f;
+
+ /**
+     * The x location of the point around which the view is rotated and scaled.
+ */
+ @ViewDebug.ExportedProperty
+ private float mPivotX = 0f;
+
+ /**
+     * The y location of the point around which the view is rotated and scaled.
+ */
+ @ViewDebug.ExportedProperty
+ private float mPivotY = 0f;
+
+ /**
+ * The opacity of the View. This is a value from 0 to 1, where 0 means
+ * completely transparent and 1 means completely opaque.
+ */
+ @ViewDebug.ExportedProperty
+ private float mAlpha = 1f;
+
+ /**
* The distance in pixels from the left edge of this view's parent
* to the left edge of this view.
* {@hide}
@@ -3019,6 +3101,16 @@
}
/**
+ * Determine if this view has the FITS_SYSTEM_WINDOWS flag set.
+ * @return True if window has FITS_SYSTEM_WINDOWS set
+ *
+ * @hide
+ */
+ public boolean isFitsSystemWindowsFlagSet() {
+ return (mViewFlags & FITS_SYSTEM_WINDOWS) == FITS_SYSTEM_WINDOWS;
+ }
+
+ /**
* Returns the visibility status for this view.
*
* @return One of {@link #VISIBLE}, {@link #INVISIBLE}, or {@link #GONE}.
@@ -4406,9 +4498,7 @@
final int y = (int) event.getY();
// Be lenient about moving outside of buttons
- int slop = mTouchSlop;
- if ((x < 0 - slop) || (x >= getWidth() + slop) ||
- (y < 0 - slop) || (y >= getHeight() + slop)) {
+ if (!pointInView(x, y, mTouchSlop)) {
// Outside button
removeTapCallback();
if ((mPrivateFlags & PRESSED) != 0) {
@@ -4754,6 +4844,234 @@
}
/**
+ * The transform matrix of this view, which is calculated based on the current
+     * rotation, scale, and pivot properties.
+ *
+ * @see #getRotation()
+ * @see #getScaleX()
+ * @see #getScaleY()
+ * @see #getPivotX()
+ * @see #getPivotY()
+ * @return The current transform matrix for the view
+ */
+ public Matrix getMatrix() {
+ if (mMatrixDirty) {
+ // transform-related properties have changed since the last time someone
+ // asked for the matrix; recalculate it with the current values
+ mMatrix.reset();
+ mMatrix.setRotate(mRotation, mPivotX, mPivotY);
+ mMatrix.preScale(mScaleX, mScaleY, mPivotX, mPivotY);
+ mMatrixDirty = false;
+ mMatrixIsIdentity = mMatrix.isIdentity();
+ mInverseMatrixDirty = true;
+ }
+ return mMatrix;
+ }
+
+ /**
+ * Utility method to retrieve the inverse of the current mMatrix property.
+ * We cache the matrix to avoid recalculating it when transform properties
+ * have not changed.
+ *
+ * @return The inverse of the current matrix of this view.
+ */
+ Matrix getInverseMatrix() {
+ if (mInverseMatrixDirty) {
+ if (mInverseMatrix == null) {
+ mInverseMatrix = new Matrix();
+ }
+ mMatrix.invert(mInverseMatrix);
+ mInverseMatrixDirty = false;
+ }
+ return mInverseMatrix;
+ }
+
+ /**
+ * The degrees that the view is rotated around the pivot point.
+ *
+ * @see #getPivotX()
+ * @see #getPivotY()
+ * @return The degrees of rotation.
+ */
+ public float getRotation() {
+ return mRotation;
+ }
+
+ /**
+ * Sets the degrees that the view is rotated around the pivot point.
+ *
+ * @param rotation The degrees of rotation.
+ * @see #getPivotX()
+ * @see #getPivotY()
+ */
+ public void setRotation(float rotation) {
+ if (mRotation != rotation) {
+ // Double-invalidation is necessary to capture view's old and new areas
+ invalidate();
+ mRotation = rotation;
+ mMatrixDirty = true;
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
+
+ /**
+ * The amount that the view is scaled in x around the pivot point, as a proportion of
+ * the view's unscaled width. A value of 1, the default, means that no scaling is applied.
+ *
+ * @default 1.0f
+ * @see #getPivotX()
+ * @see #getPivotY()
+ * @return The scaling factor.
+ */
+ public float getScaleX() {
+ return mScaleX;
+ }
+
+ /**
+ * Sets the amount that the view is scaled in x around the pivot point, as a proportion of
+ * the view's unscaled width. A value of 1 means that no scaling is applied.
+ *
+ * @param scaleX The scaling factor.
+ * @see #getPivotX()
+ * @see #getPivotY()
+ */
+ public void setScaleX(float scaleX) {
+ if (mScaleX != scaleX) {
+ // Double-invalidation is necessary to capture view's old and new areas
+ invalidate();
+ mScaleX = scaleX;
+ mMatrixDirty = true;
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
+
+ /**
+ * The amount that the view is scaled in y around the pivot point, as a proportion of
+ * the view's unscaled height. A value of 1, the default, means that no scaling is applied.
+ *
+ * @default 1.0f
+ * @see #getPivotX()
+ * @see #getPivotY()
+ * @return The scaling factor.
+ */
+ public float getScaleY() {
+ return mScaleY;
+ }
+
+ /**
+ * Sets the amount that the view is scaled in Y around the pivot point, as a proportion of
+     * the view's unscaled height. A value of 1 means that no scaling is applied.
+ *
+ * @param scaleY The scaling factor.
+ * @see #getPivotX()
+ * @see #getPivotY()
+ */
+ public void setScaleY(float scaleY) {
+ if (mScaleY != scaleY) {
+ // Double-invalidation is necessary to capture view's old and new areas
+ invalidate();
+ mScaleY = scaleY;
+ mMatrixDirty = true;
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
+
+ /**
+ * The x location of the point around which the view is {@link #setRotation(float) rotated}
+ * and {@link #setScaleX(float) scaled}.
+ *
+ * @see #getRotation()
+ * @see #getScaleX()
+ * @see #getScaleY()
+ * @see #getPivotY()
+ * @return The x location of the pivot point.
+ */
+ public float getPivotX() {
+ return mPivotX;
+ }
+
+ /**
+ * Sets the x location of the point around which the view is
+ * {@link #setRotation(float) rotated} and {@link #setScaleX(float) scaled}.
+ *
+ * @param pivotX The x location of the pivot point.
+ * @see #getRotation()
+ * @see #getScaleX()
+ * @see #getScaleY()
+ * @see #getPivotY()
+ */
+ public void setPivotX(float pivotX) {
+ if (mPivotX != pivotX) {
+ // Double-invalidation is necessary to capture view's old and new areas
+ invalidate();
+ mPivotX = pivotX;
+ mMatrixDirty = true;
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
+
+ /**
+ * The y location of the point around which the view is {@link #setRotation(float) rotated}
+ * and {@link #setScaleY(float) scaled}.
+ *
+ * @see #getRotation()
+ * @see #getScaleX()
+ * @see #getScaleY()
+     * @see #getPivotX()
+ * @return The y location of the pivot point.
+ */
+ public float getPivotY() {
+ return mPivotY;
+ }
+
+ /**
+ * Sets the y location of the point around which the view is {@link #setRotation(float) rotated}
+ * and {@link #setScaleY(float) scaled}.
+ *
+ * @param pivotY The y location of the pivot point.
+ * @see #getRotation()
+ * @see #getScaleX()
+ * @see #getScaleY()
+ * @see #getPivotY()
+ */
+ public void setPivotY(float pivotY) {
+ if (mPivotY != pivotY) {
+ // Double-invalidation is necessary to capture view's old and new areas
+ invalidate();
+ mPivotY = pivotY;
+ mMatrixDirty = true;
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
+
+ /**
+ * The opacity of the view. This is a value from 0 to 1, where 0 means the view is
+ * completely transparent and 1 means the view is completely opaque.
+ *
+ * @default 1.0f
+ * @return The opacity of the view.
+ */
+ public float getAlpha() {
+ return mAlpha;
+ }
+
+ /**
+ * Sets the opacity of the view. This is a value from 0 to 1, where 0 means the view is
+ * completely transparent and 1 means the view is completely opaque.
+ *
+ * @param alpha The opacity of the view.
+ */
+ public void setAlpha(float alpha) {
+ mAlpha = alpha;
+ invalidate();
+ }
+
+ /**
* Top position of this view relative to its parent.
*
* @return The top of this view, in pixels.
@@ -4764,6 +5082,37 @@
}
/**
+ * Sets the top position of this view relative to its parent.
+ *
+ * @param top The top of this view, in pixels.
+ */
+ public final void setTop(int top) {
+ if (top != mTop) {
+ Matrix m = getMatrix();
+ if (mMatrixIsIdentity) {
+ final ViewParent p = mParent;
+ if (p != null && mAttachInfo != null) {
+ final int[] location = mAttachInfo.mInvalidateChildLocation;
+ final Rect r = mAttachInfo.mTmpInvalRect;
+ int minTop = Math.min(mTop, top);
+ location[0] = mLeft;
+ location[1] = minTop;
+ r.set(0, 0, mRight - mLeft, mBottom - minTop);
+ p.invalidateChildInParent(location, r);
+ }
+ } else {
+ // Double-invalidation is necessary to capture view's old and new areas
+ invalidate();
+ }
+ mTop = top;
+ if (!mMatrixIsIdentity) {
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
+ }
+
+ /**
* Bottom position of this view relative to its parent.
*
* @return The bottom of this view, in pixels.
@@ -4774,6 +5123,37 @@
}
/**
+ * Sets the bottom position of this view relative to its parent.
+ *
+ * @param bottom The bottom of this view, in pixels.
+ */
+ public final void setBottom(int bottom) {
+ if (bottom != mBottom) {
+ Matrix m = getMatrix();
+ if (mMatrixIsIdentity) {
+ final ViewParent p = mParent;
+ if (p != null && mAttachInfo != null) {
+ final int[] location = mAttachInfo.mInvalidateChildLocation;
+ final Rect r = mAttachInfo.mTmpInvalRect;
+ int maxBottom = Math.max(mBottom, bottom);
+ location[0] = mLeft;
+ location[1] = mTop;
+ r.set(0, 0, mRight - mLeft, maxBottom - mTop);
+ p.invalidateChildInParent(location, r);
+ }
+ } else {
+ // Double-invalidation is necessary to capture view's old and new areas
+ invalidate();
+ }
+ mBottom = bottom;
+ if (!mMatrixIsIdentity) {
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
+ }
+
+ /**
* Left position of this view relative to its parent.
*
* @return The left edge of this view, in pixels.
@@ -4784,6 +5164,37 @@
}
/**
+ * Sets the left position of this view relative to its parent.
+ *
+     * @param left The left edge of this view, in pixels.
+ */
+ public final void setLeft(int left) {
+ if (left != mLeft) {
+ Matrix m = getMatrix();
+ if (mMatrixIsIdentity) {
+ final ViewParent p = mParent;
+ if (p != null && mAttachInfo != null) {
+ final int[] location = mAttachInfo.mInvalidateChildLocation;
+ final Rect r = mAttachInfo.mTmpInvalRect;
+ int minLeft = Math.min(mLeft, left);
+ location[0] = minLeft;
+ location[1] = mTop;
+ r.set(0, 0, mRight - minLeft, mBottom - mTop);
+ p.invalidateChildInParent(location, r);
+ }
+ } else {
+ // Double-invalidation is necessary to capture view's old and new areas
+ invalidate();
+ }
+ mLeft = left;
+ if (!mMatrixIsIdentity) {
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
+ }
+
+ /**
* Right position of this view relative to its parent.
*
* @return The right edge of this view, in pixels.
@@ -4794,12 +5205,149 @@
}
/**
+ * Sets the right position of this view relative to its parent.
+ *
+     * @param right The right edge of this view, in pixels.
+ */
+ public final void setRight(int right) {
+ if (right != mRight) {
+ Matrix m = getMatrix();
+ if (mMatrixIsIdentity) {
+ final ViewParent p = mParent;
+ if (p != null && mAttachInfo != null) {
+ final int[] location = mAttachInfo.mInvalidateChildLocation;
+ final Rect r = mAttachInfo.mTmpInvalRect;
+ int maxRight = Math.max(mRight, right);
+ location[0] = mLeft;
+ location[1] = mTop;
+ r.set(0, 0, maxRight - mLeft, mBottom - mTop);
+ p.invalidateChildInParent(location, r);
+ }
+ } else {
+ // Double-invalidation is necessary to capture view's old and new areas
+ invalidate();
+ }
+ mRight = right;
+ if (!mMatrixIsIdentity) {
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
+ }
+
+ /**
+ * The horizontal location of this view relative to its parent. This value is equivalent to the
+ * {@link #getLeft() left} property.
+ *
+ * @return The horizontal position of this view, in pixels.
+ */
+ public int getX() {
+ return mLeft;
+ }
+
+ /**
+ * Sets the horizontal location of this view relative to its parent. Setting this value will
+ * affect both the {@link #setLeft(int) left} and {@link #setRight(int) right} properties
+ * of this view.
+ *
+ * @param x The horizontal position of this view, in pixels.
+ */
+ public void setX(int x) {
+ offsetLeftAndRight(x - mLeft);
+ }
+
+ /**
+ * The vertical location of this view relative to its parent. This value is equivalent to the
+     * {@link #getTop() top} property.
+ *
+ * @return The vertical position of this view, in pixels.
+ */
+ public int getY() {
+ return mTop;
+ }
+
+ /**
+ * Sets the vertical location of this view relative to its parent. Setting this value will
+     * affect both the {@link #setTop(int) top} and {@link #setBottom(int) bottom} properties
+ * of this view.
+ *
+ * @param y The vertical position of this view, in pixels.
+ */
+ public void setY(int y) {
+ offsetTopAndBottom(y - mTop);
+ }
+
+ /**
* Hit rectangle in parent's coordinates
*
* @param outRect The hit rectangle of the view.
*/
public void getHitRect(Rect outRect) {
- outRect.set(mLeft, mTop, mRight, mBottom);
+ Matrix m = getMatrix();
+ if (mMatrixIsIdentity || mAttachInfo == null) {
+ outRect.set(mLeft, mTop, mRight, mBottom);
+ } else {
+ final RectF tmpRect = mAttachInfo.mTmpTransformRect;
+ tmpRect.set(-mPivotX, -mPivotY,
+ getWidth() - mPivotX, getHeight() - mPivotY);
+ m.mapRect(tmpRect);
+ outRect.set((int)tmpRect.left + mLeft, (int)tmpRect.top + mTop,
+ (int)tmpRect.right + mLeft, (int)tmpRect.bottom + mTop);
+ }
+ }
+
+ /**
+ * This method detects whether the given event is inside the view and, if so,
+     * dispatches it via the dispatchTouchEvent(MotionEvent) method.
+ *
+ * @param ev The event that is being dispatched.
+ * @param parentX The x location of the event in the parent's coordinates.
+ * @param parentY The y location of the event in the parent's coordinates.
+ * @return true if the event was inside this view, false otherwise.
+ */
+ boolean dispatchTouchEvent(MotionEvent ev, float parentX, float parentY) {
+ float localX = parentX - mLeft;
+ float localY = parentY - mTop;
+ Matrix m = getMatrix();
+ if (!mMatrixIsIdentity && mAttachInfo != null) {
+ // non-identity matrix: transform the point into the view's coordinates
+ final float[] localXY = mAttachInfo.mTmpTransformLocation;
+ localXY[0] = localX;
+ localXY[1] = localY;
+ getInverseMatrix().mapPoints(localXY);
+ localX = localXY[0];
+ localY = localXY[1];
+ }
+ if (localX >= 0 && localY >= 0 &&
+ localX < (mRight - mLeft) && localY < (mBottom - mTop)) {
+ ev.setLocation(localX, localY);
+ return dispatchTouchEvent(ev);
+ }
+ return false;
+ }
+
+ /**
+ * Utility method to determine whether the given point, in local coordinates,
+ * is inside the view, where the area of the view is expanded by the slop factor.
+ * This method is called while processing touch-move events to determine if the event
+ * is still within the view.
+ */
+ private boolean pointInView(float localX, float localY, float slop) {
+ Matrix m = getMatrix();
+ if (!mMatrixIsIdentity && mAttachInfo != null) {
+ // non-identity matrix: transform the point into the view's coordinates
+ final float[] localXY = mAttachInfo.mTmpTransformLocation;
+ localXY[0] = localX;
+ localXY[1] = localY;
+ getInverseMatrix().mapPoints(localXY);
+ localX = localXY[0];
+ localY = localXY[1];
+ }
+ if (localX > -slop && localY > -slop &&
+ localX < ((mRight - mLeft) + slop) && localY < ((mBottom - mTop) + slop)) {
+ return true;
+ }
+ return false;
}
/**
@@ -4862,8 +5410,30 @@
* @param offset the number of pixels to offset the view by
*/
public void offsetTopAndBottom(int offset) {
- mTop += offset;
- mBottom += offset;
+ if (offset != 0) {
+ Matrix m = getMatrix();
+ if (mMatrixIsIdentity) {
+ final ViewParent p = mParent;
+ if (p != null && mAttachInfo != null) {
+ final int[] location = mAttachInfo.mInvalidateChildLocation;
+ final Rect r = mAttachInfo.mTmpInvalRect;
+ int minTop = offset < 0 ? mTop + offset : mTop;
+ int maxBottom = offset < 0 ? mBottom : mBottom + offset;
+ location[0] = mLeft;
+ location[1] = minTop;
+ r.set(0, 0, mRight - mLeft, maxBottom - minTop);
+ p.invalidateChildInParent(location, r);
+ }
+ } else {
+ invalidate();
+ }
+ mTop += offset;
+ mBottom += offset;
+ if (!mMatrixIsIdentity) {
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
}
/**
@@ -4872,8 +5442,30 @@
     * @param offset the number of pixels to offset the view by
*/
public void offsetLeftAndRight(int offset) {
- mLeft += offset;
- mRight += offset;
+ if (offset != 0) {
+ Matrix m = getMatrix();
+ if (mMatrixIsIdentity) {
+ final ViewParent p = mParent;
+ if (p != null && mAttachInfo != null) {
+ final int[] location = mAttachInfo.mInvalidateChildLocation;
+ final Rect r = mAttachInfo.mTmpInvalRect;
+ int minLeft = offset < 0 ? mLeft + offset : mLeft;
+ int maxRight = offset < 0 ? mRight : mRight + offset;
+ location[0] = minLeft;
+ location[1] = mTop;
+ r.set(0, 0, maxRight - minLeft, mBottom - mTop);
+ p.invalidateChildInParent(location, r);
+ }
+ } else {
+ invalidate();
+ }
+ mLeft += offset;
+ mRight += offset;
+ if (!mMatrixIsIdentity) {
+ mPrivateFlags |= DRAWN; // force another invalidation with the new orientation
+ invalidate();
+ }
+ }
}
/**
@@ -9191,6 +9783,13 @@
*/
final int[] mInvalidateChildLocation = new int[2];
+
+ /**
+ * Global to the view hierarchy used as a temporary for dealing with
+ * x/y location when view is transformed.
+ */
+ final float[] mTmpTransformLocation = new float[2];
+
/**
* The view tree observer used to dispatch global events like
* layout, pre-draw, touch mode change, etc.
@@ -9227,6 +9826,16 @@
final Rect mTmpInvalRect = new Rect();
/**
+ * Temporary for use in computing hit areas with transformed views
+ */
+ final RectF mTmpTransformRect = new RectF();
+
+ /**
+ * Temporary for use in computing invalidation areas with transformed views
+ */
+ final float[] mTmpTransformBounds = new float[8];
+
+ /**
* Temporary list for use in collecting focusable descendents of a view.
*/
final ArrayList<View> mFocusablesTempList = new ArrayList<View>(24);
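
A hedged usage sketch of the new transform properties on View; the values are arbitrary, and the pivot is set explicitly because it defaults to (0, 0) in this change:

import android.view.View;

class TransformExample {
    // Rotates, scales, and fades a view around its center using the
    // properties added above.
    static void emphasize(View v) {
        v.setPivotX(v.getWidth() / 2f);
        v.setPivotY(v.getHeight() / 2f);
        v.setRotation(15f);    // degrees around the pivot point
        v.setScaleX(1.25f);    // 1.0 means no scaling
        v.setScaleY(1.25f);
        v.setAlpha(0.5f);      // 0 = transparent, 1 = opaque
    }
}
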
diff --git a/core/java/android/view/ViewGroup.java b/core/java/android/view/ViewGroup.java
index 34777ce..22ad27a 100644
--- a/core/java/android/view/ViewGroup.java
+++ b/core/java/android/view/ViewGroup.java
@@ -16,6 +16,7 @@
package android.view;
+import android.graphics.Matrix;
import com.android.internal.R;
import android.content.Context;
@@ -867,21 +868,10 @@
final View child = children[i];
if ((child.mViewFlags & VISIBILITY_MASK) == VISIBLE
|| child.getAnimation() != null) {
- child.getHitRect(frame);
- if (frame.contains(scrolledXInt, scrolledYInt)) {
- // offset the event to the view's coordinate system
- final float xc = scrolledXFloat - child.mLeft;
- final float yc = scrolledYFloat - child.mTop;
- ev.setLocation(xc, yc);
+ if (child.dispatchTouchEvent(ev, scrolledXFloat, scrolledYFloat)) {
child.mPrivateFlags &= ~CANCEL_NEXT_UP_EVENT;
- if (child.dispatchTouchEvent(ev)) {
- // Event handled, we have a target now.
- mMotionTarget = child;
- return true;
- }
- // The event didn't get handled, try the next view.
- // Don't reset the event's location, it's not
- // necessary here.
+ mMotionTarget = child;
+ return true;
}
}
}
@@ -937,8 +927,21 @@
// finally offset the event to the target's coordinate system and
// dispatch the event.
- final float xc = scrolledXFloat - (float) target.mLeft;
- final float yc = scrolledYFloat - (float) target.mTop;
+ float xc;
+ float yc;
+ Matrix m = getMatrix();
+ if (mMatrixIsIdentity || mAttachInfo == null) {
+ xc = scrolledXFloat - (float) target.mLeft;
+ yc = scrolledYFloat - (float) target.mTop;
+ } else {
+ // non-identity matrix: transform the point into the view's coordinates
+ final float[] localXY = mAttachInfo.mTmpTransformLocation;
+ localXY[0] = scrolledXFloat;
+ localXY[1] = scrolledYFloat;
+ getInverseMatrix().mapPoints(localXY);
+ xc = localXY[0] - (float) target.mLeft;
+ yc = localXY[1] - (float) target.mTop;
+ }
ev.setLocation(xc, yc);
if ((target.mPrivateFlags & CANCEL_NEXT_UP_EVENT) != 0) {
@@ -1609,25 +1612,36 @@
}
}
- float alpha = 1.0f;
+ float alpha = child.getAlpha();
+ Matrix childMatrix = child.getMatrix();
- if (transformToApply != null) {
- if (concatMatrix) {
- int transX = 0;
- int transY = 0;
- if (hasNoCache) {
- transX = -sx;
- transY = -sy;
- }
- // Undo the scroll translation, apply the transformation matrix,
- // then redo the scroll translate to get the correct result.
- canvas.translate(-transX, -transY);
- canvas.concat(transformToApply.getMatrix());
- canvas.translate(transX, transY);
- mGroupFlags |= FLAG_CLEAR_TRANSFORMATION;
+ if (transformToApply != null || alpha < 1.0f || !child.mMatrixIsIdentity) {
+ int transX = 0;
+ int transY = 0;
+ if (hasNoCache) {
+ transX = -sx;
+ transY = -sy;
}
-
- alpha = transformToApply.getAlpha();
+ if (transformToApply != null) {
+ if (concatMatrix) {
+ // Undo the scroll translation, apply the transformation matrix,
+ // then redo the scroll translate to get the correct result.
+ canvas.translate(-transX, -transY);
+ canvas.concat(transformToApply.getMatrix());
+ canvas.translate(transX, transY);
+ mGroupFlags |= FLAG_CLEAR_TRANSFORMATION;
+ }
+ float transformAlpha = transformToApply.getAlpha();
+ if (transformAlpha < 1.0f) {
+ alpha *= transformToApply.getAlpha();
+ mGroupFlags |= FLAG_CLEAR_TRANSFORMATION;
+ }
+ }
+ if (!child.mMatrixIsIdentity) {
+ canvas.translate(-transX, -transY);
+ canvas.concat(child.getMatrix());
+ canvas.translate(transX, transY);
+ }
if (alpha < 1.0f) {
mGroupFlags |= FLAG_CLEAR_TRANSFORMATION;
}
@@ -2498,6 +2512,41 @@
final int[] location = attachInfo.mInvalidateChildLocation;
location[CHILD_LEFT_INDEX] = child.mLeft;
location[CHILD_TOP_INDEX] = child.mTop;
+ Matrix childMatrix = child.getMatrix();
+ if (!childMatrix.isIdentity()) {
+ float[] boundingRectPoints = attachInfo.mTmpTransformBounds;
+ boundingRectPoints[0] = dirty.left; // upper left
+ boundingRectPoints[1] = dirty.top;
+ boundingRectPoints[2] = dirty.right; // upper right
+ boundingRectPoints[3] = dirty.top;
+ boundingRectPoints[4] = dirty.right; // lower right
+ boundingRectPoints[5] = dirty.bottom;
+ boundingRectPoints[6] = dirty.left; // lower left
+ boundingRectPoints[7] = dirty.bottom;
+ childMatrix.mapPoints(boundingRectPoints);
+            // find the min/max points to get the bounding rect
+ float left = Float.MAX_VALUE;
+ float top = Float.MAX_VALUE;
+ float right = -Float.MAX_VALUE;
+ float bottom = -Float.MAX_VALUE;
+ for (int i = 0; i < 8; i += 2) {
+ float x = boundingRectPoints[i];
+ float y = boundingRectPoints[i+1];
+ if (x < left) {
+ left = x;
+ }
+ if (x > right) {
+ right = x;
+ }
+ if (y < top) {
+ top = y;
+ }
+ if (y > bottom) {
+ bottom = y;
+ }
+ }
+ dirty.set((int)left, (int)top, (int)(right + .5f), (int)(bottom + .5f));
+ }
// If the child is drawing an animation, we want to copy this flag onto
// ourselves and the parent to make sure the invalidate request goes
@@ -2532,6 +2581,39 @@
}
parent = parent.invalidateChildInParent(location, dirty);
+ Matrix m = getMatrix();
+ if (!m.isIdentity()) {
+ float[] boundingRectPoints = {
+ dirty.left - mLeft, dirty.top - mTop, // upper left
+ dirty.right - mLeft, dirty.top - mTop, // upper right
+ dirty.right - mLeft, dirty.bottom - mTop, // lower right
+ dirty.left - mLeft, dirty.bottom - mTop // lower left
+ };
+ m.mapPoints(boundingRectPoints);
+            // find the min/max points to get the bounding rect
+ float left = Float.MAX_VALUE;
+ float top = Float.MAX_VALUE;
+            float right = -Float.MAX_VALUE;  // Float.MIN_VALUE is the smallest positive value, not the most negative
+            float bottom = -Float.MAX_VALUE;
+ for (int i = 0; i < 8; i += 2) {
+ float x = boundingRectPoints[i];
+ float y = boundingRectPoints[i+1];
+ if (x < left) {
+ left = x;
+ }
+ if (x > right) {
+ right = x;
+ }
+ if (y < top) {
+ top = y;
+ }
+ if (y > bottom) {
+ bottom = y;
+ }
+ }
+ dirty.set((int)left + mLeft, (int)top + mTop, (int)(right + .5f) + mLeft,
+ (int)(bottom + .5f) + mTop);
+ }
} while (parent != null);
}
}
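
The hand-rolled min/max loops above compute the axis-aligned bounding box of the transformed dirty rect. An equivalent, more compact formulation (a sketch, not this patch's code) uses Matrix.mapRect(), which replaces a RectF with the bounding box of its image:

import android.graphics.Matrix;
import android.graphics.Rect;
import android.graphics.RectF;

class DirtyRectTransform {
    // Expands dirty to the bounding box of its image under matrix.
    static void transformDirtyRect(Matrix matrix, Rect dirty) {
        RectF mapped = new RectF(dirty);
        matrix.mapRect(mapped);
        dirty.set((int) mapped.left, (int) mapped.top,
                (int) (mapped.right + 0.5f), (int) (mapped.bottom + 0.5f));
    }
}
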
diff --git a/core/java/android/view/ViewRoot.java b/core/java/android/view/ViewRoot.java
index 329226e..a89e7f6 100644
--- a/core/java/android/view/ViewRoot.java
+++ b/core/java/android/view/ViewRoot.java
@@ -130,7 +130,7 @@
int mViewVisibility;
boolean mAppVisible = true;
- SurfaceHolder.Callback mSurfaceHolderCallback;
+ SurfaceHolder.Callback2 mSurfaceHolderCallback;
BaseSurfaceHolder mSurfaceHolder;
boolean mIsCreating;
boolean mDrawingAllowed;
@@ -1152,6 +1152,18 @@
Log.v("ViewRoot", "FINISHED DRAWING: " + mWindowAttributes.getTitle());
}
mReportNextDraw = false;
+ if (mSurfaceHolder != null && mSurface.isValid()) {
+ mSurfaceHolderCallback.surfaceRedrawNeeded(mSurfaceHolder);
+ SurfaceHolder.Callback callbacks[] = mSurfaceHolder.getCallbacks();
+ if (callbacks != null) {
+ for (SurfaceHolder.Callback c : callbacks) {
+ if (c instanceof SurfaceHolder.Callback2) {
+ ((SurfaceHolder.Callback2)c).surfaceRedrawNeeded(
+ mSurfaceHolder);
+ }
+ }
+ }
+ }
try {
sWindowSession.finishDrawing(mWindow);
} catch (RemoteException e) {
diff --git a/core/java/android/view/Window.java b/core/java/android/view/Window.java
index 9b31b9c..be681cc 100644
--- a/core/java/android/view/Window.java
+++ b/core/java/android/view/Window.java
@@ -485,7 +485,7 @@
* to operate (such as for receiving input events). The given SurfaceHolder
* callback will be used to tell you about state changes to the surface.
*/
- public abstract void takeSurface(SurfaceHolder.Callback callback);
+ public abstract void takeSurface(SurfaceHolder.Callback2 callback);
/**
* Take ownership of this window's InputQueue. The window will no
diff --git a/core/java/android/webkit/BrowserFrame.java b/core/java/android/webkit/BrowserFrame.java
index 7b1aab2..b021ded 100644
--- a/core/java/android/webkit/BrowserFrame.java
+++ b/core/java/android/webkit/BrowserFrame.java
@@ -301,6 +301,18 @@
}
/**
+ * Saves the contents of the frame as a web archive.
+ *
+ * @param basename The filename where the archive should be placed.
+     * @param autoname If false, takes basename to be a file. If true, basename
+     *                 is assumed to be a directory in which a filename will be
+     *                 chosen according to the URL of the current page.
+ */
+ /* package */ String saveWebArchive(String basename, boolean autoname) {
+ return nativeSaveWebArchive(basename, autoname);
+ }
+
+ /**
* Go back or forward the number of steps given.
* @param steps A negative or positive number indicating the direction
* and number of steps to move.
@@ -1040,5 +1052,7 @@
*/
private native HashMap getFormTextData();
+ private native String nativeSaveWebArchive(String basename, boolean autoname);
+
private native void nativeOrientationChanged(int orientation);
}
diff --git a/core/java/android/webkit/MimeTypeMap.java b/core/java/android/webkit/MimeTypeMap.java
index c1ac180..6e9c70a 100644
--- a/core/java/android/webkit/MimeTypeMap.java
+++ b/core/java/android/webkit/MimeTypeMap.java
@@ -363,6 +363,7 @@
sMimeTypeMap.loadEntry("application/x-wais-source", "src");
sMimeTypeMap.loadEntry("application/x-wingz", "wz");
sMimeTypeMap.loadEntry("application/x-webarchive", "webarchive");
+ sMimeTypeMap.loadEntry("application/x-webarchive-xml", "webarchivexml");
sMimeTypeMap.loadEntry("application/x-x509-ca-cert", "crt");
sMimeTypeMap.loadEntry("application/x-x509-user-cert", "crt");
sMimeTypeMap.loadEntry("application/x-xcf", "xcf");
diff --git a/core/java/android/webkit/WebView.java b/core/java/android/webkit/WebView.java
index 140dd55..0c8fc79 100644
--- a/core/java/android/webkit/WebView.java
+++ b/core/java/android/webkit/WebView.java
@@ -590,6 +590,7 @@
static final int SET_SCROLLBAR_MODES = 129;
static final int SELECTION_STRING_CHANGED = 130;
static final int SET_TOUCH_HIGHLIGHT_RECTS = 131;
+ static final int SAVE_WEBARCHIVE_FINISHED = 132;
private static final int FIRST_PACKAGE_MSG_ID = SCROLL_TO_MSG_ID;
private static final int LAST_PACKAGE_MSG_ID = SET_TOUCH_HIGHLIGHT_RECTS;
@@ -638,7 +639,8 @@
"REQUEST_KEYBOARD_WITH_SELECTION_MSG_ID", // = 128;
"SET_SCROLLBAR_MODES", // = 129;
"SELECTION_STRING_CHANGED", // = 130;
- "SET_TOUCH_HIGHLIGHT_RECTS" // = 131;
+ "SET_TOUCH_HIGHLIGHT_RECTS", // = 131;
+ "SAVE_WEBARCHIVE_FINISHED" // = 132;
};
// If the site doesn't use the viewport meta tag to specify the viewport,
@@ -1520,6 +1522,45 @@
}
/**
+ * Saves the current view as a web archive.
+ *
+ * @param filename The filename where the archive should be placed.
+ */
+ public void saveWebArchive(String filename) {
+ saveWebArchive(filename, false, null);
+ }
+
+ /* package */ static class SaveWebArchiveMessage {
+ SaveWebArchiveMessage (String basename, boolean autoname, ValueCallback<String> callback) {
+ mBasename = basename;
+ mAutoname = autoname;
+ mCallback = callback;
+ }
+
+ /* package */ final String mBasename;
+ /* package */ final boolean mAutoname;
+ /* package */ final ValueCallback<String> mCallback;
+ /* package */ String mResultFile;
+ }
+
+ /**
+ * Saves the current view as a web archive.
+ *
+ * @param basename The filename where the archive should be placed.
+ * @param autoname If false, takes basename to be a file. If true, basename
+ * is assumed to be a directory in which a filename will be
+     *                 chosen according to the URL of the current page.
+ * @param callback Called after the web archive has been saved. The
+ * parameter for onReceiveValue will either be the filename
+ * under which the file was saved, or null if saving the
+ * file failed.
+ */
+ public void saveWebArchive(String basename, boolean autoname, ValueCallback<String> callback) {
+ mWebViewCore.sendMessage(EventHub.SAVE_WEBARCHIVE,
+ new SaveWebArchiveMessage(basename, autoname, callback));
+ }
+
+ /**
* Stop the current load.
*/
public void stopLoading() {
@@ -6435,6 +6476,13 @@
}
break;
+ case SAVE_WEBARCHIVE_FINISHED:
+ SaveWebArchiveMessage saveMessage = (SaveWebArchiveMessage)msg.obj;
+ if (saveMessage.mCallback != null) {
+ saveMessage.mCallback.onReceiveValue(saveMessage.mResultFile);
+ }
+ break;
+
default:
super.handleMessage(msg);
break;
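
A hedged sketch of calling the new saveWebArchive() API from application code; "/sdcard/archives/" is an illustrative directory, and with autoname == true the final filename is derived from the current page's URL and delivered to the callback (or null on failure):

import android.webkit.ValueCallback;
import android.webkit.WebView;

class ArchiveExample {
    static void archiveCurrentPage(WebView webView) {
        webView.saveWebArchive("/sdcard/archives/", true, new ValueCallback<String>() {
            public void onReceiveValue(String resultFile) {
                if (resultFile != null) {
                    // archive written to resultFile
                } else {
                    // saving the archive failed
                }
            }
        });
    }
}
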
diff --git a/core/java/android/webkit/WebViewCore.java b/core/java/android/webkit/WebViewCore.java
index db86a0b..21af570 100644
--- a/core/java/android/webkit/WebViewCore.java
+++ b/core/java/android/webkit/WebViewCore.java
@@ -774,6 +774,7 @@
"ON_RESUME", // = 144
"FREE_MEMORY", // = 145
"VALID_NODE_BOUNDS", // = 146
+ "SAVE_WEBARCHIVE", // = 147
};
class EventHub {
@@ -840,6 +841,9 @@
static final int FREE_MEMORY = 145;
static final int VALID_NODE_BOUNDS = 146;
+ // Load and save web archives
+ static final int SAVE_WEBARCHIVE = 147;
+
// Network-based messaging
static final int CLEAR_SSL_PREF_TABLE = 150;
@@ -1300,6 +1304,15 @@
nativeSetJsFlags((String)msg.obj);
break;
+ case SAVE_WEBARCHIVE:
+ WebView.SaveWebArchiveMessage saveMessage =
+ (WebView.SaveWebArchiveMessage)msg.obj;
+ saveMessage.mResultFile =
+ saveWebArchive(saveMessage.mBasename, saveMessage.mAutoname);
+ mWebView.mPrivateHandler.obtainMessage(
+ WebView.SAVE_WEBARCHIVE_FINISHED, saveMessage).sendToTarget();
+ break;
+
case GEOLOCATION_PERMISSIONS_PROVIDE:
GeolocationPermissionsData data =
(GeolocationPermissionsData) msg.obj;
@@ -1601,6 +1614,13 @@
mBrowserFrame.loadUrl(url, extraHeaders);
}
+ private String saveWebArchive(String filename, boolean autoname) {
+ if (DebugFlags.WEB_VIEW_CORE) {
+ Log.v(LOGTAG, " CORE saveWebArchive " + filename + " " + autoname);
+ }
+ return mBrowserFrame.saveWebArchive(filename, autoname);
+ }
+
private void key(KeyEvent evt, boolean isDown) {
if (DebugFlags.WEB_VIEW_CORE) {
Log.v(LOGTAG, "CORE key at " + System.currentTimeMillis() + ", "
diff --git a/core/java/android/widget/AbsListView.java b/core/java/android/widget/AbsListView.java
index e051fbd..70c1e15 100644
--- a/core/java/android/widget/AbsListView.java
+++ b/core/java/android/widget/AbsListView.java
@@ -4287,7 +4287,7 @@
final ArrayList<View> scrap = mScrapViews[i];
final int scrapCount = scrap.size();
for (int j = 0; j < scrapCount; j++) {
- scrap.get(i).setDrawingCacheBackgroundColor(color);
+ scrap.get(j).setDrawingCacheBackgroundColor(color);
}
}
}
diff --git a/core/java/android/widget/ListPopupWindow.java b/core/java/android/widget/ListPopupWindow.java
index e9de385..5c34c2c 100644
--- a/core/java/android/widget/ListPopupWindow.java
+++ b/core/java/android/widget/ListPopupWindow.java
@@ -78,7 +78,7 @@
private AdapterView.OnItemClickListener mItemClickListener;
private AdapterView.OnItemSelectedListener mItemSelectedListener;
- private final ResizePopupRunnable mResizePopupRunnable = new ResizePopupRunnable();
+ private final ResizePopupRunnable mResizePopupRunnable = new ResizePopupRunnable();
private final PopupTouchInterceptor mTouchInterceptor = new PopupTouchInterceptor();
private final PopupScrollListener mScrollListener = new PopupScrollListener();
private final ListSelectorHider mHideSelector = new ListSelectorHider();
@@ -432,6 +432,19 @@
}
/**
+ * Sets the width of the popup window by the size of its content. The final width may be
+ * larger to accommodate styled window dressing.
+ *
+ * @param width Desired width of content in pixels.
+ */
+ public void setContentWidth(int width) {
+ Drawable popupBackground = mPopup.getBackground();
+ if (popupBackground != null) {
+ mDropDownWidth = popupBackground.getIntrinsicWidth() + width;
+ }
+ }
+
+ /**
* @return The height of the popup window in pixels.
*/
public int getHeight() {
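
A usage sketch for the new setContentWidth() (not part of the patch; popup, adapter and context are assumed to exist): measure the widest row at UNSPECIFIED size, cap it, and let setContentWidth() account for the background, the same pattern MenuPopupHelper.measureContentWidth() uses later in this change.

    final int widthSpec = View.MeasureSpec.makeMeasureSpec(0, View.MeasureSpec.UNSPECIFIED);
    final int heightSpec = View.MeasureSpec.makeMeasureSpec(0, View.MeasureSpec.UNSPECIFIED);
    int contentWidth = 0;
    View row = null;
    for (int i = 0; i < adapter.getCount(); i++) {
        row = adapter.getView(i, row, null);          // reuse the row view while measuring
        row.measure(widthSpec, heightSpec);
        contentWidth = Math.max(contentWidth, row.getMeasuredWidth());
    }
    // Cap at half the screen width; setContentWidth() then adds the background width itself.
    final int maxWidth = context.getResources().getDisplayMetrics().widthPixels / 2;
    popup.setContentWidth(Math.min(contentWidth, maxWidth));
    popup.show();
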
diff --git a/core/java/android/widget/TextView.java b/core/java/android/widget/TextView.java
index f591483..0ce8164 100644
--- a/core/java/android/widget/TextView.java
+++ b/core/java/android/widget/TextView.java
@@ -1133,6 +1133,7 @@
setText(mText);
fixFocusableAndClickableSettings();
+ prepareCursorController();
}
private void fixFocusableAndClickableSettings() {
@@ -2375,8 +2376,8 @@
int end = 0;
if (mText != null) {
- start = Selection.getSelectionStart(mText);
- end = Selection.getSelectionEnd(mText);
+ start = getSelectionStart();
+ end = getSelectionEnd();
if (start >= 0 || end >= 0) {
// Or save state if there is a selection
save = true;
@@ -2700,6 +2701,9 @@
if (needEditableForNotification) {
sendAfterTextChanged((Editable) text);
}
+
+ // Depends on canSelectText, which depends on text
+ prepareCursorController();
}
/**
@@ -3617,7 +3621,7 @@
}
private void invalidateCursor() {
- int where = Selection.getSelectionEnd(mText);
+ int where = getSelectionEnd();
invalidateCursor(where, where, where);
}
@@ -3693,7 +3697,18 @@
boolean changed = false;
if (mMovement != null) {
- int curs = Selection.getSelectionEnd(mText);
+ /* This code also provides auto-scrolling when a cursor is moved using a
+ * CursorController (insertion point or selection limits).
+ * For selection, ensure start or end is visible depending on controller's state.
+ */
+ int curs = getSelectionEnd();
+ if (mSelectionModifierCursorController != null) {
+ SelectionModifierCursorController selectionController =
+ (SelectionModifierCursorController) mSelectionModifierCursorController;
+ if (selectionController.isSelectionStartDragged()) {
+ curs = getSelectionStart();
+ }
+ }
/*
* TODO: This should really only keep the end in view if
@@ -3986,8 +4001,8 @@
// XXX This is not strictly true -- a program could set the
// selection manually if it really wanted to.
if (mMovement != null && (isFocused() || isPressed())) {
- selStart = Selection.getSelectionStart(mText);
- selEnd = Selection.getSelectionEnd(mText);
+ selStart = getSelectionStart();
+ selEnd = getSelectionEnd();
if (mCursorVisible && selStart >= 0 && isEnabled()) {
if (mHighlightPath == null)
@@ -4097,6 +4112,9 @@
if (mInsertionPointCursorController != null) {
mInsertionPointCursorController.draw(canvas);
}
+ if (mSelectionModifierCursorController != null) {
+ mSelectionModifierCursorController.draw(canvas);
+ }
}
@Override
@@ -4511,8 +4529,8 @@
outAttrs.hintText = mHint;
if (mText instanceof Editable) {
InputConnection ic = new EditableInputConnection(this);
- outAttrs.initialSelStart = Selection.getSelectionStart(mText);
- outAttrs.initialSelEnd = Selection.getSelectionEnd(mText);
+ outAttrs.initialSelStart = getSelectionStart();
+ outAttrs.initialSelEnd = getSelectionEnd();
outAttrs.initialCapsMode = ic.getCursorCapsMode(mInputType);
return ic;
}
@@ -4597,8 +4615,8 @@
outText.flags |= ExtractedText.FLAG_SINGLE_LINE;
}
outText.startOffset = 0;
- outText.selectionStart = Selection.getSelectionStart(content);
- outText.selectionEnd = Selection.getSelectionEnd(content);
+ outText.selectionStart = getSelectionStart();
+ outText.selectionEnd = getSelectionEnd();
return true;
}
return false;
@@ -4777,7 +4795,7 @@
void updateAfterEdit() {
invalidate();
- int curs = Selection.getSelectionStart(mText);
+ int curs = getSelectionStart();
if (curs >= 0 || (mGravity & Gravity.VERTICAL_GRAVITY_MASK) ==
Gravity.BOTTOM) {
@@ -4921,7 +4939,6 @@
w, alignment, mSpacingMult, mSpacingAdd,
boring, mIncludePad);
}
- // Log.e("aaa", "Boring: " + mTransformed);
mSavedLayout = (BoringLayout) mLayout;
} else if (shouldEllipsize && boring.width <= w) {
@@ -5677,8 +5694,8 @@
if (!(mText instanceof Spannable)) {
return false;
}
- int start = Selection.getSelectionStart(mText);
- int end = Selection.getSelectionEnd(mText);
+ int start = getSelectionStart();
+ int end = getSelectionEnd();
if (start != end) {
return false;
}
@@ -6522,6 +6539,13 @@
}
// Don't leave us in the middle of a batch edit.
onEndBatchEdit();
+
+ if (mInsertionPointCursorController != null) {
+ mInsertionPointCursorController.hide();
+ }
+ if (mSelectionModifierCursorController != null) {
+ mSelectionModifierCursorController.hide();
+ }
}
startStopMarquee(focused);
@@ -6651,12 +6675,15 @@
boolean handled = false;
- int oldSelStart = Selection.getSelectionStart(mText);
- int oldSelEnd = Selection.getSelectionEnd(mText);
+ int oldSelStart = getSelectionStart();
+ int oldSelEnd = getSelectionEnd();
if (mInsertionPointCursorController != null) {
mInsertionPointCursorController.onTouchEvent(event);
}
+ if (mSelectionModifierCursorController != null) {
+ mSelectionModifierCursorController.onTouchEvent(event);
+ }
if (mMovement != null) {
handled |= mMovement.onTouchEvent(this, (Spannable) mText, event);
@@ -6667,8 +6694,8 @@
InputMethodManager imm = (InputMethodManager)
getContext().getSystemService(Context.INPUT_METHOD_SERVICE);
- final int newSelStart = Selection.getSelectionStart(mText);
- final int newSelEnd = Selection.getSelectionEnd(mText);
+ final int newSelStart = getSelectionStart();
+ final int newSelEnd = getSelectionEnd();
CommitSelectionReceiver csr = null;
if (newSelStart != oldSelStart || newSelEnd != oldSelEnd) {
@@ -6689,9 +6716,37 @@
}
private void prepareCursorController() {
+ boolean atLeastOneController = false;
+
// TODO Add an extra android:cursorController flag to disable the controller?
- mInsertionPointCursorController =
- mCursorVisible ? new InsertionPointCursorController() : null;
+ if (mCursorVisible) {
+ atLeastOneController = true;
+ if (mInsertionPointCursorController == null) {
+ mInsertionPointCursorController = new InsertionPointCursorController();
+ }
+ } else {
+ mInsertionPointCursorController = null;
+ }
+
+ if (canSelectText()) {
+ atLeastOneController = true;
+ if (mSelectionModifierCursorController == null) {
+ mSelectionModifierCursorController = new SelectionModifierCursorController();
+ }
+ } else {
+ mSelectionModifierCursorController = null;
+ }
+
+ if (atLeastOneController) {
+ if (sCursorControllerTempRect == null) {
+ sCursorControllerTempRect = new Rect();
+ }
+ Resources res = mContext.getResources();
+ mCursorControllerVerticalOffset = res.getDimensionPixelOffset(
+ com.android.internal.R.dimen.cursor_controller_vertical_offset);
+ } else {
+ sCursorControllerTempRect = null;
+ }
}
/**
@@ -6751,8 +6806,8 @@
TextView tv = mView.get();
if (tv != null && tv.isFocused()) {
- int st = Selection.getSelectionStart(tv.mText);
- int en = Selection.getSelectionEnd(tv.mText);
+ int st = tv.getSelectionStart();
+ int en = tv.getSelectionEnd();
if (st == en && st >= 0 && en >= 0) {
if (tv.mLayout != null) {
@@ -6944,6 +6999,9 @@
}
private boolean canSelectText() {
+ // prepareCursorController() relies on this method.
+        // If you change this condition, make sure prepareCursorController is called anywhere
+        // the value of this condition might change.
if (mText instanceof Spannable && mText.length() != 0 &&
mMovement != null && mMovement.canSelectArbitrarily()) {
return true;
@@ -6992,10 +7050,14 @@
}
/**
- * Returns a word to add to the dictionary from the context menu,
- * or null if there is no cursor or no word at the cursor.
+ * Returns the offsets delimiting the 'word' located at position offset.
+ *
+ * @param offset An offset in the text.
+ * @return The offsets for the start and end of the word located at <code>offset</code>.
+     * The two int offsets are packed in a long, with the start offset in the upper 32 bits.
+ * Returns a negative value if no valid word was found.
*/
- private String getWordForDictionary() {
+ private long getWordLimitsAt(int offset) {
/*
* Quick return if the input type is one where adding words
* to the dictionary doesn't make any sense.
@@ -7004,7 +7066,7 @@
if (klass == InputType.TYPE_CLASS_NUMBER ||
klass == InputType.TYPE_CLASS_PHONE ||
klass == InputType.TYPE_CLASS_DATETIME) {
- return null;
+ return -1;
}
int variation = mInputType & InputType.TYPE_MASK_VARIATION;
@@ -7013,13 +7075,13 @@
variation == InputType.TYPE_TEXT_VARIATION_VISIBLE_PASSWORD ||
variation == InputType.TYPE_TEXT_VARIATION_EMAIL_ADDRESS ||
variation == InputType.TYPE_TEXT_VARIATION_FILTER) {
- return null;
+ return -1;
}
- int end = getSelectionEnd();
+ int end = offset;
if (end < 0) {
- return null;
+ return -1;
}
int start = end;
@@ -7053,6 +7115,14 @@
}
}
+ if (start == end) {
+ return -1;
+ }
+
+ if (end - start > 48) {
+ return -1;
+ }
+
boolean hasLetter = false;
for (int i = start; i < end; i++) {
if (Character.isLetter(mTransformed.charAt(i))) {
@@ -7060,19 +7130,28 @@
break;
}
}
+
if (!hasLetter) {
- return null;
+ return -1;
}
- if (start == end) {
- return null;
- }
+ // Two ints packed in a long
+ return (((long) start) << 32) | end;
+ }
- if (end - start > 48) {
+ /**
+ * Returns a word to add to the dictionary from the context menu,
+ * or null if there is no cursor or no word at the cursor.
+ */
+ private String getWordForDictionary() {
+ long wordLimits = getWordLimitsAt(getSelectionEnd());
+ if (wordLimits < 0) {
return null;
+ } else {
+ int start = (int) (wordLimits >>> 32);
+ int end = (int) (wordLimits & 0x00000000FFFFFFFFL);
+ return TextUtils.substring(mTransformed, start, end);
}
-
- return TextUtils.substring(mTransformed, start, end);
}
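
A short sketch of the packed-long convention shared by getWordLimitsAt() and its callers: the start offset lives in the upper 32 bits, the end offset in the lower 32 bits, and -1 signals "no valid word". Offsets are non-negative, so testing the long against zero is safe. The values here are hypothetical.

    int start = 7, end = 12;                           // hypothetical word boundaries
    long packed = (((long) start) << 32) | end;        // pack, as in getWordLimitsAt()
    int unpackedStart = (int) (packed >>> 32);         // 7
    int unpackedEnd = (int) (packed & 0xFFFFFFFFL);    // 12
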
@Override
@@ -7372,6 +7451,15 @@
@Override
public boolean performLongClick() {
+ // TODO This behavior should be moved to View
+ // TODO handle legacy code that added items to context menu
+ if (canSelectText()) {
+ if (startSelectionMode()) {
+ mEatTouchRelease = true;
+ return true;
+ }
+ }
+
if (super.performLongClick()) {
mEatTouchRelease = true;
return true;
@@ -7380,6 +7468,83 @@
return false;
}
+ private boolean startSelectionMode() {
+ if (mSelectionModifierCursorController != null) {
+ int offset = ((SelectionModifierCursorController) mSelectionModifierCursorController).
+ getTouchOffset();
+
+ int selectionStart, selectionEnd;
+
+ if (hasSelection()) {
+ selectionStart = getSelectionStart();
+ selectionEnd = getSelectionEnd();
+ if (selectionStart > selectionEnd) {
+ int tmp = selectionStart;
+ selectionStart = selectionEnd;
+ selectionEnd = tmp;
+ }
+ if ((offset >= selectionStart) && (offset <= selectionEnd)) {
+ // Long press in the current selection.
+ // Should initiate a drag. Return false, to rely on context menu for now.
+ return false;
+ }
+ }
+
+ long wordLimits = getWordLimitsAt(offset);
+ if (wordLimits >= 0) {
+ selectionStart = (int) (wordLimits >>> 32);
+ selectionEnd = (int) (wordLimits & 0x00000000FFFFFFFFL);
+ } else {
+ selectionStart = Math.max(offset - 5, 0);
+ selectionEnd = Math.min(offset + 5, mText.length());
+ }
+
+ Selection.setSelection((Spannable) mText, selectionStart, selectionEnd);
+
+ // Has to be done AFTER selection has been changed to correctly position controllers.
+ mSelectionModifierCursorController.show();
+
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
+ * Get the offset character closest to the specified absolute position.
+ *
+ * @param x The horizontal absolute position of a point on screen
+ * @param y The vertical absolute position of a point on screen
+ * @return the character offset for the character whose position is closest to the specified
+ * position.
+ *
+ * @hide
+ */
+ public int getOffset(int x, int y) {
+ x -= getTotalPaddingLeft();
+ y -= getTotalPaddingTop();
+
+ // Clamp the position to inside of the view.
+ if (x < 0) {
+ x = 0;
+ } else if (x >= (getWidth() - getTotalPaddingRight())) {
+            x = getWidth() - getTotalPaddingRight() - 1;
+ }
+ if (y < 0) {
+ y = 0;
+ } else if (y >= (getHeight() - getTotalPaddingBottom())) {
+            y = getHeight() - getTotalPaddingBottom() - 1;
+ }
+
+ x += getScrollX();
+ y += getScrollY();
+
+ Layout layout = getLayout();
+ final int line = layout.getLineForVertical(y);
+ final int offset = layout.getOffsetForHorizontal(line, x);
+ return offset;
+ }
+
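
An equivalent standalone sketch of what getOffset() computes (the helper name and its use from outside the class are assumptions; getOffset() itself is @hide): clamp the view coordinates into the padded content area, add the scroll position, then ask the Layout for the nearest character offset.

    // Hypothetical helper mirroring TextView.getOffset() above.
    private static int offsetForPosition(TextView tv, int x, int y) {
        Layout layout = tv.getLayout();
        if (layout == null) return -1;                 // not laid out yet
        x -= tv.getTotalPaddingLeft();
        y -= tv.getTotalPaddingTop();
        x = Math.max(0, Math.min(x, tv.getWidth() - tv.getTotalPaddingRight() - 1));
        y = Math.max(0, Math.min(y, tv.getHeight() - tv.getTotalPaddingBottom() - 1));
        x += tv.getScrollX();
        y += tv.getScrollY();
        final int line = layout.getLineForVertical(y);
        return layout.getOffsetForHorizontal(line, x);
    }
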
/**
* A CursorController instance can be used to control a cursor in the text.
*
@@ -7387,6 +7552,9 @@
* and send them to this object instead of the cursor.
*/
public interface CursorController {
+ /* Cursor fade-out animation duration, in milliseconds. */
+ static final int FADE_OUT_DURATION = 400;
+
/**
* Makes the cursor controller visible on screen. Will be drawn by {@link #draw(Canvas)}.
* See also {@link #hide()}.
@@ -7402,19 +7570,19 @@
/**
* Update the controller's position.
*/
- public void updatePosition();
+ public void updatePosition(int offset);
/**
     * The controller and the cursor's positions can be linked by a fixed offset,
     * computed when the controller is touched, and then maintained as it moves.
* @return Horizontal offset between the controller and the cursor.
*/
- public int getOffsetX();
+ public float getOffsetX();
/**
* @return Vertical offset between the controller and the cursor.
*/
- public int getOffsetY();
+ public float getOffsetY();
/**
* This method is called by {@link #onTouchEvent(MotionEvent)} and gives the controller
@@ -7434,12 +7602,9 @@
class InsertionPointCursorController implements CursorController {
private static final int DELAY_BEFORE_FADE_OUT = 2100;
- private static final int FADE_OUT_DURATION = 400;
// Whether or not the cursor control is currently visible
private boolean mIsVisible = false;
- // Current cursor control bounds, in content coordinates
- private final Rect mBounds = new Rect();
// Starting time of the fade timer
private long mFadeOutTimerStart;
// The cursor controller image
@@ -7447,7 +7612,7 @@
// Used to detect a tap (vs drag) on the controller
private long mOnDownTimerStart;
// Offset between finger hot point on cursor controller and actual cursor
- private int mOffsetX, mOffsetY;
+ private float mOffsetX, mOffsetY;
InsertionPointCursorController() {
Resources res = mContext.getResources();
@@ -7455,10 +7620,13 @@
}
public void show() {
- updatePosition();
+ updateDrawablePosition();
            // Has to be done after updatePosition, so that the previous position
            // invalidate is only done if necessary.
mIsVisible = true;
+ if (mSelectionModifierCursorController != null) {
+ mSelectionModifierCursorController.hide();
+ }
}
public void hide() {
@@ -7467,7 +7635,7 @@
// Start fading out, only if not already in progress
if (time - mFadeOutTimerStart < DELAY_BEFORE_FADE_OUT) {
mFadeOutTimerStart = time - DELAY_BEFORE_FADE_OUT;
- postInvalidate(mBounds.left, mBounds.top, mBounds.right, mBounds.bottom);
+ postInvalidate(mDrawable);
}
}
}
@@ -7476,15 +7644,13 @@
if (mIsVisible) {
int time = (int) (System.currentTimeMillis() - mFadeOutTimerStart);
if (time <= DELAY_BEFORE_FADE_OUT) {
- postInvalidateDelayed(DELAY_BEFORE_FADE_OUT - time,
- mBounds.left, mBounds.top, mBounds.right, mBounds.bottom);
+ postInvalidateDelayed(DELAY_BEFORE_FADE_OUT - time, mDrawable);
} else {
time -= DELAY_BEFORE_FADE_OUT;
if (time <= FADE_OUT_DURATION) {
- int alpha = 255 * (FADE_OUT_DURATION - time) / FADE_OUT_DURATION;
+ final int alpha = 255 * (FADE_OUT_DURATION - time) / FADE_OUT_DURATION;
mDrawable.setAlpha(alpha);
- postInvalidateDelayed(30,
- mBounds.left, mBounds.top, mBounds.right, mBounds.bottom);
+ postInvalidateDelayed(30, mDrawable);
} else {
mDrawable.setAlpha(0);
mIsVisible = false;
@@ -7494,115 +7660,301 @@
}
}
- public void updatePosition() {
+ public void updatePosition(int offset) {
+ Selection.setSelection((Spannable) mText, offset);
+ updateDrawablePosition();
+ }
+
+ private void updateDrawablePosition() {
if (mIsVisible) {
// Clear previous cursor controller before bounds are updated
- postInvalidate(mBounds.left, mBounds.top, mBounds.right, mBounds.bottom);
+ postInvalidate(mDrawable);
}
- final int offset = Selection.getSelectionStart(mText);
+ final int offset = getSelectionStart();
if (offset < 0) {
// Should never happen, safety check.
- Log.w(LOG_TAG, "Update cursor controller position called with no cursor", null);
+ Log.w(LOG_TAG, "Update cursor controller position called with no cursor");
mIsVisible = false;
return;
}
- final int cursorControllerDrawableWidth = mDrawable.getIntrinsicWidth();
- final int cursorControllerDrawableHeight = mDrawable.getIntrinsicHeight();
- final int line = mLayout.getLineForOffset(offset);
-
- mBounds.left = (int) (mLayout.getPrimaryHorizontal(offset) - 0.5 -
- cursorControllerDrawableWidth / 2.0);
- mBounds.top = mLayout.getLineTop(line + 1);
-
- // Move cursor controller a little bit up when editing the last line of text
- // (or a single line) so that it is visible and easier to grab.
- if (line == mLayout.getLineCount() - 1) {
- mBounds.top -= Math.max(0,
- cursorControllerDrawableHeight / 2 - getExtendedPaddingBottom());
- }
-
- mBounds.right = mBounds.left + cursorControllerDrawableWidth;
- mBounds.bottom = mBounds.top + cursorControllerDrawableHeight;
-
- convertFromViewportToContentCoordinates(mBounds);
- mDrawable.setBounds(mBounds);
+ positionDrawableUnderCursor(offset, mDrawable);
mFadeOutTimerStart = System.currentTimeMillis();
mDrawable.setAlpha(255);
-
- postInvalidate(mBounds.left, mBounds.top, mBounds.right, mBounds.bottom);
}
public void onTouchEvent(MotionEvent event) {
- if (isFocused() && isTextEditable()) {
- if (event.getActionMasked() == MotionEvent.ACTION_DOWN && mIsVisible) {
- final int x = (int) event.getX();
- final int y = (int) event.getY();
+ if (isFocused() && isTextEditable() && mIsVisible) {
+ switch (event.getActionMasked()) {
+ case MotionEvent.ACTION_DOWN : {
+ final float x = event.getX();
+ final float y = event.getY();
- // Simulate a 'fat finger' to ease grabbing of the controller.
- // Expand according to controller image size instead of using density.
- // Assume controller imager has a sensible size, proportionnal to density.
- final int cursorControllerDrawableWidth = mDrawable.getIntrinsicWidth();
- final int cursorControllerDrawableHeight = mDrawable.getIntrinsicHeight();
- final Rect fingerRect = new Rect(
- x - cursorControllerDrawableWidth / 2,
- y - cursorControllerDrawableHeight,
- x + cursorControllerDrawableWidth / 2,
- y);
+ if (fingerIsOnDrawable(x, y, mDrawable)) {
+ show();
- if (Rect.intersects(mBounds, fingerRect)) {
- show();
+ if (mMovement instanceof ArrowKeyMovementMethod) {
+ ((ArrowKeyMovementMethod)mMovement).setCursorController(this);
+ }
- if (mMovement instanceof ArrowKeyMovementMethod) {
- ((ArrowKeyMovementMethod)mMovement).setCursorController(this);
+ if (mParent != null) {
+ // Prevent possible scrollView parent from scrolling, so that
+ // we can use auto-scrolling.
+ mParent.requestDisallowInterceptTouchEvent(true);
+
+ final Rect bounds = mDrawable.getBounds();
+ mOffsetX = (bounds.left + bounds.right) / 2.0f - x;
+ mOffsetY = bounds.top - mCursorControllerVerticalOffset - y;
+
+ mOnDownTimerStart = event.getEventTime();
+ }
}
-
- if (mParent != null) {
- // Prevent possible scrollView parent from scrolling, so that
- // we can use auto-scrolling.
- mParent.requestDisallowInterceptTouchEvent(true);
-
- Resources res = mContext.getResources();
- final int verticalOffset = res.getDimensionPixelOffset(
- com.android.internal.R.dimen.cursor_controller_vertical_offset);
-
- mOffsetX = (mBounds.left + mBounds.right) / 2 - x;
- mOffsetY = mBounds.top - verticalOffset - y;
-
- mOnDownTimerStart = System.currentTimeMillis();
- }
+ break;
}
- } else if (event.getActionMasked() == MotionEvent.ACTION_UP) {
- int time = (int) (System.currentTimeMillis() - mOnDownTimerStart);
- if (mIsVisible && (time <= ViewConfiguration.getTapTimeout())) {
- // A tap on the controller is not grabbed, move the cursor instead
- final int x = (int) event.getX();
- final int y = (int) event.getY();
+ case MotionEvent.ACTION_UP : {
+ int time = (int) (event.getEventTime() - mOnDownTimerStart);
- Layout layout = getLayout();
- int line = layout.getLineForVertical(y);
- int offset = layout.getOffsetForHorizontal(line, x);
- Selection.setSelection((Spannable) mText, offset);
- // Modified by cancelLongPress and prevents the cursor from changing
- mScrolled = false;
+ if (time <= ViewConfiguration.getTapTimeout()) {
+ // A tap on the controller is not grabbed, move the cursor instead
+ int offset = getOffset((int) event.getX(), (int) event.getY());
+ Selection.setSelection((Spannable) mText, offset);
+
+ // Modified by cancelLongPress and prevents the cursor from changing
+ mScrolled = false;
+ }
+ break;
}
}
}
}
- public int getOffsetX() {
+ public float getOffsetX() {
return mOffsetX;
}
- public int getOffsetY() {
+ public float getOffsetY() {
return mOffsetY;
}
}
+ class SelectionModifierCursorController implements CursorController {
+ // Whether or not the selection controls are currently visible
+ private boolean mIsVisible = false;
+        // Whether the start or the end selection controller is being dragged
+ private boolean mStartIsDragged = false;
+ // Starting time of the fade timer
+ private long mFadeOutTimerStart;
+ // The cursor controller images
+ private final Drawable mStartDrawable, mEndDrawable;
+ // Offset between finger hot point on active cursor controller and actual cursor
+ private float mOffsetX, mOffsetY;
+        // The offset of the last touch down event. Remembered so selection can start there.
+ private int mTouchOffset;
+
+ SelectionModifierCursorController() {
+ Resources res = mContext.getResources();
+ mStartDrawable = res.getDrawable(com.android.internal.R.drawable.selection_start_handle);
+ mEndDrawable = res.getDrawable(com.android.internal.R.drawable.selection_end_handle);
+ }
+
+ public void show() {
+ updateDrawablesPositions();
+            // Has to be done after updateDrawablesPositions, so that the previous
+            // position invalidate is only done if necessary.
+ mIsVisible = true;
+ mFadeOutTimerStart = -1;
+ if (mInsertionPointCursorController != null) {
+ mInsertionPointCursorController.hide();
+ }
+ }
+
+ public void hide() {
+ if (mIsVisible && (mFadeOutTimerStart < 0)) {
+ mFadeOutTimerStart = System.currentTimeMillis();
+ postInvalidate(mStartDrawable);
+ postInvalidate(mEndDrawable);
+ }
+ }
+
+ public void draw(Canvas canvas) {
+ if (mIsVisible) {
+ if (mFadeOutTimerStart >= 0) {
+ int time = (int) (System.currentTimeMillis() - mFadeOutTimerStart);
+ if (time <= FADE_OUT_DURATION) {
+ final int alpha = 255 * (FADE_OUT_DURATION - time) / FADE_OUT_DURATION;
+ mStartDrawable.setAlpha(alpha);
+ mEndDrawable.setAlpha(alpha);
+ postInvalidateDelayed(30, mStartDrawable);
+ postInvalidateDelayed(30, mEndDrawable);
+ } else {
+ mStartDrawable.setAlpha(0);
+ mEndDrawable.setAlpha(0);
+ mIsVisible = false;
+ }
+ }
+ mStartDrawable.draw(canvas);
+ mEndDrawable.draw(canvas);
+ }
+ }
+
+ public void updatePosition(int offset) {
+ int selectionStart = getSelectionStart();
+ int selectionEnd = getSelectionEnd();
+
+ // Handle the case where start and end are swapped, making sure start <= end
+ if (mStartIsDragged) {
+ if (offset <= selectionEnd) {
+ selectionStart = offset;
+ } else {
+ selectionStart = selectionEnd;
+ selectionEnd = offset;
+ mStartIsDragged = false;
+ }
+ } else {
+ if (offset >= selectionStart) {
+ selectionEnd = offset;
+ } else {
+ selectionEnd = selectionStart;
+ selectionStart = offset;
+ mStartIsDragged = true;
+ }
+ }
+
+ Selection.setSelection((Spannable) mText, selectionStart, selectionEnd);
+ updateDrawablesPositions();
+ }
+
+ private void updateDrawablesPositions() {
+ if (mIsVisible) {
+ // Clear previous cursor controller before bounds are updated
+ postInvalidate(mStartDrawable);
+ postInvalidate(mEndDrawable);
+ }
+
+ final int selectionStart = getSelectionStart();
+ final int selectionEnd = getSelectionEnd();
+
+ if ((selectionStart < 0) || (selectionEnd < 0)) {
+ // Should never happen, safety check.
+ Log.w(LOG_TAG, "Update selection controller position called with no cursor");
+ mIsVisible = false;
+ return;
+ }
+
+ positionDrawableUnderCursor(selectionStart, mStartDrawable);
+ positionDrawableUnderCursor(selectionEnd, mEndDrawable);
+
+ mStartDrawable.setAlpha(255);
+ mEndDrawable.setAlpha(255);
+ }
+
+ public void onTouchEvent(MotionEvent event) {
+ if (isFocused() && isTextEditable() &&
+ (event.getActionMasked() == MotionEvent.ACTION_DOWN)) {
+ final int x = (int) event.getX();
+ final int y = (int) event.getY();
+
+                // Remember the finger down position, to be able to start selection at that point
+ mTouchOffset = getOffset(x, y);
+
+ if (mIsVisible) {
+ if (mMovement instanceof ArrowKeyMovementMethod) {
+ boolean isOnStart = fingerIsOnDrawable(x, y, mStartDrawable);
+ boolean isOnEnd = fingerIsOnDrawable(x, y, mEndDrawable);
+ if (isOnStart || isOnEnd) {
+ if (mParent != null) {
+ // Prevent possible scrollView parent from scrolling, so that
+ // we can use auto-scrolling.
+ mParent.requestDisallowInterceptTouchEvent(true);
+ }
+
+                        // The start handle will be dragged in case BOTH controllers are under the finger
+ mStartIsDragged = isOnStart;
+ final Rect bounds =
+ (mStartIsDragged ? mStartDrawable : mEndDrawable).getBounds();
+ mOffsetX = (bounds.left + bounds.right) / 2.0f - x;
+ mOffsetY = bounds.top - mCursorControllerVerticalOffset - y;
+
+ ((ArrowKeyMovementMethod)mMovement).setCursorController(this);
+ }
+ }
+ }
+ }
+ }
+
+ public int getTouchOffset() {
+ return mTouchOffset;
+ }
+
+ public float getOffsetX() {
+ return mOffsetX;
+ }
+
+ public float getOffsetY() {
+ return mOffsetY;
+ }
+
+ /**
+ * @return true iff this controller is currently used to move the selection start.
+ */
+ public boolean isSelectionStartDragged() {
+ return mIsVisible && mStartIsDragged;
+ }
+ }
+
+ // Helper methods used by CursorController implementations
+
+ private void positionDrawableUnderCursor(final int offset, Drawable drawable) {
+ final int drawableWidth = drawable.getIntrinsicWidth();
+ final int drawableHeight = drawable.getIntrinsicHeight();
+ final int line = mLayout.getLineForOffset(offset);
+
+ final Rect bounds = sCursorControllerTempRect;
+ bounds.left = (int) (mLayout.getPrimaryHorizontal(offset) - 0.5 - drawableWidth / 2.0);
+ bounds.top = mLayout.getLineTop(line + 1);
+
+ // Move cursor controller a little bit up when editing the last line of text
+ // (or a single line) so that it is visible and easier to grab.
+ if (line == mLayout.getLineCount() - 1) {
+ bounds.top -= Math.max(0, drawableHeight / 2 - getExtendedPaddingBottom());
+ }
+
+ bounds.right = bounds.left + drawableWidth;
+ bounds.bottom = bounds.top + drawableHeight;
+
+ convertFromViewportToContentCoordinates(bounds);
+ drawable.setBounds(bounds);
+ postInvalidate(bounds.left, bounds.top, bounds.right, bounds.bottom);
+ }
+
+ private boolean fingerIsOnDrawable(float x, float y, Drawable drawable) {
+ // Simulate a 'fat finger' to ease grabbing of the controller.
+ // Expands according to controller image size instead of using density.
+        // Assumes the controller image has a sensible size, proportional to density.
+ final int drawableWidth = drawable.getIntrinsicWidth();
+ final int drawableHeight = drawable.getIntrinsicHeight();
+ final Rect fingerRect = sCursorControllerTempRect;
+ fingerRect.set((int) (x - drawableWidth / 2.0),
+ (int) (y - drawableHeight),
+ (int) (x + drawableWidth / 2.0),
+ (int) y);
+ return Rect.intersects(drawable.getBounds(), fingerRect);
+ }
+
+ private void postInvalidate(Drawable drawable) {
+ final Rect bounds = drawable.getBounds();
+ postInvalidate(bounds.left, bounds.top, bounds.right, bounds.bottom);
+ }
+
+ private void postInvalidateDelayed(long delay, Drawable drawable) {
+ final Rect bounds = drawable.getBounds();
+ postInvalidateDelayed(delay, bounds.left, bounds.top, bounds.right, bounds.bottom);
+ }
+
@ViewDebug.ExportedProperty
private CharSequence mText;
private CharSequence mTransformed;
@@ -7624,15 +7976,20 @@
private final TextPaint mTextPaint;
private boolean mUserSetTextScaleX;
private final Paint mHighlightPaint;
- private int mHighlightColor = 0xFFBBDDFF;
+ private int mHighlightColor = 0xD077A14B;
private Layout mLayout;
private long mShowCursor;
private Blink mBlink;
private boolean mCursorVisible = true;
- // Cursor Controller. Null when disabled.
+ // Cursor Controllers. Null when disabled.
private CursorController mInsertionPointCursorController;
+ private CursorController mSelectionModifierCursorController;
+ // Stored once and for all.
+ private int mCursorControllerVerticalOffset;
+ // Created once and shared by different CursorController helper methods.
+ private static Rect sCursorControllerTempRect;
private boolean mSelectAllOnFocus = false;
diff --git a/core/java/com/android/internal/app/ActionBarImpl.java b/core/java/com/android/internal/app/ActionBarImpl.java
index 6cf455c..f37021b 100644
--- a/core/java/com/android/internal/app/ActionBarImpl.java
+++ b/core/java/com/android/internal/app/ActionBarImpl.java
@@ -34,6 +34,7 @@
import android.widget.SpinnerAdapter;
import android.widget.ViewAnimator;
+import java.lang.ref.WeakReference;
import java.util.ArrayList;
/**
@@ -70,6 +71,8 @@
private int mContextDisplayMode;
+ private boolean mClosingContext;
+
final Handler mHandler = new Handler();
final Runnable mCloseContext = new Runnable() {
public void run() {
@@ -77,6 +80,7 @@
if (mLowerContextView != null) {
mLowerContextView.removeAllViews();
}
+ mClosingContext = false;
}
};
@@ -195,6 +199,14 @@
if (mContextMode != null) {
mContextMode.finish();
}
+
+ // Don't wait for the close context mode animation to finish.
+ if (mClosingContext) {
+ mAnimatorView.clearAnimation();
+ mHandler.removeCallbacks(mCloseContext);
+ mCloseContext.run();
+ }
+
mContextMode = new ContextMode(callback);
if (callback.onCreateContextMode(mContextMode, mContextMode.getMenu())) {
mContextMode.invalidate();
@@ -327,6 +339,7 @@
public class ContextMode extends ActionBar.ContextMode {
private ContextModeCallback mCallback;
private ActionMenu mMenu;
+ private WeakReference<View> mCustomView;
public ContextMode(ContextModeCallback callback) {
mCallback = callback;
@@ -344,6 +357,7 @@
mAnimatorView.setDisplayedChild(NORMAL_VIEW);
// Clear out the context mode views after the animation finishes
+ mClosingContext = true;
mHandler.postDelayed(mCloseContext, mAnimatorView.getOutAnimation().getDuration());
if (mLowerContextView != null && mLowerContextView.getVisibility() != View.GONE) {
@@ -363,6 +377,7 @@
@Override
public void setCustomView(View view) {
mUpperContextView.setCustomView(view);
+ mCustomView = new WeakReference<View>(view);
}
@Override
@@ -374,7 +389,22 @@
public void setTitle(CharSequence title) {
mUpperContextView.setTitle(title);
}
+
+ @Override
+ public CharSequence getTitle() {
+ return mUpperContextView.getTitle();
+ }
+
+ @Override
+ public CharSequence getSubtitle() {
+ return mUpperContextView.getSubtitle();
+ }
+ @Override
+ public View getCustomView() {
+ return mCustomView != null ? mCustomView.get() : null;
+ }
+
public void dispatchOnContextItemClicked(MenuItem item) {
ActionMenuItem actionItem = (ActionMenuItem) item;
if (!actionItem.invoke()) {
diff --git a/core/java/com/android/internal/app/AlertController.java b/core/java/com/android/internal/app/AlertController.java
index 107b145..4a0617c 100644
--- a/core/java/com/android/internal/app/AlertController.java
+++ b/core/java/com/android/internal/app/AlertController.java
@@ -435,6 +435,7 @@
View titleTemplate = mWindow.findViewById(R.id.title_template);
titleTemplate.setVisibility(View.GONE);
mIconView.setVisibility(View.GONE);
+ topPanel.setVisibility(View.GONE);
hasTitle = false;
}
}
diff --git a/core/java/com/android/internal/view/RootViewSurfaceTaker.java b/core/java/com/android/internal/view/RootViewSurfaceTaker.java
index 7ff8d4c..9c1b558 100644
--- a/core/java/com/android/internal/view/RootViewSurfaceTaker.java
+++ b/core/java/com/android/internal/view/RootViewSurfaceTaker.java
@@ -5,7 +5,7 @@
/** hahahah */
public interface RootViewSurfaceTaker {
- SurfaceHolder.Callback willYouTakeTheSurface();
+ SurfaceHolder.Callback2 willYouTakeTheSurface();
void setSurfaceType(int type);
void setSurfaceFormat(int format);
void setSurfaceKeepScreenOn(boolean keepOn);
diff --git a/core/java/com/android/internal/view/menu/MenuBuilder.java b/core/java/com/android/internal/view/menu/MenuBuilder.java
index a962212..94a9f65 100644
--- a/core/java/com/android/internal/view/menu/MenuBuilder.java
+++ b/core/java/com/android/internal/view/menu/MenuBuilder.java
@@ -55,7 +55,7 @@
private static final String LOGTAG = "MenuBuilder";
/** The number of different menu types */
- public static final int NUM_TYPES = 4;
+ public static final int NUM_TYPES = 5;
/** The menu type that represents the icon menu view */
public static final int TYPE_ICON = 0;
/** The menu type that represents the expanded menu view */
@@ -66,20 +66,24 @@
* have an ItemView.
*/
public static final int TYPE_DIALOG = 2;
-
/**
* The menu type that represents a button in the application's action bar.
*/
public static final int TYPE_ACTION_BUTTON = 3;
+ /**
+ * The menu type that represents a menu popup.
+ */
+ public static final int TYPE_POPUP = 4;
private static final String VIEWS_TAG = "android:views";
-
+
// Order must be the same order as the TYPE_*
static final int THEME_RES_FOR_TYPE[] = new int[] {
com.android.internal.R.style.Theme_IconMenu,
com.android.internal.R.style.Theme_ExpandedMenu,
0,
0,
+ 0,
};
// Order must be the same order as the TYPE_*
@@ -88,6 +92,7 @@
com.android.internal.R.layout.expanded_menu_layout,
0,
com.android.internal.R.layout.action_menu_layout,
+ 0,
};
// Order must be the same order as the TYPE_*
@@ -96,6 +101,7 @@
com.android.internal.R.layout.list_menu_item_layout,
com.android.internal.R.layout.list_menu_item_layout,
com.android.internal.R.layout.action_menu_item_layout,
+ com.android.internal.R.layout.list_menu_item_layout,
};
private static final int[] sCategoryToOrder = new int[] {
@@ -1251,7 +1257,19 @@
}
public View getView(int position, View convertView, ViewGroup parent) {
- return ((MenuItemImpl) getItem(position)).getItemView(mMenuType, parent);
+ if (convertView != null) {
+ MenuView.ItemView itemView = (MenuView.ItemView) convertView;
+ itemView.getItemData().setItemView(mMenuType, null);
+
+ MenuItemImpl item = (MenuItemImpl) getItem(position);
+ itemView.initialize(item, mMenuType);
+ item.setItemView(mMenuType, itemView);
+ return convertView;
+ } else {
+ MenuItemImpl item = (MenuItemImpl) getItem(position);
+ item.setItemView(mMenuType, null);
+ return item.getItemView(mMenuType, parent);
+ }
}
}
diff --git a/core/java/com/android/internal/view/menu/MenuItemImpl.java b/core/java/com/android/internal/view/menu/MenuItemImpl.java
index 5fe75be..fecbd77 100644
--- a/core/java/com/android/internal/view/menu/MenuItemImpl.java
+++ b/core/java/com/android/internal/view/menu/MenuItemImpl.java
@@ -583,6 +583,10 @@
return (View) mItemViews[menuType].get();
}
+ void setItemView(int menuType, ItemView view) {
+ mItemViews[menuType] = new WeakReference<ItemView>(view);
+ }
+
/**
* Create and initializes a menu item view that implements {@link MenuView.ItemView}.
* @param menuType The type of menu to get a View for (must be one of
@@ -631,7 +635,10 @@
* @return Whether the given menu type should show icons for menu items.
*/
public boolean shouldShowIcon(int menuType) {
- return menuType == MenuBuilder.TYPE_ICON || mMenu.getOptionalIconsVisible();
+ return menuType == MenuBuilder.TYPE_ICON ||
+ menuType == MenuBuilder.TYPE_ACTION_BUTTON ||
+ menuType == MenuBuilder.TYPE_POPUP ||
+ mMenu.getOptionalIconsVisible();
}
public boolean isActionButton() {
diff --git a/core/java/com/android/internal/view/menu/MenuPopupHelper.java b/core/java/com/android/internal/view/menu/MenuPopupHelper.java
new file mode 100644
index 0000000..751ecda
--- /dev/null
+++ b/core/java/com/android/internal/view/menu/MenuPopupHelper.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.internal.view.menu;
+
+import com.android.internal.view.menu.MenuBuilder.MenuAdapter;
+
+import android.content.Context;
+import android.util.DisplayMetrics;
+import android.view.View;
+import android.view.View.MeasureSpec;
+import android.widget.AdapterView;
+import android.widget.ListPopupWindow;
+
+/**
+ * @hide
+ */
+public class MenuPopupHelper implements AdapterView.OnItemClickListener {
+ private static final String TAG = "MenuPopupHelper";
+
+ private Context mContext;
+ private ListPopupWindow mPopup;
+ private SubMenuBuilder mSubMenu;
+ private int mPopupMaxWidth;
+
+ public MenuPopupHelper(Context context, SubMenuBuilder subMenu) {
+ mContext = context;
+ mSubMenu = subMenu;
+
+ final DisplayMetrics metrics = context.getResources().getDisplayMetrics();
+ mPopupMaxWidth = metrics.widthPixels / 2;
+ }
+
+ public void show() {
+ // TODO Use a style from the theme here
+ mPopup = new ListPopupWindow(mContext, null, 0,
+ com.android.internal.R.style.Widget_Spinner);
+ mPopup.setOnItemClickListener(this);
+
+ final MenuAdapter adapter = mSubMenu.getMenuAdapter(MenuBuilder.TYPE_POPUP);
+ mPopup.setAdapter(adapter);
+ mPopup.setModal(true);
+
+ final MenuItemImpl itemImpl = (MenuItemImpl) mSubMenu.getItem();
+ final View anchorView = itemImpl.getItemView(MenuBuilder.TYPE_ACTION_BUTTON, null);
+ mPopup.setAnchorView(anchorView);
+
+ mPopup.setContentWidth(Math.min(measureContentWidth(adapter), mPopupMaxWidth));
+ mPopup.show();
+ }
+
+ public void dismiss() {
+ mPopup.dismiss();
+ mPopup = null;
+ }
+
+ public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
+ mSubMenu.performItemAction(mSubMenu.getItem(position), 0);
+ mPopup.dismiss();
+ }
+
+ private int measureContentWidth(MenuAdapter adapter) {
+ // Menus don't tend to be long, so this is more sane than it looks.
+ int width = 0;
+ View itemView = null;
+ final int widthMeasureSpec =
+ MeasureSpec.makeMeasureSpec(0, MeasureSpec.UNSPECIFIED);
+ final int heightMeasureSpec =
+ MeasureSpec.makeMeasureSpec(0, MeasureSpec.UNSPECIFIED);
+ final int count = adapter.getCount();
+ for (int i = 0; i < count; i++) {
+ itemView = adapter.getView(i, itemView, null);
+ itemView.measure(widthMeasureSpec, heightMeasureSpec);
+ width = Math.max(width, itemView.getMeasuredWidth());
+ }
+ return width;
+ }
+}
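
A usage sketch for the new helper (an assumption about the intended call pattern, which is not shown in this patch; subMenuBuilder is assumed to be an existing SubMenuBuilder):

    MenuPopupHelper helper = new MenuPopupHelper(getContext(), subMenuBuilder);
    helper.show();     // builds the ListPopupWindow, measures the rows, anchors on the action button
    // ...
    helper.dismiss();  // for example when the hosting view is detached
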
diff --git a/core/java/com/android/internal/widget/ActionBarContextView.java b/core/java/com/android/internal/widget/ActionBarContextView.java
index 0f895f0..b57b7a8 100644
--- a/core/java/com/android/internal/widget/ActionBarContextView.java
+++ b/core/java/com/android/internal/widget/ActionBarContextView.java
@@ -105,6 +105,14 @@
initTitle();
}
+ public CharSequence getTitle() {
+ return mTitle;
+ }
+
+ public CharSequence getSubtitle() {
+ return mSubtitle;
+ }
+
private void initTitle() {
if (mTitleLayout == null) {
LayoutInflater inflater = LayoutInflater.from(getContext());
diff --git a/core/java/com/android/internal/widget/ActionBarView.java b/core/java/com/android/internal/widget/ActionBarView.java
index 48707b9..fbff8ae 100644
--- a/core/java/com/android/internal/widget/ActionBarView.java
+++ b/core/java/com/android/internal/widget/ActionBarView.java
@@ -91,6 +91,7 @@
private View mCustomNavView;
private boolean mShowMenu;
+ private boolean mUserTitle;
private MenuBuilder mOptionsMenu;
private ActionMenuView mMenuView;
@@ -212,7 +213,30 @@
return mTitle;
}
+ /**
+ * Set the action bar title. This will always replace or override window titles.
+ * @param title Title to set
+ *
+ * @see #setWindowTitle(CharSequence)
+ */
public void setTitle(CharSequence title) {
+ mUserTitle = true;
+ setTitleImpl(title);
+ }
+
+ /**
+ * Set the window title. A window title will always be replaced or overridden by a user title.
+ * @param title Title to set
+ *
+ * @see #setTitle(CharSequence)
+ */
+ public void setWindowTitle(CharSequence title) {
+ if (!mUserTitle) {
+ setTitleImpl(title);
+ }
+ }
+
+ private void setTitleImpl(CharSequence title) {
mTitle = title;
if (mTitleView != null) {
mTitleView.setText(title);
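
A sketch of the precedence the new mUserTitle flag enforces (illustration only; actionBarView is assumed):

    actionBarView.setWindowTitle("Activity label");    // shown: no user title has been set yet
    actionBarView.setTitle("User title");              // replaces it and marks the title as user-set
    actionBarView.setWindowTitle("New window label");  // ignored: a user title always wins
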
diff --git a/core/jni/ActivityManager.cpp b/core/jni/ActivityManager.cpp
index 8950dfb..0bd14fa 100644
--- a/core/jni/ActivityManager.cpp
+++ b/core/jni/ActivityManager.cpp
@@ -39,7 +39,7 @@
data.writeString16(uri);
status_t ret = am->transact(OPEN_CONTENT_URI_TRANSACTION, data, &reply);
if (ret == NO_ERROR) {
- int32_t exceptionCode = reply.readInt32();
+ int32_t exceptionCode = reply.readExceptionCode();
if (!exceptionCode) {
// Success is indicated here by a nonzero int followed by the fd;
// failure by a zero int with no data following.
diff --git a/core/jni/Android.mk b/core/jni/Android.mk
index 722dacb..d19cae4 100644
--- a/core/jni/Android.mk
+++ b/core/jni/Android.mk
@@ -105,6 +105,7 @@
android/graphics/Rasterizer.cpp \
android/graphics/Region.cpp \
android/graphics/Shader.cpp \
+ android/graphics/TextLayout.cpp \
android/graphics/Typeface.cpp \
android/graphics/Xfermode.cpp \
android/graphics/YuvToJpegEncoder.cpp \
diff --git a/core/jni/android/graphics/Canvas.cpp b/core/jni/android/graphics/Canvas.cpp
index 2e49c64..558f5ff 100644
--- a/core/jni/android/graphics/Canvas.cpp
+++ b/core/jni/android/graphics/Canvas.cpp
@@ -30,6 +30,8 @@
#include "SkBoundaryPatch.h"
#include "SkMeshUtils.h"
+#include "TextLayout.h"
+
#include "unicode/ubidi.h"
#include "unicode/ushape.h"
@@ -57,24 +59,6 @@
class SkCanvasGlue {
public:
- enum {
- kDirection_LTR = 0,
- kDirection_RTL = 1
- };
-
- enum {
- kDirection_Mask = 0x1
- };
-
- enum {
- kBidi_LTR = 0,
- kBidi_RTL = 1,
- kBidi_Default_LTR = 2,
- kBidi_Default_RTL = 3,
- kBidi_Force_LTR = 4,
- kBidi_Force_RTL = 5
- };
-
static void finalizer(JNIEnv* env, jobject clazz, SkCanvas* canvas) {
canvas->unref();
}
@@ -767,192 +751,12 @@
indices, indexCount, *paint);
}
- /**
- * Character-based Arabic shaping.
- *
- * We'll use harfbuzz and glyph-based shaping instead once we're set up for it.
- *
- * @context the text context
- * @start the start of the text to render
- * @count the length of the text to render, start + count must be <= contextCount
- * @contextCount the length of the context
- * @shaped where to put the shaped text, must have capacity for count uchars
- * @return the length of the shaped text, or -1 if error
- */
- static int shapeRtlText(const jchar* context, jsize start, jsize count, jsize contextCount,
- jchar* shaped, UErrorCode &status) {
- jchar buffer[contextCount];
-
- // Use fixed length since we need to keep start and count valid
- u_shapeArabic(context, contextCount, buffer, contextCount,
- U_SHAPE_LENGTH_FIXED_SPACES_NEAR |
- U_SHAPE_TEXT_DIRECTION_LOGICAL | U_SHAPE_LETTERS_SHAPE |
- U_SHAPE_X_LAMALEF_SUB_ALTERNATE, &status);
-
- if (U_SUCCESS(status)) {
- // trim out 0xffff following ligatures, if any
- int end = 0;
- for (int i = start, e = start + count; i < e; ++i) {
- if (buffer[i] != 0xffff) {
- buffer[end++] = buffer[i];
- }
- }
- count = end;
- // LOG(LOG_INFO, "CSRTL", "start %d count %d ccount %d\n", start, count, contextCount);
- ubidi_writeReverse(buffer, count, shaped, count, UBIDI_DO_MIRRORING | UBIDI_OUTPUT_REVERSE
- | UBIDI_KEEP_BASE_COMBINING, &status);
- if (U_SUCCESS(status)) {
- return count;
- }
- }
-
- return -1;
- }
-
- /**
- * Basic character-based layout supporting rtl and arabic shaping.
- * Runs bidi on the text and generates a reordered, shaped line in buffer, returning
- * the length.
- * @text the text
- * @len the length of the text in uchars
- * @dir receives the resolved paragraph direction
- * @buffer the buffer to receive the reordered, shaped line. Must have capacity of
- * at least len jchars.
- * @flags line bidi flags
- * @return the length of the reordered, shaped line, or -1 if error
- */
- static jint layoutLine(const jchar* text, jint len, jint flags, int &dir, jchar* buffer,
- UErrorCode &status) {
- static int RTL_OPTS = UBIDI_DO_MIRRORING | UBIDI_KEEP_BASE_COMBINING |
- UBIDI_REMOVE_BIDI_CONTROLS | UBIDI_OUTPUT_REVERSE;
-
- UBiDiLevel bidiReq = 0;
- switch (flags) {
- case kBidi_LTR: bidiReq = 0; break; // no ICU constant, canonical LTR level
- case kBidi_RTL: bidiReq = 1; break; // no ICU constant, canonical RTL level
- case kBidi_Default_LTR: bidiReq = UBIDI_DEFAULT_LTR; break;
- case kBidi_Default_RTL: bidiReq = UBIDI_DEFAULT_RTL; break;
- case kBidi_Force_LTR: memcpy(buffer, text, len * sizeof(jchar)); return len;
- case kBidi_Force_RTL: return shapeRtlText(text, 0, len, len, buffer, status);
- }
-
- int32_t result = -1;
-
- UBiDi* bidi = ubidi_open();
- if (bidi) {
- ubidi_setPara(bidi, text, len, bidiReq, NULL, &status);
- if (U_SUCCESS(status)) {
- dir = ubidi_getParaLevel(bidi) & 0x1; // 0 if ltr, 1 if rtl
-
- int rc = ubidi_countRuns(bidi, &status);
- if (U_SUCCESS(status)) {
- // LOG(LOG_INFO, "LAYOUT", "para bidiReq=%d dir=%d rc=%d\n", bidiReq, dir, rc);
-
- int32_t slen = 0;
- for (int i = 0; i < rc; ++i) {
- int32_t start;
- int32_t length;
- UBiDiDirection runDir = ubidi_getVisualRun(bidi, i, &start, &length);
- // LOG(LOG_INFO, "LAYOUT", " [%2d] runDir=%d start=%3d len=%3d\n", i, runDir, start, length);
- if (runDir == UBIDI_RTL) {
- slen += shapeRtlText(text + start, 0, length, length, buffer + slen, status);
- } else {
- memcpy(buffer + slen, text + start, length * sizeof(jchar));
- slen += length;
- }
- }
- if (U_SUCCESS(status)) {
- result = slen;
- }
- }
- }
- ubidi_close(bidi);
- }
-
- return result;
- }
-
- // Returns true if we might need layout. If bidiFlags force LTR, assume no layout, if
- // bidiFlags indicate there probably is RTL, assume we do, otherwise scan the text
- // looking for a character >= the first RTL character in unicode and assume we do if
- // we find one.
- static bool needsLayout(const jchar* text, jint len, jint bidiFlags) {
- if (bidiFlags == kBidi_Force_LTR) {
- return false;
- }
- if ((bidiFlags == kBidi_RTL) || (bidiFlags == kBidi_Default_RTL) ||
- bidiFlags == kBidi_Force_RTL) {
- return true;
- }
- for (int i = 0; i < len; ++i) {
- if (text[i] >= 0x0590) {
- return true;
- }
- }
- return false;
- }
-
- // Draws a paragraph of text on a single line, running bidi and shaping
- static void drawText(JNIEnv* env, SkCanvas* canvas, const jchar* text, jsize len,
- jfloat x, jfloat y, int bidiFlags, SkPaint* paint) {
-
- SkScalar x_ = SkFloatToScalar(x);
- SkScalar y_ = SkFloatToScalar(y);
-
- SkPaint::Align horiz = paint->getTextAlign();
-
- const jchar *workText = text;
- jchar *buffer = NULL;
- int dir = kDirection_LTR;
- if (needsLayout(text, len, bidiFlags)) {
- buffer =(jchar *) malloc(len * sizeof(jchar));
- if (!buffer) {
- return;
- }
- UErrorCode status = U_ZERO_ERROR;
- len = layoutLine(text, len, bidiFlags, dir, buffer, status); // might change len, dir
- if (!U_SUCCESS(status)) {
- LOG(LOG_WARN, "LAYOUT", "drawText error %d\n", status);
- free(buffer);
- return; // can't render
- }
-
- workText = buffer; // use the shaped text
- }
-
- bool trimLeft = false;
- bool trimRight = false;
-
- switch (horiz) {
- case SkPaint::kLeft_Align: trimLeft = dir & kDirection_Mask; break;
- case SkPaint::kCenter_Align: trimLeft = trimRight = true; break;
- case SkPaint::kRight_Align: trimRight = !(dir & kDirection_Mask);
- default: break;
- }
- const jchar* workLimit = workText + len;
-
- if (trimLeft) {
- while (workText < workLimit && *workText == ' ') {
- ++workText;
- }
- }
- if (trimRight) {
- while (workLimit > workText && *(workLimit - 1) == ' ') {
- --workLimit;
- }
- }
- int32_t workBytes = (workLimit - workText) << 1;
-
- canvas->drawText(workText, workBytes, x_, y_, *paint);
-
- free(buffer);
- }
static void drawText___CIIFFIPaint(JNIEnv* env, jobject, SkCanvas* canvas,
jcharArray text, int index, int count,
jfloat x, jfloat y, int flags, SkPaint* paint) {
jchar* textArray = env->GetCharArrayElements(text, NULL);
- drawText(env, canvas, textArray + index, count, x, y, flags, paint);
+ TextLayout::drawText(paint, textArray + index, count, flags, x, y, canvas);
env->ReleaseCharArrayElements(text, textArray, JNI_ABORT);
}
@@ -961,41 +765,18 @@
int start, int end,
jfloat x, jfloat y, int flags, SkPaint* paint) {
const jchar* textArray = env->GetStringChars(text, NULL);
- drawText(env, canvas, textArray + start, end - start, x, y, flags, paint);
+ TextLayout::drawText(paint, textArray + start, end - start, flags, x, y, canvas);
env->ReleaseStringChars(text, textArray);
}
- // Draws a unidirectional run of text.
- static void drawTextRun(JNIEnv* env, SkCanvas* canvas, const jchar* chars,
- jint start, jint count, jint contextCount,
- jfloat x, jfloat y, int dirFlags, SkPaint* paint) {
-
- SkScalar x_ = SkFloatToScalar(x);
- SkScalar y_ = SkFloatToScalar(y);
-
- uint8_t rtl = dirFlags & 0x1;
- if (rtl) {
- SkAutoSTMalloc<80, jchar> buffer(contextCount);
- UErrorCode status = U_ZERO_ERROR;
- count = shapeRtlText(chars, start, count, contextCount, buffer.get(), status);
- if (U_SUCCESS(status)) {
- canvas->drawText(buffer.get(), count << 1, x_, y_, *paint);
- } else {
- LOG(LOG_WARN, "LAYOUT", "drawTextRun error %d\n", status);
- }
- } else {
- canvas->drawText(chars + start, count << 1, x_, y_, *paint);
- }
- }
-
static void drawTextRun___CIIIIFFIPaint(
JNIEnv* env, jobject, SkCanvas* canvas, jcharArray text, int index,
int count, int contextIndex, int contextCount,
jfloat x, jfloat y, int dirFlags, SkPaint* paint) {
jchar* chars = env->GetCharArrayElements(text, NULL);
- drawTextRun(env, canvas, chars + contextIndex, index - contextIndex,
- count, contextCount, x, y, dirFlags, paint);
+ TextLayout::drawTextRun(paint, chars + contextIndex, index - contextIndex,
+ count, contextCount, dirFlags, x, y, canvas);
env->ReleaseCharArrayElements(text, chars, JNI_ABORT);
}
@@ -1007,8 +788,8 @@
jint count = end - start;
jint contextCount = contextEnd - contextStart;
const jchar* chars = env->GetStringChars(text, NULL);
- drawTextRun(env, canvas, chars + contextStart, start - contextStart,
- count, contextCount, x, y, dirFlags, paint);
+ TextLayout::drawTextRun(paint, chars + contextStart, start - contextStart,
+ count, contextCount, dirFlags, x, y, canvas);
env->ReleaseStringChars(text, chars);
}
@@ -1059,31 +840,13 @@
delete[] posPtr;
}
- static void drawTextOnPath(JNIEnv *env, SkCanvas* canvas, const jchar* text, int count,
- int bidiFlags, SkPath* path, jfloat hOffset, jfloat vOffset, SkPaint* paint) {
-
- if (!needsLayout(text, count, bidiFlags)) {
- canvas->drawTextOnPathHV(text, count << 1, *path,
- SkFloatToScalar(hOffset), SkFloatToScalar(vOffset), *paint);
- return;
- }
-
- SkAutoSTMalloc<80, jchar> buffer(count);
- int dir = kDirection_LTR;
- UErrorCode status = U_ZERO_ERROR;
- count = layoutLine(text, count, bidiFlags, dir, buffer.get(), status);
- if (U_SUCCESS(status)) {
- canvas->drawTextOnPathHV(buffer.get(), count << 1, *path,
- SkFloatToScalar(hOffset), SkFloatToScalar(vOffset), *paint);
- }
- }
-
static void drawTextOnPath___CIIPathFFPaint(JNIEnv* env, jobject,
SkCanvas* canvas, jcharArray text, int index, int count,
SkPath* path, jfloat hOffset, jfloat vOffset, jint bidiFlags, SkPaint* paint) {
jchar* textArray = env->GetCharArrayElements(text, NULL);
- drawTextOnPath(env, canvas, textArray, count, bidiFlags, path, hOffset, vOffset, paint);
+ TextLayout::drawTextOnPath(paint, textArray, count, bidiFlags, hOffset, vOffset,
+ path, canvas);
env->ReleaseCharArrayElements(text, textArray, 0);
}
@@ -1092,7 +855,8 @@
jfloat hOffset, jfloat vOffset, jint bidiFlags, SkPaint* paint) {
const jchar* text_ = env->GetStringChars(text, NULL);
int count = env->GetStringLength(text);
- drawTextOnPath(env, canvas, text_, count, bidiFlags, path, hOffset, vOffset, paint);
+ TextLayout::drawTextOnPath(paint, text_, count, bidiFlags, hOffset, vOffset,
+ path, canvas);
env->ReleaseStringChars(text, text_);
}
diff --git a/core/jni/android/graphics/Paint.cpp b/core/jni/android/graphics/Paint.cpp
index ca9c9de..e4d4850 100644
--- a/core/jni/android/graphics/Paint.cpp
+++ b/core/jni/android/graphics/Paint.cpp
@@ -32,6 +32,7 @@
#include "SkTypeface.h"
#include "SkXfermode.h"
#include "unicode/ushape.h"
+#include "TextLayout.h"
// temporary for debugging
#include <utils/Log.h>
@@ -403,56 +404,14 @@
return count;
}
- static jfloat doTextRunAdvances(JNIEnv *env, SkPaint *paint, const jchar *text, jint start, jint count, jint contextCount, jint flags,
+ static jfloat doTextRunAdvances(JNIEnv *env, SkPaint *paint, const jchar *text,
+ jint start, jint count, jint contextCount, jint flags,
jfloatArray advances, jint advancesIndex) {
jfloat advancesArray[count];
- jchar buffer[contextCount];
+ jfloat totalAdvance;
- SkScalar* scalarArray = (SkScalar *)advancesArray;
- jfloat totalAdvance = 0;
-
- // this is where we'd call harfbuzz
- // for now we just use ushape.c
-
- int widths;
- if (flags & 0x1) { // rtl, call arabic shaping in case
- UErrorCode status = U_ZERO_ERROR;
- // Use fixed length since we need to keep start and count valid
- u_shapeArabic(text, contextCount, buffer, contextCount,
- U_SHAPE_LENGTH_FIXED_SPACES_NEAR |
- U_SHAPE_TEXT_DIRECTION_LOGICAL | U_SHAPE_LETTERS_SHAPE |
- U_SHAPE_X_LAMALEF_SUB_ALTERNATE, &status);
- // we shouldn't fail unless there's an out of memory condition,
- // in which case we're hosed anyway
- for (int i = start, e = i + count; i < e; ++i) {
- if (buffer[i] == 0xffff) {
- buffer[i] = 0x200b; // zero-width-space for skia
- }
- }
- widths = paint->getTextWidths(buffer + start, count << 1, scalarArray);
- } else {
- widths = paint->getTextWidths(text + start, count << 1, scalarArray);
- }
-
- if (widths < count) {
- // Skia operates on code points, not code units, so surrogate pairs return only
- // one value. Expand the result so we have one value per UTF-16 code unit.
-
- // Note, skia's getTextWidth gets confused if it encounters a surrogate pair,
- // leaving the remaining widths zero. Not nice.
- const jchar *chars = text + start;
- for (int i = 0, p = 0; i < widths; ++i) {
- totalAdvance += advancesArray[p++] = SkScalarToFloat(scalarArray[i]);
- if (p < count && chars[p] >= 0xdc00 && chars[p] < 0xe000 &&
- chars[p-1] >= 0xd800 && chars[p-1] < 0xdc00) {
- advancesArray[p++] = 0;
- }
- }
- } else {
- for (int i = 0; i < count; i++) {
- totalAdvance += advancesArray[i] = SkScalarToFloat(scalarArray[i]);
- }
- }
+ TextLayout::getTextRunAdvances(paint, text, start, count, contextCount, flags,
+ advancesArray, totalAdvance);
if (advances != NULL) {
env->SetFloatArrayRegion(advances, advancesIndex, count, advancesArray);
@@ -580,19 +539,25 @@
return result;
}
- static void getTextPath___CIIFFPath(JNIEnv* env, jobject clazz, SkPaint* paint, jcharArray text, int index, int count, jfloat x, jfloat y, SkPath* path) {
- const jchar* textArray = env->GetCharArrayElements(text, NULL);
- paint->getTextPath(textArray + index, count << 1, SkFloatToScalar(x), SkFloatToScalar(y), path);
- env->ReleaseCharArrayElements(text, const_cast<jchar*>(textArray),
- JNI_ABORT);
+ static void getTextPath(JNIEnv* env, SkPaint* paint, const jchar* text, jint count,
+ jint bidiFlags, jfloat x, jfloat y, SkPath *path) {
+ TextLayout::getTextPath(paint, text, count, bidiFlags, x, y, path);
}
-
- static void getTextPath__StringIIFFPath(JNIEnv* env, jobject clazz, SkPaint* paint, jstring text, int start, int end, jfloat x, jfloat y, SkPath* path) {
+
+ static void getTextPath___C(JNIEnv* env, jobject clazz, SkPaint* paint, jint bidiFlags,
+ jcharArray text, int index, int count, jfloat x, jfloat y, SkPath* path) {
+ const jchar* textArray = env->GetCharArrayElements(text, NULL);
+ getTextPath(env, paint, textArray + index, count, bidiFlags, x, y, path);
+ env->ReleaseCharArrayElements(text, const_cast<jchar*>(textArray), JNI_ABORT);
+ }
+
+ static void getTextPath__String(JNIEnv* env, jobject clazz, SkPaint* paint, jint bidiFlags,
+ jstring text, int start, int end, jfloat x, jfloat y, SkPath* path) {
const jchar* textArray = env->GetStringChars(text, NULL);
- paint->getTextPath(textArray + start, (end - start) << 1, SkFloatToScalar(x), SkFloatToScalar(y), path);
+ getTextPath(env, paint, textArray + start, end - start, bidiFlags, x, y, path);
env->ReleaseStringChars(text, textArray);
}
-
+
static void setShadowLayer(JNIEnv* env, jobject jpaint, jfloat radius,
jfloat dx, jfloat dy, int color) {
NPE_CHECK_RETURN_VOID(env, jpaint);
@@ -767,8 +732,8 @@
{"native_getTextRunCursor", "(I[CIIIII)I", (void*) SkPaintGlue::getTextRunCursor___C},
{"native_getTextRunCursor", "(ILjava/lang/String;IIIII)I",
(void*) SkPaintGlue::getTextRunCursor__String},
- {"native_getTextPath","(I[CIIFFI)V", (void*) SkPaintGlue::getTextPath___CIIFFPath},
- {"native_getTextPath","(ILjava/lang/String;IIFFI)V", (void*) SkPaintGlue::getTextPath__StringIIFFPath},
+ {"native_getTextPath","(II[CIIFFI)V", (void*) SkPaintGlue::getTextPath___C},
+ {"native_getTextPath","(IILjava/lang/String;IIFFI)V", (void*) SkPaintGlue::getTextPath__String},
{"nativeGetStringBounds", "(ILjava/lang/String;IILandroid/graphics/Rect;)V",
(void*) SkPaintGlue::getStringBounds },
{"nativeGetCharArrayBounds", "(I[CIILandroid/graphics/Rect;)V",
diff --git a/core/jni/android/graphics/TextLayout.cpp b/core/jni/android/graphics/TextLayout.cpp
new file mode 100644
index 0000000..e2536ee
--- /dev/null
+++ b/core/jni/android/graphics/TextLayout.cpp
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TextLayout.h"
+
+#include <android_runtime/AndroidRuntime.h>
+
+#include "SkTemplates.h"
+#include "unicode/ubidi.h"
+#include "unicode/ushape.h"
+#include <utils/Log.h>
+
+
+namespace android {
+// Returns true if we might need layout. If bidiFlags force LTR, assume no layout;
+// if bidiFlags indicate there probably is RTL, assume we do; otherwise scan the text
+// looking for a character >= the first RTL character in Unicode and assume we do if
+// we find one.
+bool TextLayout::needsLayout(const jchar* text, jint len, jint bidiFlags) {
+ if (bidiFlags == kBidi_Force_LTR) {
+ return false;
+ }
+ if ((bidiFlags == kBidi_RTL) || (bidiFlags == kBidi_Default_RTL) ||
+ bidiFlags == kBidi_Force_RTL) {
+ return true;
+ }
+ for (int i = 0; i < len; ++i) {
+ if (text[i] >= 0x0590) {
+ return true;
+ }
+ }
+ return false;
+}
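
For illustration, a minimal standalone sketch of the same fast-path heuristic written against plain C types (the free function and its name are illustrative; the real helper is private to TextLayout):

    #include <stddef.h>
    #include <stdint.h>

    // Returns true if the run may need bidi/shaping work: U+0590 opens the
    // Hebrew block, the first RTL range in Unicode, so anything below it is
    // safe to draw on the fast LTR path.
    static bool mightNeedBidiLayout(const uint16_t* text, size_t len) {
        for (size_t i = 0; i < len; ++i) {
            if (text[i] >= 0x0590) {
                return true;
            }
        }
        return false;
    }
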
+
+/**
+ * Character-based Arabic shaping.
+ *
+ * We'll use harfbuzz and glyph-based shaping instead once we're set up for it.
+ *
+ * @context the text context
+ * @start the start of the text to render
+ * @count the length of the text to render, start + count must be <= contextCount
+ * @contextCount the length of the context
+ * @shaped where to put the shaped text, must have capacity for count uchars
+ * @return the length of the shaped text, or -1 if error
+ */
+int TextLayout::shapeRtlText(const jchar* context, jsize start, jsize count, jsize contextCount,
+ jchar* shaped, UErrorCode &status) {
+ jchar buffer[contextCount];
+
+ // Use fixed length since we need to keep start and count valid
+ u_shapeArabic(context, contextCount, buffer, contextCount,
+ U_SHAPE_LENGTH_FIXED_SPACES_NEAR |
+ U_SHAPE_TEXT_DIRECTION_LOGICAL | U_SHAPE_LETTERS_SHAPE |
+ U_SHAPE_X_LAMALEF_SUB_ALTERNATE, &status);
+
+ if (U_SUCCESS(status)) {
+ // trim out 0xffff following ligatures, if any
+ int end = 0;
+ for (int i = start, e = start + count; i < e; ++i) {
+ if (buffer[i] != 0xffff) {
+ buffer[end++] = buffer[i];
+ }
+ }
+ count = end;
+ // LOG(LOG_INFO, "CSRTL", "start %d count %d ccount %d\n", start, count, contextCount);
+ ubidi_writeReverse(buffer, count, shaped, count, UBIDI_DO_MIRRORING | UBIDI_OUTPUT_REVERSE
+ | UBIDI_KEEP_BASE_COMBINING, &status);
+ if (U_SUCCESS(status)) {
+ return count;
+ }
+ }
+
+ return -1;
+}
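
The fixed-length contract above is easy to picture: u_shapeArabic keeps the output the same length as the input so that start and count stay valid, and (with the options used above) marks slots freed by lam-alef ligatures with U+FFFF, which the caller then squeezes out. A hedged standalone sketch of that squeeze step:

    #include <stdint.h>

    // Drops the U+FFFF fillers left behind by fixed-length shaping and returns
    // the new logical length of the run.
    static int compactShapedRun(uint16_t* buf, int len) {
        int out = 0;
        for (int i = 0; i < len; ++i) {
            if (buf[i] != 0xFFFF) {
                buf[out++] = buf[i];
            }
        }
        return out;
    }
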
+
+/**
+ * Basic character-based layout supporting rtl and arabic shaping.
+ * Runs bidi on the text and generates a reordered, shaped line in buffer, returning
+ * the length.
+ * @text the text
+ * @len the length of the text in uchars
+ * @dir receives the resolved paragraph direction
+ * @buffer the buffer to receive the reordered, shaped line. Must have capacity of
+ * at least len jchars.
+ * @flags line bidi flags
+ * @return the length of the reordered, shaped line, or -1 if error
+ */
+jint TextLayout::layoutLine(const jchar* text, jint len, jint flags, int &dir, jchar* buffer,
+ UErrorCode &status) {
+ static const int RTL_OPTS = UBIDI_DO_MIRRORING | UBIDI_KEEP_BASE_COMBINING |
+ UBIDI_REMOVE_BIDI_CONTROLS | UBIDI_OUTPUT_REVERSE;
+
+ UBiDiLevel bidiReq = 0;
+ switch (flags) {
+ case kBidi_LTR: bidiReq = 0; break; // no ICU constant, canonical LTR level
+ case kBidi_RTL: bidiReq = 1; break; // no ICU constant, canonical RTL level
+ case kBidi_Default_LTR: bidiReq = UBIDI_DEFAULT_LTR; break;
+ case kBidi_Default_RTL: bidiReq = UBIDI_DEFAULT_RTL; break;
+ case kBidi_Force_LTR: memcpy(buffer, text, len * sizeof(jchar)); return len;
+ case kBidi_Force_RTL: return shapeRtlText(text, 0, len, len, buffer, status);
+ }
+
+ int32_t result = -1;
+
+ UBiDi* bidi = ubidi_open();
+ if (bidi) {
+ ubidi_setPara(bidi, text, len, bidiReq, NULL, &status);
+ if (U_SUCCESS(status)) {
+ dir = ubidi_getParaLevel(bidi) & 0x1; // 0 if ltr, 1 if rtl
+
+ int rc = ubidi_countRuns(bidi, &status);
+ if (U_SUCCESS(status)) {
+ // LOG(LOG_INFO, "LAYOUT", "para bidiReq=%d dir=%d rc=%d\n", bidiReq, dir, rc);
+
+ int32_t slen = 0;
+ for (int i = 0; i < rc; ++i) {
+ int32_t start;
+ int32_t length;
+ UBiDiDirection runDir = ubidi_getVisualRun(bidi, i, &start, &length);
+
+ if (runDir == UBIDI_RTL) {
+ slen += shapeRtlText(text + start, 0, length, length, buffer + slen, status);
+ } else {
+ memcpy(buffer + slen, text + start, length * sizeof(jchar));
+ slen += length;
+ }
+ }
+ if (U_SUCCESS(status)) {
+ result = slen;
+ }
+ }
+ }
+ ubidi_close(bidi);
+ }
+
+ return result;
+}
+
+// Draws or gets the path of a paragraph of text on a single line, running bidi and shaping.
+// This will draw if canvas is not null, otherwise path must be non-null and it will create
+// a path representing the text that would have been drawn.
+void TextLayout::handleText(SkPaint *paint, const jchar* text, jsize len,
+ jint bidiFlags, jfloat x, jfloat y,SkCanvas *canvas, SkPath *path) {
+
+ const jchar *workText = text;
+ jchar *buffer = NULL;
+ int dir = kDirection_LTR;
+ if (needsLayout(text, len, bidiFlags)) {
+ buffer =(jchar *) malloc(len * sizeof(jchar));
+ if (!buffer) {
+ return;
+ }
+ UErrorCode status = U_ZERO_ERROR;
+ len = layoutLine(text, len, bidiFlags, dir, buffer, status); // might change len, dir
+ if (!U_SUCCESS(status)) {
+ LOG(LOG_WARN, "LAYOUT", "drawText error %d\n", status);
+ free(buffer);
+ return; // can't render
+ }
+
+ workText = buffer; // use the shaped text
+ }
+
+ bool trimLeft = false;
+ bool trimRight = false;
+
+ SkPaint::Align horiz = paint->getTextAlign();
+ switch (horiz) {
+ case SkPaint::kLeft_Align: trimLeft = dir & kDirection_Mask; break;
+ case SkPaint::kCenter_Align: trimLeft = trimRight = true; break;
+ case SkPaint::kRight_Align: trimRight = !(dir & kDirection_Mask);
+ default: break;
+ }
+ const jchar* workLimit = workText + len;
+
+ if (trimLeft) {
+ while (workText < workLimit && *workText == ' ') {
+ ++workText;
+ }
+ }
+ if (trimRight) {
+ while (workLimit > workText && *(workLimit - 1) == ' ') {
+ --workLimit;
+ }
+ }
+
+ int32_t workBytes = (workLimit - workText) << 1;
+ SkScalar x_ = SkFloatToScalar(x);
+ SkScalar y_ = SkFloatToScalar(y);
+ if (canvas) {
+ canvas->drawText(workText, workBytes, x_, y_, *paint);
+ } else {
+ paint->getTextPath(workText, workBytes, x_, y_, path);
+ }
+
+ free(buffer);
+}
+
+void TextLayout::drawTextRun(SkPaint* paint, const jchar* chars,
+ jint start, jint count, jint contextCount,
+ int dirFlags, jfloat x, jfloat y, SkCanvas* canvas) {
+
+ SkScalar x_ = SkFloatToScalar(x);
+ SkScalar y_ = SkFloatToScalar(y);
+
+ uint8_t rtl = dirFlags & 0x1;
+ if (rtl) {
+ SkAutoSTMalloc<80, jchar> buffer(contextCount);
+ UErrorCode status = U_ZERO_ERROR;
+ count = shapeRtlText(chars, start, count, contextCount, buffer.get(), status);
+ if (U_SUCCESS(status)) {
+ canvas->drawText(buffer.get(), count << 1, x_, y_, *paint);
+ } else {
+ LOG(LOG_WARN, "LAYOUT", "drawTextRun error %d\n", status);
+ }
+ } else {
+ canvas->drawText(chars + start, count << 1, x_, y_, *paint);
+ }
+ }
+
+void TextLayout::getTextRunAdvances(SkPaint *paint, const jchar *chars, jint start,
+ jint count, jint contextCount, jint dirFlags,
+ jfloat *resultAdvances, jfloat &resultTotalAdvance) {
+ jchar buffer[contextCount];
+
+ SkScalar* scalarArray = (SkScalar *)resultAdvances;
+ resultTotalAdvance = 0;
+
+ // this is where we'd call harfbuzz
+ // for now we just use ushape.c
+
+ int widths;
+ const jchar* text;
+ if (dirFlags & 0x1) { // rtl, call arabic shaping in case
+ UErrorCode status = U_ZERO_ERROR;
+ // Use fixed length since we need to keep start and count valid
+ u_shapeArabic(chars, contextCount, buffer, contextCount,
+ U_SHAPE_LENGTH_FIXED_SPACES_NEAR |
+ U_SHAPE_TEXT_DIRECTION_LOGICAL | U_SHAPE_LETTERS_SHAPE |
+ U_SHAPE_X_LAMALEF_SUB_ALTERNATE, &status);
+ // we shouldn't fail unless there's an out of memory condition,
+ // in which case we're hosed anyway
+ for (int i = start, e = i + count; i < e; ++i) {
+ if (buffer[i] == 0xffff) {
+ buffer[i] = 0x200b; // zero-width-space for skia
+ }
+ }
+ text = buffer + start;
+ widths = paint->getTextWidths(text, count << 1, scalarArray);
+ } else {
+ text = chars + start;
+ widths = paint->getTextWidths(text, count << 1, scalarArray);
+ }
+
+ if (widths < count) {
+ // Skia operates on code points, not code units, so surrogate pairs return only
+ // one value. Expand the result so we have one value per UTF-16 code unit.
+
+        // Note, Skia's getTextWidths gets confused if it encounters a surrogate pair,
+ // leaving the remaining widths zero. Not nice.
+ for (int i = 0, p = 0; i < widths; ++i) {
+ resultTotalAdvance += resultAdvances[p++] = SkScalarToFloat(scalarArray[i]);
+ if (p < count && text[p] >= 0xdc00 && text[p] < 0xe000 &&
+ text[p-1] >= 0xd800 && text[p-1] < 0xdc00) {
+ resultAdvances[p++] = 0;
+ }
+ }
+ } else {
+ for (int i = 0; i < count; i++) {
+ resultTotalAdvance += resultAdvances[i] = SkScalarToFloat(scalarArray[i]);
+ }
+ }
+}
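
To make the surrogate handling above concrete, here is a hedged standalone sketch of the same expansion: Skia reports one width per code point, and the result is stretched to one advance per UTF-16 code unit by giving the low surrogate of each pair a zero advance (the names below are illustrative):

    #include <stdint.h>

    static void expandAdvancesToCodeUnits(const float* widths, int nWidths,
                                          const uint16_t* units, int nUnits,
                                          float* out) {
        int p = 0;
        for (int i = 0; i < nWidths && p < nUnits; ++i) {
            out[p++] = widths[i];
            // If we just emitted the high surrogate of a pair, pad the low
            // surrogate with a zero advance so out[] stays per-code-unit.
            if (p < nUnits &&
                    units[p] >= 0xDC00 && units[p] < 0xE000 &&
                    units[p - 1] >= 0xD800 && units[p - 1] < 0xDC00) {
                out[p++] = 0.0f;
            }
        }
    }
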
+
+
+// Draws a paragraph of text on a single line, running bidi and shaping
+void TextLayout::drawText(SkPaint* paint, const jchar* text, jsize len,
+ int bidiFlags, jfloat x, jfloat y, SkCanvas* canvas) {
+
+ handleText(paint, text, len, bidiFlags, x, y, canvas, NULL);
+}
+
+void TextLayout::getTextPath(SkPaint *paint, const jchar *text, jsize len,
+ jint bidiFlags, jfloat x, jfloat y, SkPath *path) {
+ handleText(paint, text, len, bidiFlags, x, y, NULL, path);
+}
+
+
+void TextLayout::drawTextOnPath(SkPaint* paint, const jchar* text, int count,
+ int bidiFlags, jfloat hOffset, jfloat vOffset,
+ SkPath* path, SkCanvas* canvas) {
+
+ SkScalar h_ = SkFloatToScalar(hOffset);
+ SkScalar v_ = SkFloatToScalar(vOffset);
+
+ if (!needsLayout(text, count, bidiFlags)) {
+ canvas->drawTextOnPathHV(text, count << 1, *path, h_, v_, *paint);
+ return;
+ }
+
+ SkAutoSTMalloc<80, jchar> buffer(count);
+ int dir = kDirection_LTR;
+ UErrorCode status = U_ZERO_ERROR;
+ count = layoutLine(text, count, bidiFlags, dir, buffer.get(), status);
+ if (U_SUCCESS(status)) {
+ canvas->drawTextOnPathHV(buffer.get(), count << 1, *path, h_, v_, *paint);
+ }
+}
+
+}
diff --git a/core/jni/android/graphics/TextLayout.h b/core/jni/android/graphics/TextLayout.h
new file mode 100644
index 0000000..c0d9f75
--- /dev/null
+++ b/core/jni/android/graphics/TextLayout.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+
+#include "SkCanvas.h"
+#include "SkPaint.h"
+#include "unicode/utypes.h"
+
+namespace android {
+
+class TextLayout {
+public:
+
+ enum {
+ kDirection_LTR = 0,
+ kDirection_RTL = 1,
+
+ kDirection_Mask = 0x1
+ };
+
+ enum {
+ kBidi_LTR = 0,
+ kBidi_RTL = 1,
+ kBidi_Default_LTR = 2,
+ kBidi_Default_RTL = 3,
+ kBidi_Force_LTR = 4,
+ kBidi_Force_RTL = 5,
+
+ kBidi_Mask = 0x7
+ };
+
+ /*
+ * Draws a unidirectional run of text.
+ */
+ static void drawTextRun(SkPaint* paint, const jchar* chars,
+ jint start, jint count, jint contextCount,
+ int dirFlags, jfloat x, jfloat y, SkCanvas* canvas);
+
+ static void getTextRunAdvances(SkPaint *paint, const jchar *chars, jint start,
+ jint count, jint contextCount, jint dirFlags,
+ jfloat *resultAdvances, jfloat &resultTotalAdvance);
+
+ static void drawText(SkPaint* paint, const jchar* text, jsize len,
+ jint bidiFlags, jfloat x, jfloat y, SkCanvas* canvas);
+
+ static void getTextPath(SkPaint *paint, const jchar *text, jsize len,
+ jint bidiFlags, jfloat x, jfloat y, SkPath *path);
+
+ static void drawTextOnPath(SkPaint* paint, const jchar* text, jsize len,
+ int bidiFlags, jfloat hOffset, jfloat vOffset,
+ SkPath* path, SkCanvas* canvas);
+
+private:
+ static bool needsLayout(const jchar* text, jint len, jint bidiFlags);
+ static int shapeRtlText(const jchar* context, jsize start, jsize count, jsize contextCount,
+ jchar* shaped, UErrorCode &status);
+ static jint layoutLine(const jchar* text, jint len, jint flags, int &dir, jchar* buffer,
+ UErrorCode &status);
+ static void handleText(SkPaint *paint, const jchar* text, jsize len,
+ int bidiFlags, jfloat x, jfloat y,SkCanvas *canvas, SkPath *path);
+};
+
+}
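
As a usage sketch, JNI glue for a char[] overload might route through this facade as follows (the wrapper name and its parameters are assumptions; only TextLayout::drawText itself comes from the header above):

    static void drawTextGlue(JNIEnv* env, SkCanvas* canvas, SkPaint* paint,
                             jcharArray text, jint index, jint count,
                             jint bidiFlags, jfloat x, jfloat y) {
        const jchar* chars = env->GetCharArrayElements(text, NULL);
        // Bidi analysis, shaping and alignment trimming all happen inside drawText.
        TextLayout::drawText(paint, chars + index, count, bidiFlags, x, y, canvas);
        env->ReleaseCharArrayElements(text, const_cast<jchar*>(chars), JNI_ABORT);
    }
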
diff --git a/core/jni/android_app_NativeActivity.cpp b/core/jni/android_app_NativeActivity.cpp
index 54a9c2a..acbf854 100644
--- a/core/jni/android_app_NativeActivity.cpp
+++ b/core/jni/android_app_NativeActivity.cpp
@@ -19,6 +19,7 @@
#include <poll.h>
#include <dlfcn.h>
+#include <fcntl.h>
#include <android_runtime/AndroidRuntime.h>
#include <android_runtime/android_view_Surface.h>
@@ -33,6 +34,9 @@
#include "android_view_InputChannel.h"
#include "android_view_KeyEvent.h"
+//#define LOG_TRACE(...)
+#define LOG_TRACE(...) LOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
+
namespace android
{
@@ -42,6 +46,8 @@
jmethodID dispatchUnhandledKeyEvent;
jmethodID setWindowFlags;
jmethodID setWindowFormat;
+ jmethodID showIme;
+ jmethodID hideIme;
} gNativeActivityClassInfo;
// ------------------------------------------------------------------------
@@ -56,6 +62,8 @@
CMD_DEF_KEY = 1,
CMD_SET_WINDOW_FORMAT,
CMD_SET_WINDOW_FLAGS,
+ CMD_SHOW_SOFT_INPUT,
+ CMD_HIDE_SOFT_INPUT,
};
static void write_work(int fd, int32_t cmd, int32_t arg1=0, int32_t arg2=0) {
@@ -64,6 +72,8 @@
work.arg1 = arg1;
work.arg2 = arg2;
+ LOG_TRACE("write_work: cmd=%d", cmd);
+
restart:
int res = write(fd, &work, sizeof(work));
if (res < 0 && errno == EINTR) {
@@ -88,43 +98,177 @@
// ------------------------------------------------------------------------
-/*
- * Specialized input queue that allows unhandled key events to be dispatched
- * back to the native activity's Java framework code.
- */
-struct MyInputQueue : AInputQueue {
- explicit MyInputQueue(const android::sp<android::InputChannel>& channel, int workWrite)
- : AInputQueue(channel), mWorkWrite(workWrite) {
+} // namespace android
+
+using namespace android;
+
+AInputQueue::AInputQueue(const sp<InputChannel>& channel, int workWrite) :
+ mWorkWrite(workWrite), mConsumer(channel) {
+ int msgpipe[2];
+ if (pipe(msgpipe)) {
+ LOGW("could not create pipe: %s", strerror(errno));
+ mDispatchKeyRead = mDispatchKeyWrite = -1;
+ } else {
+ mDispatchKeyRead = msgpipe[0];
+ mDispatchKeyWrite = msgpipe[1];
+ int result = fcntl(mDispatchKeyRead, F_SETFL, O_NONBLOCK);
+ SLOGW_IF(result != 0, "Could not make AInputQueue read pipe "
+ "non-blocking: %s", strerror(errno));
+ result = fcntl(mDispatchKeyWrite, F_SETFL, O_NONBLOCK);
+ SLOGW_IF(result != 0, "Could not make AInputQueue write pipe "
+ "non-blocking: %s", strerror(errno));
}
+}
+
+AInputQueue::~AInputQueue() {
+ close(mDispatchKeyRead);
+ close(mDispatchKeyWrite);
+}
+
+void AInputQueue::attachLooper(ALooper* looper, ALooper_callbackFunc* callback, void* data) {
+ mPollLoop = static_cast<android::PollLoop*>(looper);
+ mPollLoop->setLooperCallback(mConsumer.getChannel()->getReceivePipeFd(),
+ POLLIN, callback, data);
+ mPollLoop->setLooperCallback(mDispatchKeyRead,
+ POLLIN, callback, data);
+}
+
+void AInputQueue::detachLooper() {
+ mPollLoop->removeCallback(mConsumer.getChannel()->getReceivePipeFd());
+ mPollLoop->removeCallback(mDispatchKeyRead);
+}
+
+int32_t AInputQueue::hasEvents() {
+ struct pollfd pfd[2];
+
+ pfd[0].fd = mConsumer.getChannel()->getReceivePipeFd();
+ pfd[0].events = POLLIN;
+ pfd[0].revents = 0;
+ pfd[1].fd = mDispatchKeyRead;
+    pfd[1].events = POLLIN;
+    pfd[1].revents = 0;
- virtual void doDefaultKey(android::KeyEvent* keyEvent) {
+ int nfd = poll(pfd, 2, 0);
+ if (nfd <= 0) return 0;
+ return (pfd[0].revents == POLLIN || pfd[1].revents == POLLIN) ? 1 : -1;
+}
+
+int32_t AInputQueue::getEvent(AInputEvent** outEvent) {
+ *outEvent = NULL;
+
+ char byteread;
+ ssize_t nRead = read(mDispatchKeyRead, &byteread, 1);
+ if (nRead == 1) {
mLock.lock();
- LOGI("Default key: pending=%d write=%d\n", mPendingKeys.size(), mWorkWrite);
- if (mPendingKeys.size() <= 0 && mWorkWrite >= 0) {
- write_work(mWorkWrite, CMD_DEF_KEY);
- }
- mPendingKeys.add(keyEvent);
- mLock.unlock();
- }
-
- KeyEvent* getNextEvent() {
- KeyEvent* event = NULL;
-
- mLock.lock();
- if (mPendingKeys.size() > 0) {
- event = mPendingKeys[0];
- mPendingKeys.removeAt(0);
+ if (mDispatchingKeys.size() > 0) {
+ KeyEvent* kevent = mDispatchingKeys[0];
+ *outEvent = kevent;
+ mDispatchingKeys.removeAt(0);
+ mDeliveringKeys.add(kevent);
}
mLock.unlock();
-
- return event;
+ if (*outEvent != NULL) {
+ return 0;
+ }
}
- int mWorkWrite;
+ int32_t res = mConsumer.receiveDispatchSignal();
+ if (res != android::OK) {
+ LOGE("channel '%s' ~ Failed to receive dispatch signal. status=%d",
+ mConsumer.getChannel()->getName().string(), res);
+ return -1;
+ }
+
+ InputEvent* myEvent = NULL;
+ res = mConsumer.consume(&mInputEventFactory, &myEvent);
+ if (res != android::OK) {
+ LOGW("channel '%s' ~ Failed to consume input event. status=%d",
+ mConsumer.getChannel()->getName().string(), res);
+ mConsumer.sendFinishedSignal();
+ return -1;
+ }
+
+ *outEvent = myEvent;
+ return 0;
+}
+
+void AInputQueue::finishEvent(AInputEvent* event, bool handled) {
+ bool needFinished = true;
+
+ if (!handled && ((InputEvent*)event)->getType() == INPUT_EVENT_TYPE_KEY
+ && ((KeyEvent*)event)->hasDefaultAction()) {
+ // The app didn't handle this, but it may have a default action
+ // associated with it. We need to hand this back to Java to be
+ // executed.
+ doDefaultKey((KeyEvent*)event);
+ needFinished = false;
+ }
+
+ const size_t N = mDeliveringKeys.size();
+ for (size_t i=0; i<N; i++) {
+ if (mDeliveringKeys[i] == event) {
+ delete event;
+ mDeliveringKeys.removeAt(i);
+ needFinished = false;
+ break;
+ }
+ }
- Mutex mLock;
- Vector<KeyEvent*> mPendingKeys;
-};
+ if (needFinished) {
+ int32_t res = mConsumer.sendFinishedSignal();
+ if (res != android::OK) {
+ LOGW("Failed to send finished signal on channel '%s'. status=%d",
+ mConsumer.getChannel()->getName().string(), res);
+ }
+ }
+}
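
A hedged sketch of how a native client might drive this queue, using only the methods defined in this file (the loop shape and names are illustrative, not part of the patch):

    static void pumpInputQueue(AInputQueue* queue) {
        AInputEvent* event = NULL;
        while (queue->hasEvents() > 0 && queue->getEvent(&event) == 0) {
            bool handled = false;  // app-specific dispatch would set this
            // Unhandled key events that carry a default action are bounced back
            // to the Java side by finishEvent() via doDefaultKey().
            queue->finishEvent(event, handled);
            event = NULL;
        }
    }
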
+
+void AInputQueue::dispatchEvent(android::KeyEvent* event) {
+ mLock.lock();
+ LOG_TRACE("dispatchEvent: dispatching=%d write=%d\n", mDispatchingKeys.size(),
+ mDispatchKeyWrite);
+ mDispatchingKeys.add(event);
+ mLock.unlock();
+
+restart:
+ char dummy = 0;
+ int res = write(mDispatchKeyWrite, &dummy, sizeof(dummy));
+ if (res < 0 && errno == EINTR) {
+ goto restart;
+ }
+
+ if (res == sizeof(dummy)) return;
+
+ if (res < 0) LOGW("Failed writing to dispatch fd: %s", strerror(errno));
+ else LOGW("Truncated writing to dispatch fd: %d", res);
+}
+
+KeyEvent* AInputQueue::consumeUnhandledEvent() {
+ KeyEvent* event = NULL;
+
+ mLock.lock();
+ if (mPendingKeys.size() > 0) {
+ event = mPendingKeys[0];
+ mPendingKeys.removeAt(0);
+ }
+ mLock.unlock();
+
+ LOG_TRACE("consumeUnhandledEvent: KeyEvent=%p", event);
+
+ return event;
+}
+
+void AInputQueue::doDefaultKey(KeyEvent* keyEvent) {
+ mLock.lock();
+ LOG_TRACE("Default key: pending=%d write=%d\n", mPendingKeys.size(), mWorkWrite);
+ if (mPendingKeys.size() <= 0 && mWorkWrite >= 0) {
+ write_work(mWorkWrite, CMD_DEF_KEY);
+ }
+ mPendingKeys.add(keyEvent);
+ mLock.unlock();
+}
+
+namespace android {
// ------------------------------------------------------------------------
@@ -133,8 +277,8 @@
*/
struct NativeCode : public ANativeActivity {
NativeCode(void* _dlhandle, ANativeActivity_createFunc* _createFunc) {
- memset((ANativeActivity*)this, sizeof(ANativeActivity), 0);
- memset(&callbacks, sizeof(callbacks), 0);
+ memset((ANativeActivity*)this, 0, sizeof(ANativeActivity));
+ memset(&callbacks, 0, sizeof(callbacks));
dlhandle = _dlhandle;
createActivityFunc = _createFunc;
nativeWindow = NULL;
@@ -188,7 +332,7 @@
sp<InputChannel> ic =
android_view_InputChannel_getInputChannel(env, _channel);
if (ic != NULL) {
- nativeInputQueue = new MyInputQueue(ic, mainWorkWrite);
+ nativeInputQueue = new AInputQueue(ic, mainWorkWrite);
if (nativeInputQueue->getConsumer().initialize() != android::OK) {
delete nativeInputQueue;
nativeInputQueue = NULL;
@@ -210,8 +354,11 @@
String8 externalDataPath;
sp<ANativeWindow> nativeWindow;
+ int32_t lastWindowWidth;
+ int32_t lastWindowHeight;
+
jobject inputChannel;
- struct MyInputQueue* nativeInputQueue;
+ struct AInputQueue* nativeInputQueue;
// These are used to wake up the main thread to process work.
int mainWorkRead;
@@ -231,6 +378,18 @@
write_work(code->mainWorkWrite, CMD_SET_WINDOW_FLAGS, values, mask);
}
+void android_NativeActivity_showSoftInput(
+ ANativeActivity* activity, int32_t flags) {
+ NativeCode* code = static_cast<NativeCode*>(activity);
+ write_work(code->mainWorkWrite, CMD_SHOW_SOFT_INPUT, flags);
+}
+
+void android_NativeActivity_hideSoftInput(
+ ANativeActivity* activity, int32_t flags) {
+ NativeCode* code = static_cast<NativeCode*>(activity);
+ write_work(code->mainWorkWrite, CMD_HIDE_SOFT_INPUT, flags);
+}
+
// ------------------------------------------------------------------------
/*
@@ -246,10 +405,13 @@
if (!read_work(code->mainWorkRead, &work)) {
return true;
}
+
+ LOG_TRACE("mainWorkCallback: cmd=%d", work.cmd);
+
switch (work.cmd) {
case CMD_DEF_KEY: {
KeyEvent* keyEvent;
- while ((keyEvent=code->nativeInputQueue->getNextEvent()) != NULL) {
+ while ((keyEvent=code->nativeInputQueue->consumeUnhandledEvent()) != NULL) {
jobject inputEventObj = android_view_KeyEvent_fromNative(
code->env, keyEvent);
code->env->CallVoidMethod(code->clazz,
@@ -269,6 +431,14 @@
code->env->CallVoidMethod(code->clazz,
gNativeActivityClassInfo.setWindowFlags, work.arg1, work.arg2);
} break;
+ case CMD_SHOW_SOFT_INPUT: {
+ code->env->CallVoidMethod(code->clazz,
+ gNativeActivityClassInfo.showIme, work.arg1);
+ } break;
+ case CMD_HIDE_SOFT_INPUT: {
+ code->env->CallVoidMethod(code->clazz,
+ gNativeActivityClassInfo.hideIme, work.arg1);
+ } break;
default:
LOGW("Unknown work command: %d", work.cmd);
break;
@@ -283,6 +453,8 @@
loadNativeCode_native(JNIEnv* env, jobject clazz, jstring path, jobject messageQueue,
jstring internalDataDir, jstring externalDataDir, int sdkVersion)
{
+ LOG_TRACE("loadNativeCode_native");
+
const char* pathStr = env->GetStringUTFChars(path, NULL);
NativeCode* code = NULL;
@@ -314,6 +486,12 @@
}
code->mainWorkRead = msgpipe[0];
code->mainWorkWrite = msgpipe[1];
+ int result = fcntl(code->mainWorkRead, F_SETFL, O_NONBLOCK);
+ SLOGW_IF(result != 0, "Could not make main work read pipe "
+ "non-blocking: %s", strerror(errno));
+ result = fcntl(code->mainWorkWrite, F_SETFL, O_NONBLOCK);
+ SLOGW_IF(result != 0, "Could not make main work write pipe "
+ "non-blocking: %s", strerror(errno));
code->pollLoop->setCallback(code->mainWorkRead, POLLIN, mainWorkCallback, code);
code->ANativeActivity::callbacks = &code->callbacks;
@@ -346,6 +524,7 @@
static void
unloadNativeCode_native(JNIEnv* env, jobject clazz, jint handle)
{
+ LOG_TRACE("unloadNativeCode_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
delete code;
@@ -355,6 +534,7 @@
static void
onStart_native(JNIEnv* env, jobject clazz, jint handle)
{
+ LOG_TRACE("onStart_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
if (code->callbacks.onStart != NULL) {
@@ -366,6 +546,7 @@
static void
onResume_native(JNIEnv* env, jobject clazz, jint handle)
{
+ LOG_TRACE("onResume_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
if (code->callbacks.onResume != NULL) {
@@ -377,6 +558,7 @@
static void
onSaveInstanceState_native(JNIEnv* env, jobject clazz, jint handle)
{
+ LOG_TRACE("onSaveInstanceState_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
if (code->callbacks.onSaveInstanceState != NULL) {
@@ -389,6 +571,7 @@
static void
onPause_native(JNIEnv* env, jobject clazz, jint handle)
{
+ LOG_TRACE("onPause_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
if (code->callbacks.onPause != NULL) {
@@ -400,6 +583,7 @@
static void
onStop_native(JNIEnv* env, jobject clazz, jint handle)
{
+ LOG_TRACE("onStop_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
if (code->callbacks.onStop != NULL) {
@@ -411,6 +595,7 @@
static void
onLowMemory_native(JNIEnv* env, jobject clazz, jint handle)
{
+ LOG_TRACE("onLowMemory_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
if (code->callbacks.onLowMemory != NULL) {
@@ -422,6 +607,7 @@
static void
onWindowFocusChanged_native(JNIEnv* env, jobject clazz, jint handle, jboolean focused)
{
+ LOG_TRACE("onWindowFocusChanged_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
if (code->callbacks.onWindowFocusChanged != NULL) {
@@ -433,6 +619,7 @@
static void
onSurfaceCreated_native(JNIEnv* env, jobject clazz, jint handle, jobject surface)
{
+ LOG_TRACE("onSurfaceCreated_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
code->setSurface(surface);
@@ -443,10 +630,17 @@
}
}
+static int32_t getWindowProp(ANativeWindow* window, int what) {
+ int value;
+ int res = window->query(window, what, &value);
+ return res < 0 ? res : value;
+}
+
static void
onSurfaceChanged_native(JNIEnv* env, jobject clazz, jint handle, jobject surface,
jint format, jint width, jint height)
{
+ LOG_TRACE("onSurfaceChanged_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
sp<ANativeWindow> oldNativeWindow = code->nativeWindow;
@@ -456,10 +650,41 @@
code->callbacks.onNativeWindowDestroyed(code,
oldNativeWindow.get());
}
- if (code->nativeWindow != NULL && code->callbacks.onNativeWindowCreated != NULL) {
- code->callbacks.onNativeWindowCreated(code,
- code->nativeWindow.get());
+ if (code->nativeWindow != NULL) {
+ if (code->callbacks.onNativeWindowCreated != NULL) {
+ code->callbacks.onNativeWindowCreated(code,
+ code->nativeWindow.get());
+ }
+ code->lastWindowWidth = getWindowProp(code->nativeWindow.get(),
+ NATIVE_WINDOW_WIDTH);
+ code->lastWindowHeight = getWindowProp(code->nativeWindow.get(),
+ NATIVE_WINDOW_HEIGHT);
}
+ } else {
+ // Maybe it resized?
+ int32_t newWidth = getWindowProp(code->nativeWindow.get(),
+ NATIVE_WINDOW_WIDTH);
+ int32_t newHeight = getWindowProp(code->nativeWindow.get(),
+ NATIVE_WINDOW_HEIGHT);
+ if (newWidth != code->lastWindowWidth
+ || newHeight != code->lastWindowHeight) {
+ if (code->callbacks.onNativeWindowResized != NULL) {
+ code->callbacks.onNativeWindowResized(code,
+ code->nativeWindow.get());
+ }
+ }
+ }
+ }
+}
+
+static void
+onSurfaceRedrawNeeded_native(JNIEnv* env, jobject clazz, jint handle)
+{
+ LOG_TRACE("onSurfaceRedrawNeeded_native");
+ if (handle != 0) {
+ NativeCode* code = (NativeCode*)handle;
+ if (code->nativeWindow != NULL && code->callbacks.onNativeWindowRedrawNeeded != NULL) {
+ code->callbacks.onNativeWindowRedrawNeeded(code, code->nativeWindow.get());
}
}
}
@@ -467,6 +692,7 @@
static void
onSurfaceDestroyed_native(JNIEnv* env, jobject clazz, jint handle, jobject surface)
{
+ LOG_TRACE("onSurfaceDestroyed_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
if (code->nativeWindow != NULL && code->callbacks.onNativeWindowDestroyed != NULL) {
@@ -480,6 +706,7 @@
static void
onInputChannelCreated_native(JNIEnv* env, jobject clazz, jint handle, jobject channel)
{
+ LOG_TRACE("onInputChannelCreated_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
status_t err = code->setInputChannel(channel);
@@ -498,6 +725,7 @@
static void
onInputChannelDestroyed_native(JNIEnv* env, jobject clazz, jint handle, jobject channel)
{
+ LOG_TRACE("onInputChannelDestroyed_native");
if (handle != 0) {
NativeCode* code = (NativeCode*)handle;
if (code->nativeInputQueue != NULL
@@ -509,6 +737,38 @@
}
}
+static void
+onContentRectChanged_native(JNIEnv* env, jobject clazz, jint handle,
+ jint x, jint y, jint w, jint h)
+{
+ LOG_TRACE("onContentRectChanged_native");
+ if (handle != 0) {
+ NativeCode* code = (NativeCode*)handle;
+ if (code->callbacks.onContentRectChanged != NULL) {
+ ARect rect;
+ rect.left = x;
+ rect.top = y;
+ rect.right = x+w;
+ rect.bottom = y+h;
+ code->callbacks.onContentRectChanged(code, &rect);
+ }
+ }
+}
+
+static void
+dispatchKeyEvent_native(JNIEnv* env, jobject clazz, jint handle, jobject eventObj)
+{
+ LOG_TRACE("dispatchKeyEvent_native");
+ if (handle != 0) {
+ NativeCode* code = (NativeCode*)handle;
+ if (code->nativeInputQueue != NULL) {
+ KeyEvent* event = new KeyEvent();
+ android_view_KeyEvent_toNative(env, eventObj, INPUT_EVENT_NATURE_KEY, event);
+ code->nativeInputQueue->dispatchEvent(event);
+ }
+ }
+}
+
static const JNINativeMethod g_methods[] = {
{ "loadNativeCode", "(Ljava/lang/String;Landroid/os/MessageQueue;Ljava/lang/String;Ljava/lang/String;I)I",
(void*)loadNativeCode_native },
@@ -522,9 +782,12 @@
{ "onWindowFocusChangedNative", "(IZ)V", (void*)onWindowFocusChanged_native },
{ "onSurfaceCreatedNative", "(ILandroid/view/Surface;)V", (void*)onSurfaceCreated_native },
{ "onSurfaceChangedNative", "(ILandroid/view/Surface;III)V", (void*)onSurfaceChanged_native },
+ { "onSurfaceRedrawNeededNative", "(ILandroid/view/Surface;)V", (void*)onSurfaceRedrawNeeded_native },
{ "onSurfaceDestroyedNative", "(I)V", (void*)onSurfaceDestroyed_native },
{ "onInputChannelCreatedNative", "(ILandroid/view/InputChannel;)V", (void*)onInputChannelCreated_native },
{ "onInputChannelDestroyedNative", "(ILandroid/view/InputChannel;)V", (void*)onInputChannelDestroyed_native },
+ { "onContentRectChangedNative", "(IIIII)V", (void*)onContentRectChanged_native },
+ { "dispatchKeyEventNative", "(ILandroid/view/KeyEvent;)V", (void*)dispatchKeyEvent_native },
};
static const char* const kNativeActivityPathName = "android/app/NativeActivity";
@@ -554,6 +817,12 @@
GET_METHOD_ID(gNativeActivityClassInfo.setWindowFormat,
gNativeActivityClassInfo.clazz,
"setWindowFormat", "(I)V");
+ GET_METHOD_ID(gNativeActivityClassInfo.showIme,
+ gNativeActivityClassInfo.clazz,
+ "showIme", "(I)V");
+ GET_METHOD_ID(gNativeActivityClassInfo.hideIme,
+ gNativeActivityClassInfo.clazz,
+ "hideIme", "(I)V");
return AndroidRuntime::registerNativeMethods(
env, kNativeActivityPathName,
diff --git a/core/jni/android_database_SQLiteDatabase.cpp b/core/jni/android_database_SQLiteDatabase.cpp
index 5a92193..290b532 100644
--- a/core/jni/android_database_SQLiteDatabase.cpp
+++ b/core/jni/android_database_SQLiteDatabase.cpp
@@ -62,6 +62,7 @@
};
static jfieldID offset_db_handle;
+static jmethodID method_custom_function_callback;
static char *createStr(const char *path, short extra) {
int len = strlen(path) + extra;
@@ -458,6 +459,62 @@
}
}
+static void custom_function_callback(sqlite3_context * context, int argc, sqlite3_value ** argv) {
+ JNIEnv* env = AndroidRuntime::getJNIEnv();
+ if (!env) {
+ LOGE("custom_function_callback cannot call into Java on this thread");
+ return;
+ }
+
+ // pack up the arguments into a string array
+ jobjectArray strArray = env->NewObjectArray(argc, env->FindClass("java/lang/String"), NULL);
+ if (!strArray) {
+ jniThrowException(env, "java/lang/OutOfMemoryError", NULL);
+ return;
+ }
+ for (int i = 0; i < argc; i++) {
+ char* arg = (char *)sqlite3_value_text(argv[i]);
+ jobject obj = env->NewStringUTF(arg);
+ if (!obj) {
+ jniThrowException(env, "java/lang/OutOfMemoryError", NULL);
+ return;
+ }
+ env->SetObjectArrayElement(strArray, i, obj);
+ env->DeleteLocalRef(obj);
+ }
+
+ // get global ref to CustomFunction object from our user data
+ jobject function = (jobject)sqlite3_user_data(context);
+ env->CallVoidMethod(function, method_custom_function_callback, strArray);
+}
+
+static jint native_addCustomFunction(JNIEnv* env, jobject object,
+ jstring name, jint numArgs, jobject function)
+{
+ sqlite3 * handle = (sqlite3 *)env->GetIntField(object, offset_db_handle);
+ char const *nameStr = env->GetStringUTFChars(name, NULL);
+ jobject ref = env->NewGlobalRef(function);
+ LOGD("native_addCustomFunction %s ref: %d", nameStr, ref);
+ int err = sqlite3_create_function(handle, nameStr, numArgs, SQLITE_UTF8,
+ (void *)ref, custom_function_callback, NULL, NULL);
+ env->ReleaseStringUTFChars(name, nameStr);
+
+ if (err == SQLITE_OK)
+ return (int)ref;
+ else {
+ LOGE("sqlite3_create_function returned %d", err);
+ env->DeleteGlobalRef(ref);
+ throw_sqlite3_exception(env, handle);
+ return 0;
+ }
+}
+
+static void native_releaseCustomFunction(JNIEnv* env, jobject object, jint ref)
+{
+ LOGD("native_releaseCustomFunction %d", ref);
+ env->DeleteGlobalRef((jobject)ref);
+}
+
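For comparison, the same SQLite API used above can register a plain C callback with no JNI involved; a minimal hedged sketch (the function name and behaviour are invented for illustration):

    #include <sqlite3.h>

    // SQL function echo_first(x, ...) that returns its first argument as text.
    static void echo_first(sqlite3_context* ctx, int argc, sqlite3_value** argv) {
        if (argc > 0 && sqlite3_value_type(argv[0]) != SQLITE_NULL) {
            sqlite3_result_text(ctx, (const char*) sqlite3_value_text(argv[0]), -1,
                                SQLITE_TRANSIENT);
        } else {
            sqlite3_result_null(ctx);
        }
    }

    // Registration mirrors native_addCustomFunction, minus the global ref:
    //   sqlite3_create_function(db, "echo_first", -1, SQLITE_UTF8, NULL,
    //                           echo_first, NULL, NULL);
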
static JNINativeMethod sMethods[] =
{
/* name, signature, funcPtr */
@@ -472,6 +529,10 @@
{"native_getDbLookaside", "()I", (void *)native_getDbLookaside},
{"releaseMemory", "()I", (void *)native_releaseMemory},
{"native_finalize", "(I)V", (void *)native_finalize},
+ {"native_addCustomFunction",
+ "(Ljava/lang/String;ILandroid/database/sqlite/SQLiteDatabase$CustomFunction;)I",
+ (void *)native_addCustomFunction},
+ {"native_releaseCustomFunction", "(I)V", (void *)native_releaseCustomFunction},
};
int register_android_database_SQLiteDatabase(JNIEnv *env)
@@ -490,6 +551,17 @@
return -1;
}
+ clazz = env->FindClass("android/database/sqlite/SQLiteDatabase$CustomFunction");
+ if (clazz == NULL) {
+ LOGE("Can't find android/database/sqlite/SQLiteDatabase$CustomFunction\n");
+ return -1;
+ }
+ method_custom_function_callback = env->GetMethodID(clazz, "callback", "([Ljava/lang/String;)V");
+ if (method_custom_function_callback == NULL) {
+ LOGE("Can't find method SQLiteDatabase.CustomFunction.callback\n");
+ return -1;
+ }
+
return AndroidRuntime::registerNativeMethods(env, "android/database/sqlite/SQLiteDatabase",
sMethods, NELEM(sMethods));
}
diff --git a/core/jni/android_net_NetUtils.cpp b/core/jni/android_net_NetUtils.cpp
index feb0dad..3cde9d6 100644
--- a/core/jni/android_net_NetUtils.cpp
+++ b/core/jni/android_net_NetUtils.cpp
@@ -222,10 +222,10 @@
{ "enableInterface", "(Ljava/lang/String;)I", (void *)android_net_utils_enableInterface },
{ "disableInterface", "(Ljava/lang/String;)I", (void *)android_net_utils_disableInterface },
- { "addHostRoute", "(Ljava/lang/String;I)I", (void *)android_net_utils_addHostRoute },
+ { "addHostRouteNative", "(Ljava/lang/String;I)I", (void *)android_net_utils_addHostRoute },
{ "removeHostRoutes", "(Ljava/lang/String;)I", (void *)android_net_utils_removeHostRoutes },
- { "setDefaultRoute", "(Ljava/lang/String;I)I", (void *)android_net_utils_setDefaultRoute },
- { "getDefaultRoute", "(Ljava/lang/String;)I", (void *)android_net_utils_getDefaultRoute },
+ { "setDefaultRouteNative", "(Ljava/lang/String;I)I", (void *)android_net_utils_setDefaultRoute },
+ { "getDefaultRouteNative", "(Ljava/lang/String;)I", (void *)android_net_utils_getDefaultRoute },
{ "removeDefaultRoute", "(Ljava/lang/String;)I", (void *)android_net_utils_removeDefaultRoute },
{ "resetConnections", "(Ljava/lang/String;)I", (void *)android_net_utils_resetConnections },
{ "runDhcp", "(Ljava/lang/String;Landroid/net/DhcpInfo;)Z", (void *)android_net_utils_runDhcp },
diff --git a/core/jni/android_os_Debug.cpp b/core/jni/android_os_Debug.cpp
index 3ee404a..4a877d2 100644
--- a/core/jni/android_os_Debug.cpp
+++ b/core/jni/android_os_Debug.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#define LOG_TAG "android.os.Debug"
#include "JNIHelp.h"
#include "jni.h"
#include "utils/misc.h"
@@ -24,6 +25,8 @@
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
+#include <errno.h>
+#include <assert.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
@@ -274,6 +277,176 @@
jint android_os_Debug_getProxyObjectCount(JNIEnv* env, jobject clazz);
jint android_os_Debug_getDeathObjectCount(JNIEnv* env, jobject clazz);
+
+#ifdef HAVE_ANDROID_OS
+/* pulled out of bionic */
+extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
+ size_t* infoSize, size_t* totalMemory, size_t* backtraceSize);
+extern "C" void free_malloc_leak_info(uint8_t* info);
+#define SIZE_FLAG_ZYGOTE_CHILD (1<<31)
+#define BACKTRACE_SIZE 32
+
+/*
+ * This is a qsort() callback.
+ *
+ * See dumpNativeHeap() for comments about the data format and sort order.
+ */
+static int compareHeapRecords(const void* vrec1, const void* vrec2)
+{
+ const size_t* rec1 = (const size_t*) vrec1;
+ const size_t* rec2 = (const size_t*) vrec2;
+ size_t size1 = *rec1;
+ size_t size2 = *rec2;
+
+ if (size1 < size2) {
+ return 1;
+ } else if (size1 > size2) {
+ return -1;
+ }
+
+ intptr_t* bt1 = (intptr_t*)(rec1 + 2);
+ intptr_t* bt2 = (intptr_t*)(rec2 + 2);
+ for (size_t idx = 0; idx < BACKTRACE_SIZE; idx++) {
+ intptr_t addr1 = bt1[idx];
+ intptr_t addr2 = bt2[idx];
+ if (addr1 == addr2) {
+ if (addr1 == 0)
+ break;
+ continue;
+ }
+ if (addr1 < addr2) {
+ return -1;
+ } else if (addr1 > addr2) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * The get_malloc_leak_info() call returns an array of structs that
+ * look like this:
+ *
+ * size_t size
+ * size_t allocations
+ * intptr_t backtrace[32]
+ *
+ * "size" is the size of the allocation, "backtrace" is a fixed-size
+ * array of function pointers, and "allocations" is the number of
+ * allocations with the exact same size and backtrace.
+ *
+ * The entries are sorted by descending total size (i.e. size*allocations)
+ * then allocation count. For best results with "diff" we'd like to sort
+ * primarily by individual size then stack trace. Since the entries are
+ * fixed-size, and we're allowed (by the current implementation) to mangle
+ * them, we can do this in place.
+ */
+static void dumpNativeHeap(FILE* fp)
+{
+ uint8_t* info = NULL;
+ size_t overallSize, infoSize, totalMemory, backtraceSize;
+
+ get_malloc_leak_info(&info, &overallSize, &infoSize, &totalMemory,
+ &backtraceSize);
+ if (info == NULL) {
+ fprintf(fp, "Native heap dump not available. To enable, run these"
+ " commands (requires root):\n");
+ fprintf(fp, "$ adb shell setprop libc.debug.malloc 1\n");
+ fprintf(fp, "$ adb shell stop\n");
+ fprintf(fp, "$ adb shell start\n");
+ return;
+ }
+ assert(infoSize != 0);
+ assert(overallSize % infoSize == 0);
+
+ fprintf(fp, "Android Native Heap Dump v1.0\n\n");
+
+ size_t recordCount = overallSize / infoSize;
+ fprintf(fp, "Total memory: %zu\n", totalMemory);
+ fprintf(fp, "Allocation records: %zd\n", recordCount);
+ if (backtraceSize != BACKTRACE_SIZE) {
+ fprintf(fp, "WARNING: mismatched backtrace sizes (%d vs. %d)\n",
+ backtraceSize, BACKTRACE_SIZE);
+ }
+ fprintf(fp, "\n");
+
+ /* re-sort the entries */
+ qsort(info, recordCount, infoSize, compareHeapRecords);
+
+ /* dump the entries to the file */
+ const uint8_t* ptr = info;
+ for (size_t idx = 0; idx < recordCount; idx++) {
+ size_t size = *(size_t*) ptr;
+ size_t allocations = *(size_t*) (ptr + sizeof(size_t));
+ intptr_t* backtrace = (intptr_t*) (ptr + sizeof(size_t) * 2);
+
+ fprintf(fp, "z %d sz %8zu num %4zu bt",
+ (size & SIZE_FLAG_ZYGOTE_CHILD) != 0,
+ size & ~SIZE_FLAG_ZYGOTE_CHILD,
+ allocations);
+ for (size_t bt = 0; bt < backtraceSize; bt++) {
+ if (backtrace[bt] == 0) {
+ break;
+ } else {
+ fprintf(fp, " %08x", backtrace[bt]);
+ }
+ }
+ fprintf(fp, "\n");
+
+ ptr += infoSize;
+ }
+
+ fprintf(fp, "END\n");
+ free_malloc_leak_info(info);
+}
+#endif /*HAVE_ANDROID_OS*/
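
The record layout described above can be pictured as a struct; this is an illustrative assumption only, since the real code walks the raw byte stream using infoSize as the stride:

    struct HeapRecord {
        size_t   size;                       // allocation size; top bit flags zygote-child memory
        size_t   allocations;                // allocations sharing this size and backtrace
        intptr_t backtrace[BACKTRACE_SIZE];  // return addresses, zero-terminated if shorter
    };
    // get_malloc_leak_info() returns overallSize / infoSize such records, sorted by
    // descending total size (size * allocations); dumpNativeHeap() re-sorts them by
    // individual size, then by backtrace, so diffs between dumps line up.
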
+
+/*
+ * Dump the native heap, writing human-readable output to the specified
+ * file descriptor.
+ */
+static void android_os_Debug_dumpNativeHeap(JNIEnv* env, jobject clazz,
+ jobject fileDescriptor)
+{
+ if (fileDescriptor == NULL) {
+ jniThrowNullPointerException(env, NULL);
+ return;
+ }
+ int origFd = jniGetFDFromFileDescriptor(env, fileDescriptor);
+ if (origFd < 0) {
+ jniThrowRuntimeException(env, "Invalid file descriptor");
+ return;
+ }
+
+ /* dup() the descriptor so we don't close the original with fclose() */
+ int fd = dup(origFd);
+ if (fd < 0) {
+ LOGW("dup(%d) failed: %s\n", origFd, strerror(errno));
+ jniThrowRuntimeException(env, "dup() failed");
+ return;
+ }
+
+ FILE* fp = fdopen(fd, "w");
+ if (fp == NULL) {
+ LOGW("fdopen(%d) failed: %s\n", fd, strerror(errno));
+ close(fd);
+ jniThrowRuntimeException(env, "fdopen() failed");
+ return;
+ }
+
+#ifdef HAVE_ANDROID_OS
+ LOGD("Native heap dump starting...\n");
+ dumpNativeHeap(fp);
+ LOGD("Native heap dump complete.\n");
+#else
+ fprintf(fp, "Native heap dump not available on this platform\n");
+#endif
+
+ fclose(fp);
+}
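
The dup()-before-fdopen() step above encodes a general ownership rule: fclose() closes the descriptor backing the FILE*, so a borrowed fd must be duplicated first. A hedged standalone sketch of the same pattern:

    #include <stdio.h>
    #include <unistd.h>

    // Wraps a caller-owned descriptor in a FILE* without taking ownership of it.
    static FILE* wrapBorrowedFd(int borrowedFd) {
        int fd = dup(borrowedFd);               // caller keeps borrowedFd
        return fd < 0 ? NULL : fdopen(fd, "w"); // fclose() closes only the dup
    }
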
+
+
/*
* JNI registration.
*/
@@ -289,6 +462,8 @@
(void*) android_os_Debug_getDirtyPages },
{ "getMemoryInfo", "(ILandroid/os/Debug$MemoryInfo;)V",
(void*) android_os_Debug_getDirtyPagesPid },
+ { "dumpNativeHeap", "(Ljava/io/FileDescriptor;)V",
+ (void*) android_os_Debug_dumpNativeHeap },
{ "getBinderSentTransactions", "()I",
(void*) android_os_Debug_getBinderSentTransactions },
{ "getBinderReceivedTransactions", "()I",
@@ -320,4 +495,4 @@
return jniRegisterNativeMethods(env, "android/os/Debug", gMethods, NELEM(gMethods));
}
-};
+}; // namespace android
diff --git a/core/res/res/drawable-hdpi/selection_end_handle.png b/core/res/res/drawable-hdpi/selection_end_handle.png
new file mode 100644
index 0000000..624ab58
--- /dev/null
+++ b/core/res/res/drawable-hdpi/selection_end_handle.png
Binary files differ
diff --git a/core/res/res/drawable-hdpi/selection_start_handle.png b/core/res/res/drawable-hdpi/selection_start_handle.png
new file mode 100644
index 0000000..7d6f24c
--- /dev/null
+++ b/core/res/res/drawable-hdpi/selection_start_handle.png
Binary files differ
diff --git a/core/res/res/drawable-mdpi/selection_end_handle.png b/core/res/res/drawable-mdpi/selection_end_handle.png
new file mode 100644
index 0000000..7e075eb
--- /dev/null
+++ b/core/res/res/drawable-mdpi/selection_end_handle.png
Binary files differ
diff --git a/core/res/res/drawable-mdpi/selection_start_handle.png b/core/res/res/drawable-mdpi/selection_start_handle.png
new file mode 100644
index 0000000..d8022f7
--- /dev/null
+++ b/core/res/res/drawable-mdpi/selection_start_handle.png
Binary files differ
diff --git a/core/res/res/drawable-xlarge/default_wallpaper.jpg b/core/res/res/drawable-xlarge/default_wallpaper.jpg
new file mode 100644
index 0000000..0302f00
--- /dev/null
+++ b/core/res/res/drawable-xlarge/default_wallpaper.jpg
Binary files differ
diff --git a/core/res/res/layout/list_content.xml b/core/res/res/layout/list_content.xml
index 6f9f1e0..1414032 100644
--- a/core/res/res/layout/list_content.xml
+++ b/core/res/res/layout/list_content.xml
@@ -1,8 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
-/* //device/apps/common/assets/res/layout/list_content.xml
-**
-** Copyright 2006, The Android Open Source Project
+/* Copyright 2010, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -17,8 +15,42 @@
** limitations under the License.
*/
-->
-<ListView xmlns:android="http://schemas.android.com/apk/res/android" android:id="@android:id/list"
- android:layout_width="match_parent"
- android:layout_height="match_parent"
- android:drawSelectorOnTop="false"
- />
+<FrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent">
+
+ <LinearLayout android:id="@+id/progressContainer"
+ android:orientation="vertical"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:visibility="gone"
+ android:gravity="center">
+
+ <ProgressBar style="?android:attr/progressBarStyleLarge"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content" />
+ <TextView android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:textAppearance="?android:attr/textAppearanceSmall"
+ android:text="@string/loading"
+ android:paddingTop="4dip"
+ android:singleLine="true" />
+
+ </LinearLayout>
+
+ <FrameLayout android:id="@+id/listContainer"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent">
+
+ <ListView android:id="@android:id/list"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:drawSelectorOnTop="false" />
+ <TextView android:id="@+android:id/internalEmpty"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:gravity="center"
+ android:textAppearance="?android:attr/textAppearanceLarge" />
+ </FrameLayout>
+
+</FrameLayout>
diff --git a/core/res/res/layout/list_content_rich.xml b/core/res/res/layout/list_content_rich.xml
deleted file mode 100644
index 1414032..0000000
--- a/core/res/res/layout/list_content_rich.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
-/* Copyright 2010, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
--->
-<FrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
- android:layout_width="match_parent"
- android:layout_height="match_parent">
-
- <LinearLayout android:id="@+id/progressContainer"
- android:orientation="vertical"
- android:layout_width="match_parent"
- android:layout_height="match_parent"
- android:visibility="gone"
- android:gravity="center">
-
- <ProgressBar style="?android:attr/progressBarStyleLarge"
- android:layout_width="wrap_content"
- android:layout_height="wrap_content" />
- <TextView android:layout_width="wrap_content"
- android:layout_height="wrap_content"
- android:textAppearance="?android:attr/textAppearanceSmall"
- android:text="@string/loading"
- android:paddingTop="4dip"
- android:singleLine="true" />
-
- </LinearLayout>
-
- <FrameLayout android:id="@+id/listContainer"
- android:layout_width="match_parent"
- android:layout_height="match_parent">
-
- <ListView android:id="@android:id/list"
- android:layout_width="match_parent"
- android:layout_height="match_parent"
- android:drawSelectorOnTop="false" />
- <TextView android:id="@+android:id/internalEmpty"
- android:layout_width="match_parent"
- android:layout_height="match_parent"
- android:gravity="center"
- android:textAppearance="?android:attr/textAppearanceLarge" />
- </FrameLayout>
-
-</FrameLayout>
diff --git a/core/res/res/layout/list_content_simple.xml b/core/res/res/layout/list_content_simple.xml
new file mode 100644
index 0000000..6f9f1e0
--- /dev/null
+++ b/core/res/res/layout/list_content_simple.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+/* //device/apps/common/assets/res/layout/list_content.xml
+**
+** Copyright 2006, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+-->
+<ListView xmlns:android="http://schemas.android.com/apk/res/android" android:id="@android:id/list"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:drawSelectorOnTop="false"
+ />
diff --git a/core/res/res/values/attrs.xml b/core/res/res/values/attrs.xml
index b3f280b..67072b6 100755
--- a/core/res/res/values/attrs.xml
+++ b/core/res/res/values/attrs.xml
@@ -942,6 +942,21 @@
<enum name="KEYCODE_PAGE_DOWN" value="93" />
<enum name="KEYCODE_PICTSYMBOLS" value="94" />
<enum name="KEYCODE_SWITCH_CHARSET" value="95" />
+ <enum name="KEYCODE_BUTTON_A" value="96" />
+ <enum name="KEYCODE_BUTTON_B" value="97" />
+ <enum name="KEYCODE_BUTTON_C" value="98" />
+ <enum name="KEYCODE_BUTTON_X" value="99" />
+ <enum name="KEYCODE_BUTTON_Y" value="100" />
+ <enum name="KEYCODE_BUTTON_Z" value="101" />
+ <enum name="KEYCODE_BUTTON_L1" value="102" />
+ <enum name="KEYCODE_BUTTON_R1" value="103" />
+ <enum name="KEYCODE_BUTTON_L2" value="104" />
+ <enum name="KEYCODE_BUTTON_R2" value="105" />
+ <enum name="KEYCODE_BUTTON_THUMBL" value="106" />
+ <enum name="KEYCODE_BUTTON_THUMBR" value="107" />
+ <enum name="KEYCODE_BUTTON_START" value="108" />
+ <enum name="KEYCODE_BUTTON_SELECT" value="109" />
+ <enum name="KEYCODE_BUTTON_MODE" value="110" />
</attr>
<!-- ***************************************************************** -->
diff --git a/core/res/res/values/dimens.xml b/core/res/res/values/dimens.xml
index 4d67bdd..679e642 100644
--- a/core/res/res/values/dimens.xml
+++ b/core/res/res/values/dimens.xml
@@ -19,9 +19,9 @@
-->
<resources>
<!-- The width that is used when creating thumbnails of applications. -->
- <dimen name="thumbnail_width">84dp</dimen>
+ <dimen name="thumbnail_width">0dp</dimen>
<!-- The height that is used when creating thumbnails of applications. -->
- <dimen name="thumbnail_height">63dp</dimen>
+ <dimen name="thumbnail_height">0dp</dimen>
<!-- The standard size (both width and height) of an application icon that
will be displayed in the app launcher and elsewhere. -->
<dimen name="app_icon_size">48dip</dimen>
diff --git a/core/res/res/values/public.xml b/core/res/res/values/public.xml
index 8b9a4ae..d0be554 100644
--- a/core/res/res/values/public.xml
+++ b/core/res/res/values/public.xml
@@ -869,8 +869,11 @@
<public type="drawable" name="stat_sys_download" id="0x01080081" />
<public type="drawable" name="stat_sys_download_done" id="0x01080082" />
<public type="drawable" name="stat_sys_headset" id="0x01080083" />
+ <!-- @deprecated Replaced by a private asset in the phone app. -->
<public type="drawable" name="stat_sys_phone_call" id="0x01080084" />
+ <!-- @deprecated Replaced by a private asset in the phone app. -->
<public type="drawable" name="stat_sys_phone_call_forward" id="0x01080085" />
+ <!-- @deprecated Replaced by a private asset in the phone app. -->
<public type="drawable" name="stat_sys_phone_call_on_hold" id="0x01080086" />
<public type="drawable" name="stat_sys_speakerphone" id="0x01080087" />
<public type="drawable" name="stat_sys_upload" id="0x01080088" />
@@ -1132,7 +1135,9 @@
<public type="style" name="Widget.ProgressBar.Large.Inverse" id="0x0103005c" />
<public type="style" name="Widget.ProgressBar.Small.Inverse" id="0x0103005d" />
+ <!-- @deprecated Replaced by a private asset in the phone app. -->
<public type="drawable" name="stat_sys_vp_phone_call" id="0x010800a7" />
+ <!-- @deprecated Replaced by a private asset in the phone app. -->
<public type="drawable" name="stat_sys_vp_phone_call_on_hold" id="0x010800a8" />
<public type="anim" name="anticipate_interpolator" id="0x010a0007" />
@@ -1310,4 +1315,11 @@
<public type="style" name="Widget.Spinner.DropDown" />
<public type="style" name="Widget.ActionButton" />
+ <!-- Standard content view for a {@link android.app.ListFragment}.
+ If you are implementing a subclass of ListFragment with your
+ own customized content, you can include this layout in that
+ content to still retain all of the standard functionality of
+ the base class. -->
+ <public type="layout" name="list_content" />
+
</resources>
diff --git a/core/res/res/values/styles.xml b/core/res/res/values/styles.xml
index 449b56c..3c09a89 100644
--- a/core/res/res/values/styles.xml
+++ b/core/res/res/values/styles.xml
@@ -626,7 +626,7 @@
<style name="TextAppearance">
<item name="android:textColor">?textColorPrimary</item>
- <item name="android:textColorHighlight">#FFFF9200</item>
+ <item name="android:textColorHighlight">#D077A14B</item>
<item name="android:textColorHint">?textColorHint</item>
<item name="android:textColorLink">#5C5CFF</item>
<item name="android:textSize">16sp</item>
diff --git a/docs/html/resources/articles/painless-threading.jd b/docs/html/resources/articles/painless-threading.jd
index 921f4df..17cec35 100644
--- a/docs/html/resources/articles/painless-threading.jd
+++ b/docs/html/resources/articles/painless-threading.jd
@@ -108,7 +108,7 @@
new DownloadImageTask().execute("http://example.com/image.png");
}
-private class DownloadImageTask extends AsyncTask<string, void,="" bitmap=""> {
+private class DownloadImageTask extends AsyncTask<String, Void, Bitmap> {
protected Bitmap doInBackground(String... urls) {
return loadImageFromNetwork(urls[0]);
}
diff --git a/docs/html/sdk/android-1.5.jd b/docs/html/sdk/android-1.5.jd
index 1d6e0ad..0c16b60 100644
--- a/docs/html/sdk/android-1.5.jd
+++ b/docs/html/sdk/android-1.5.jd
@@ -139,7 +139,7 @@
<div class="toggleable closed">
<a href="#" onclick="return toggleDiv(this)">
- <img src="{@docRoot}assets/images/triangle-opened.png" class="toggle-img" height="9px" width="9px" />
+ <img src="{@docRoot}assets/images/triangle-closed.png" class="toggle-img" height="9px" width="9px" />
Android 1.5, Revision 3</a> <em>(July 2009)</em></a>
<div class="toggleme">
<dl>
diff --git a/docs/html/sdk/android-1.6.jd b/docs/html/sdk/android-1.6.jd
index c2651b6..c4e08ff 100644
--- a/docs/html/sdk/android-1.6.jd
+++ b/docs/html/sdk/android-1.6.jd
@@ -138,7 +138,7 @@
<div class="toggleable closed">
<a href="#" onclick="return toggleDiv(this)">
- <img src="{@docRoot}assets/images/triangle-opened.png" class="toggle-img" height="9px" width="9px" />
+ <img src="{@docRoot}assets/images/triangle-closed.png" class="toggle-img" height="9px" width="9px" />
Android 1.6, Revision 2</a> <em>(December 2009)</em></a>
<div class="toggleme">
<dl>
diff --git a/docs/html/sdk/android-2.1.jd b/docs/html/sdk/android-2.1.jd
index 7490bae..cd48a72 100644
--- a/docs/html/sdk/android-2.1.jd
+++ b/docs/html/sdk/android-2.1.jd
@@ -139,7 +139,7 @@
<div class="toggleable closed">
<a href="#" onclick="return toggleDiv(this)">
- <img src="{@docRoot}assets/images/triangle-opened.png" class="toggle-img" height="9px" width="9px" />
+ <img src="{@docRoot}assets/images/triangle-closed.png" class="toggle-img" height="9px" width="9px" />
Android 2.1, Revision 1</a> <em>(January 2010)</em></a>
<div class="toggleme">
<dl>
diff --git a/docs/html/sdk/android-2.2.jd b/docs/html/sdk/android-2.2.jd
index f82edf9..495fd80 100644
--- a/docs/html/sdk/android-2.2.jd
+++ b/docs/html/sdk/android-2.2.jd
@@ -2,7 +2,6 @@
sdk.platform.version=2.2
sdk.platform.apiLevel=8
sdk.platform.majorMinor=minor
-sdk.platform.deployableDate=May 2010
@jd:body
@@ -118,6 +117,30 @@
<div class="toggleable opened">
<a href="#" onclick="return toggleDiv(this)">
<img src="{@docRoot}assets/images/triangle-opened.png" class="toggle-img" height="9px" width="9px" />
+ Android {@sdkPlatformVersion}, Revision 2</a> <em>(July 2010)</em></a>
+ <div class="toggleme">
+<dl>
+<dt>Dependencies:</dt>
+<dd>
+<p>Requires SDK Tools r6 or higher.</p>
+</dd>
+
+<dt>System Image:</dt>
+<dd>
+<ul>
+<li>Adds default Search Widget.</li>
+<li>Includes proper provisioning for the platform's Backup Manager. For more information about how to use the Backup Manager, see <a href="{@docRoot}guide/topics/data/backup.html">Data Backup</a>.</li>
+<li>Updates the Android 2.2 system image to FRF91.</li>
+</ul>
+</dd>
+
+</dl>
+ </div>
+</div>
+
+<div class="toggleable closed">
+ <a href="#" onclick="return toggleDiv(this)">
+ <img src="{@docRoot}assets/images/triangle-closed.png" class="toggle-img" height="9px" width="9px" />
Android {@sdkPlatformVersion}, Revision 1</a> <em>(May 2010)</em></a>
<div class="toggleme">
<dl>
@@ -135,7 +158,6 @@
</div>
</div>
-
<h2 id="api-level">API Level</h2>
<p>The Android {@sdkPlatformVersion} platform delivers an updated version of
@@ -444,4 +466,4 @@
<p>For more information about how to develop an application that displays
and functions properly on all Android-powered devices, see <a
href="{@docRoot}guide/practices/screens_support.html">Supporting Multiple
-Screens</a>.</p>
\ No newline at end of file
+Screens</a>.</p>
diff --git a/docs/html/sdk/eclipse-adt.jd b/docs/html/sdk/eclipse-adt.jd
index 1a42e7f..bd7eeed 100644
--- a/docs/html/sdk/eclipse-adt.jd
+++ b/docs/html/sdk/eclipse-adt.jd
@@ -298,7 +298,7 @@
<p>Additionally, before you can configure or use ADT, you must install the
Android SDK starter package, as described in <a
-href="installing.html#Installing">Downloading the SDK Starter Pacskage</a>.
+href="installing.html#Installing">Downloading the SDK Starter Package</a>.
Specifically, you need to install a compatible version of the Android SDK Tools
and at least one development platform. To simplify ADT setup, we recommend
installing the Android SDK prior to installing ADT. </p>
diff --git a/docs/html/sdk/ndk/index.jd b/docs/html/sdk/ndk/index.jd
index 69cc73d..9e88d94 100644
--- a/docs/html/sdk/ndk/index.jd
+++ b/docs/html/sdk/ndk/index.jd
@@ -1,16 +1,16 @@
ndk=true
-ndk.win_download=android-ndk-r4-windows.zip
-ndk.win_bytes=45778965
-ndk.win_checksum=1eded98a7f5cd5e71f8ac74565f73f11
+ndk.win_download=android-ndk-r4b-windows.zip
+ndk.win_bytes=45792835
+ndk.win_checksum=e397145e155a639be53ee4b6db8ad511
-ndk.mac_download=android-ndk-r4-darwin-x86.zip
-ndk.mac_bytes=50572163
-ndk.mac_checksum=b7d5f149fecf951c05a79b045f00419f
+ndk.mac_download=android-ndk-r4b-darwin-x86.zip
+ndk.mac_bytes=50586041
+ndk.mac_checksum=41dbd54335fb828ee408eab17103a1b0
-ndk.linux_download=android-ndk-r4-linux-x86.zip
-ndk.linux_bytes=49450682
-ndk.linux_checksum=0892b0637d45d145e045cc68e163dee3
+ndk.linux_download=android-ndk-r4b-linux-x86.zip
+ndk.linux_bytes=49464776
+ndk.linux_checksum=2deabcb125c219b34140975b710f00ec
page.title=Android NDK
@jd:body
@@ -62,8 +62,15 @@
<div class="toggleable open">
<a href="#" onclick="return toggleDiv(this)">
<img src="{@docRoot}assets/images/triangle-opened.png" class="toggle-img" height="9px" width="9px" />
-Android NDK, Revision 4</a> <em>(May 2010)</em>
+Android NDK, Revision 4b</a> <em>(June 2010)</em>
<div class="toggleme">
+<dl>
+<dt>NDK r4b notes:</dt>
+<dd><p>Includes fixes for several issues in the NDK build and debugging scripts
+— if you are using NDK r4, we recommend downloading the NDK r4b build. For
+detailed information about the changes in this release, read the CHANGES.TXT document
+included in the downloaded NDK package.</p></dd>
+</dl>
<dl>
<dt>General notes:</dt>
@@ -114,7 +121,7 @@
<div class="toggleable closed">
<a href="#" onclick="return toggleDiv(this)">
- <img src="{@docRoot}assets/images/triangle-opened.png" class="toggle-img" height="9px" width="9px" />
+ <img src="{@docRoot}assets/images/triangle-closed.png" class="toggle-img" height="9px" width="9px" />
Android NDK, Revision 3</a> <em>(March 2010)</em>
<div class="toggleme">
diff --git a/docs/html/sdk/sdk_toc.cs b/docs/html/sdk/sdk_toc.cs
index a80981ce..404e938 100644
--- a/docs/html/sdk/sdk_toc.cs
+++ b/docs/html/sdk/sdk_toc.cs
@@ -75,7 +75,8 @@
</li>
</ul>
<ul>
- <li><a href="<?cs var:toroot ?>sdk/tools-notes.html">SDK Tools, r6</a> <span class="new">new!</span></li>
+ <li><a href="<?cs var:toroot ?>sdk/tools-notes.html">SDK Tools, r6</a>
+ </li>
<li><a href="<?cs var:toroot ?>sdk/win-usb.html">USB Driver for
Windows, r3</a>
</li>
@@ -101,7 +102,6 @@
<span style="display:none" class="ja"></span>
<span style="display:none" class="zh-CN"></span>
<span style="display:none" class="zh-TW"></span></a>
- <span class="new">new!</span>
</li>
</ul>
</li>
@@ -116,7 +116,7 @@
<span style="display:none" class="zh-TW"></span>
</h2>
<ul>
- <li><a href="<?cs var:toroot ?>sdk/ndk/index.html">Android NDK, r4</a>
+ <li><a href="<?cs var:toroot ?>sdk/ndk/index.html">Android NDK, r4b</a>
<span class="new">new!</span></li>
</ul>
</li>
diff --git a/graphics/java/android/graphics/BitmapFactory.java b/graphics/java/android/graphics/BitmapFactory.java
index 5394530..320fc4d 100644
--- a/graphics/java/android/graphics/BitmapFactory.java
+++ b/graphics/java/android/graphics/BitmapFactory.java
@@ -69,8 +69,11 @@
* the decoder will try to pick the best matching config based on the
* system's screen depth, and characteristics of the original image such
* as if it has per-pixel alpha (requiring a config that also does).
+ *
+ * The configuration is set to {@link android.graphics.Bitmap.Config#ARGB_8888}
+ * by default.
*/
- public Bitmap.Config inPreferredConfig;
+ public Bitmap.Config inPreferredConfig = Bitmap.Config.ARGB_8888;
/**
* If dither is true, the decoder will attempt to dither the decoded
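To make the new default concrete, a minimal decode sketch using the documented field; the path parameter is a hypothetical input, and the explicit assignment is only needed when something other than the ARGB_8888 default is wanted.

    // Sketch: with this change, Options.inPreferredConfig already defaults to ARGB_8888,
    // so the assignment below only matters when a different config is requested.
    static android.graphics.Bitmap decodeWithConfig(String path) {
        android.graphics.BitmapFactory.Options opts = new android.graphics.BitmapFactory.Options();
        opts.inPreferredConfig = android.graphics.Bitmap.Config.RGB_565; // override the default
        return android.graphics.BitmapFactory.decodeFile(path, opts);
    }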
diff --git a/graphics/java/android/graphics/BitmapShader.java b/graphics/java/android/graphics/BitmapShader.java
index 612b0ab..37b40e7 100644
--- a/graphics/java/android/graphics/BitmapShader.java
+++ b/graphics/java/android/graphics/BitmapShader.java
@@ -16,10 +16,25 @@
package android.graphics;
+/**
+ * Shader used to draw a bitmap as a texture. The bitmap can be repeated or
+ * mirrored by setting the tiling mode.
+ */
public class BitmapShader extends Shader {
-
- // we hold on just for the GC, since our native counterpart is using it
- private Bitmap mBitmap;
+ /**
+ * We hold on just for the GC, since our native counterpart is using it.
+ *
+ * @hide
+ */
+ public Bitmap mBitmap;
+ /**
+ * @hide
+ */
+ public int mTileX;
+ /**
+ * @hide
+ */
+ public int mTileY;
/**
* Call this to create a new shader that will draw with a bitmap.
@@ -30,12 +45,11 @@
*/
public BitmapShader(Bitmap bitmap, TileMode tileX, TileMode tileY) {
mBitmap = bitmap;
- native_instance = nativeCreate(bitmap.ni(),
- tileX.nativeInt, tileY.nativeInt);
+ mTileX = tileX.nativeInt;
+ mTileY = tileY.nativeInt;
+ native_instance = nativeCreate(bitmap.ni(), mTileX, mTileY);
}
- private static native int nativeCreate(int native_bitmap,
- int shaderTileModeX,
- int shaderTileModeY);
+ private static native int nativeCreate(int native_bitmap, int shaderTileModeX,
+ int shaderTileModeY);
}
-
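
A hedged usage sketch for the newly documented class: tiling a bitmap as a texture through a Paint. The bitmap argument is assumed to be an already decoded Bitmap.

    // Sketch: attach a BitmapShader to a Paint so drawing operations tile the bitmap.
    static android.graphics.Paint makeTilingPaint(android.graphics.Bitmap bitmap) {
        android.graphics.BitmapShader shader = new android.graphics.BitmapShader(bitmap,
                android.graphics.Shader.TileMode.REPEAT,   // repeat horizontally
                android.graphics.Shader.TileMode.MIRROR);  // mirror vertically
        android.graphics.Paint paint = new android.graphics.Paint();
        paint.setShader(shader);
        return paint;  // use with e.g. canvas.drawRect(0, 0, w, h, paint)
    }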
diff --git a/graphics/java/android/graphics/ImageFormat.java b/graphics/java/android/graphics/ImageFormat.java
index f126374..3f9f961 100644
--- a/graphics/java/android/graphics/ImageFormat.java
+++ b/graphics/java/android/graphics/ImageFormat.java
@@ -16,61 +16,84 @@
package android.graphics;
-public class ImageFormat
-{
- /* these constants are chosen to be binary compatible with
- * their previous location in PixelFormat.java */
-
- public static final int UNKNOWN = 0;
+public class ImageFormat {
+ /*
+ * these constants are chosen to be binary compatible with their previous
+ * location in PixelFormat.java
+ */
- /** RGB format used for pictures encoded as RGB_565
- * see {@link android.hardware.Camera.Parameters#setPictureFormat(int)}.
- */
- public static final int RGB_565 = 4;
+ public static final int UNKNOWN = 0;
- /**
- * YCbCr formats, used for video. These are not necessarily supported
- * by the hardware.
- */
- public static final int NV16 = 0x10;
+ /**
+ * RGB format used for pictures encoded as RGB_565 see
+ * {@link android.hardware.Camera.Parameters#setPictureFormat(int)}.
+ */
+ public static final int RGB_565 = 4;
-
- /** YCrCb format used for images, which uses the NV21 encoding format.
- * This is the default format for camera preview images, when not
- * otherwise set with
- * {@link android.hardware.Camera.Parameters#setPreviewFormat(int)}.
- */
- public static final int NV21 = 0x11;
+ /**
+ * Planar 4:2:0 YCrCb format. This format assumes a horizontal stride of 16
+ * pixels for all planes and an implicit vertical stride of the image
+ * height's next multiple of two.
+ * y_size = stride * ALIGN(height, 2)
+ * c_size = ALIGN(stride/2, 16) * height
+ * size = y_size + c_size * 2
+ * cr_offset = y_size
+ * cb_offset = y_size + c_size
+ *
+ * Whether this format is supported by the camera hardware can be determined
+ * by
+ * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.
+ */
+ public static final int YV12 = 0x32315659;
+ /**
+ * YCbCr format, used for video. Whether this format is supported by the
+ * camera hardware can be determined by
+ * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.
+ */
+ public static final int NV16 = 0x10;
- /** YCbCr format used for images, which uses YUYV (YUY2) encoding format.
- * This is an alternative format for camera preview images. Whether this
- * format is supported by the camera hardware can be determined by
- * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.
- */
- public static final int YUY2 = 0x14;
+ /**
+ * YCrCb format used for images, which uses the NV21 encoding format. This
+ * is the default format for camera preview images, when not otherwise set
+ * with {@link android.hardware.Camera.Parameters#setPreviewFormat(int)}.
+ */
+ public static final int NV21 = 0x11;
-
- /**
- * Encoded formats. These are not necessarily supported by the hardware.
- */
- public static final int JPEG = 0x100;
+ /**
+ * YCbCr format used for images, which uses YUYV (YUY2) encoding format.
+ * This is an alternative format for camera preview images. Whether this
+ * format is supported by the camera hardware can be determined by
+ * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.
+ */
+ public static final int YUY2 = 0x14;
+ /**
+ * Encoded formats. These are not necessarily supported by the hardware.
+ */
+ public static final int JPEG = 0x100;
- /**
- * Use this function to retrieve the number of bits per pixel of
- * an ImageFormat.
- * @param format
- * @return the number of bits per pixel of the given format or -1 if the
- * format doesn't exist or is not supported.
- */
- public static int getBitsPerPixel(int format) {
- switch (format) {
- case RGB_565: return 16;
- case NV16: return 16;
- case NV21: return 12;
- case YUY2: return 16;
- }
- return -1;
- }
+ /**
+ * Use this function to retrieve the number of bits per pixel of an
+ * ImageFormat.
+ *
+ * @param format
+ * @return the number of bits per pixel of the given format or -1 if the
+ * format doesn't exist or is not supported.
+ */
+ public static int getBitsPerPixel(int format) {
+ switch (format) {
+ case RGB_565:
+ return 16;
+ case NV16:
+ return 16;
+ case YUY2:
+ return 16;
+ case YV12:
+ return 12;
+ case NV21:
+ return 12;
+ }
+ return -1;
+ }
}
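As a small worked example of the per-pixel values above, the bit depths can be turned into an approximate raw buffer size; this sketch deliberately ignores the YV12 per-plane stride padding described in the comment.

    // Sketch: approximate raw buffer size from ImageFormat.getBitsPerPixel().
    // For NV21 and YV12 this gives width * height * 12 / 8 bytes.
    static int estimateBufferSize(int width, int height, int format) {
        int bitsPerPixel = android.graphics.ImageFormat.getBitsPerPixel(format);
        if (bitsPerPixel < 0) {
            throw new IllegalArgumentException("unknown or unsupported format: " + format);
        }
        return width * height * bitsPerPixel / 8;
    }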
diff --git a/graphics/java/android/graphics/Paint.java b/graphics/java/android/graphics/Paint.java
index 88ff0e6..9b4d3a8 100644
--- a/graphics/java/android/graphics/Paint.java
+++ b/graphics/java/android/graphics/Paint.java
@@ -1718,7 +1718,8 @@
if ((index | count) < 0 || index + count > text.length) {
throw new ArrayIndexOutOfBoundsException();
}
- native_getTextPath(mNativePaint, text, index, count, x, y, path.ni());
+ native_getTextPath(mNativePaint, mBidiFlags, text, index, count, x, y,
+ path.ni());
}
/**
@@ -1739,7 +1740,8 @@
if ((start | end | (end - start) | (text.length() - end)) < 0) {
throw new IndexOutOfBoundsException();
}
- native_getTextPath(mNativePaint, text, start, end, x, y, path.ni());
+ native_getTextPath(mNativePaint, mBidiFlags, text, start, end, x, y,
+ path.ni());
}
/**
@@ -1836,9 +1838,9 @@
private native int native_getTextRunCursor(int native_object, String text,
int contextStart, int contextEnd, int flags, int offset, int cursorOpt);
- private static native void native_getTextPath(int native_object,
+ private static native void native_getTextPath(int native_object, int bidiFlags,
char[] text, int index, int count, float x, float y, int path);
- private static native void native_getTextPath(int native_object,
+ private static native void native_getTextPath(int native_object, int bidiFlags,
String text, int start, int end, float x, float y, int path);
private static native void nativeGetStringBounds(int nativePaint,
String text, int start, int end, Rect bounds);
diff --git a/graphics/java/android/renderscript/Allocation.java b/graphics/java/android/renderscript/Allocation.java
index ddb2abf..bfa61f3 100644
--- a/graphics/java/android/renderscript/Allocation.java
+++ b/graphics/java/android/renderscript/Allocation.java
@@ -40,6 +40,11 @@
mType = t;
}
+ Allocation(int id, RenderScript rs) {
+ super(rs);
+ mID = id;
+ }
+
public Type getType() {
return mType;
}
diff --git a/graphics/java/android/renderscript/BaseObj.java b/graphics/java/android/renderscript/BaseObj.java
index 002fc78..28675dc 100644
--- a/graphics/java/android/renderscript/BaseObj.java
+++ b/graphics/java/android/renderscript/BaseObj.java
@@ -81,5 +81,10 @@
mRS.nObjDestroy(mID);
}
+ // If an object came from an a3d file, the Java-side fields need to be
+ // created from the corresponding objects in the native layer.
+ void updateFromNative() {
+ }
+
}
diff --git a/graphics/java/android/renderscript/Mesh.java b/graphics/java/android/renderscript/Mesh.java
index 5a53878..4bee97a 100644
--- a/graphics/java/android/renderscript/Mesh.java
+++ b/graphics/java/android/renderscript/Mesh.java
@@ -59,6 +59,38 @@
return mPrimitives[slot];
}
+ @Override
+ void updateFromNative() {
+ int vtxCount = mRS.nMeshGetVertexBufferCount(mID);
+ int idxCount = mRS.nMeshGetIndexCount(mID);
+
+ int[] vtxIDs = new int[vtxCount];
+ int[] idxIDs = new int[idxCount];
+ int[] primitives = new int[idxCount];
+
+ mRS.nMeshGetVertices(mID, vtxIDs, vtxCount);
+ mRS.nMeshGetIndices(mID, idxIDs, primitives, vtxCount);
+
+ mVertexBuffers = new Allocation[vtxCount];
+ mIndexBuffers = new Allocation[idxCount];
+ mPrimitives = new Primitive[idxCount];
+
+ for(int i = 0; i < vtxCount; i ++) {
+ if(vtxIDs[i] != 0) {
+ mVertexBuffers[i] = new Allocation(vtxIDs[i], mRS);
+ mVertexBuffers[i].updateFromNative();
+ }
+ }
+
+ for(int i = 0; i < idxCount; i ++) {
+ if(idxIDs[i] != 0) {
+ mIndexBuffers[i] = new Allocation(idxIDs[i], mRS);
+ mIndexBuffers[i].updateFromNative();
+ }
+ mPrimitives[i] = Primitive.values()[primitives[i]];
+ }
+ }
+
public static class Builder {
RenderScript mRS;
diff --git a/graphics/java/android/renderscript/ProgramRaster.java b/graphics/java/android/renderscript/ProgramRaster.java
index d7c98aa..c3ab481 100644
--- a/graphics/java/android/renderscript/ProgramRaster.java
+++ b/graphics/java/android/renderscript/ProgramRaster.java
@@ -26,12 +26,23 @@
*
**/
public class ProgramRaster extends BaseObj {
+
+ public enum CullMode {
+ BACK (0),
+ FRONT (1),
+ NONE (2);
+
+ int mID;
+ CullMode(int id) {
+ mID = id;
+ }
+ }
+
boolean mPointSmooth;
boolean mLineSmooth;
boolean mPointSprite;
float mLineWidth;
- Element mIn;
- Element mOut;
+ CullMode mCullMode;
ProgramRaster(int id, RenderScript rs) {
super(rs);
@@ -41,6 +52,8 @@
mPointSmooth = false;
mLineSmooth = false;
mPointSprite = false;
+
+ mCullMode = CullMode.BACK;
}
public void setLineWidth(float w) {
@@ -49,45 +62,48 @@
mRS.nProgramRasterSetLineWidth(mID, w);
}
- void internalInit() {
- int inID = 0;
- int outID = 0;
- if (mIn != null) {
- inID = mIn.mID;
- }
- if (mOut != null) {
- outID = mOut.mID;
- }
- mID = mRS.nProgramRasterCreate(inID, outID, mPointSmooth, mLineSmooth, mPointSprite);
+ public void setCullMode(CullMode m) {
+ mRS.validate();
+ mCullMode = m;
+ mRS.nProgramRasterSetCullMode(mID, m.mID);
}
-
public static class Builder {
RenderScript mRS;
- ProgramRaster mPR;
+ boolean mPointSprite;
+ boolean mPointSmooth;
+ boolean mLineSmooth;
+ // Legacy constructor kept to avoid breaking apps in other projects; will be removed in a cleanup pass
public Builder(RenderScript rs, Element in, Element out) {
mRS = rs;
- mPR = new ProgramRaster(0, rs);
+ mPointSmooth = false;
+ mLineSmooth = false;
+ mPointSprite = false;
+ }
+
+ public Builder(RenderScript rs) {
+ mRS = rs;
+ mPointSmooth = false;
+ mLineSmooth = false;
+ mPointSprite = false;
}
public void setPointSpriteEnable(boolean enable) {
- mPR.mPointSprite = enable;
+ mPointSprite = enable;
}
public void setPointSmoothEnable(boolean enable) {
- mPR.mPointSmooth = enable;
+ mPointSmooth = enable;
}
public void setLineSmoothEnable(boolean enable) {
- mPR.mLineSmooth = enable;
+ mLineSmooth = enable;
}
-
static synchronized ProgramRaster internalCreate(RenderScript rs, Builder b) {
- b.mPR.internalInit();
- ProgramRaster pr = b.mPR;
- b.mPR = new ProgramRaster(0, b.mRS);
+ int id = rs.nProgramRasterCreate(b.mPointSmooth, b.mLineSmooth, b.mPointSprite);
+ ProgramRaster pr = new ProgramRaster(id, rs);
return pr;
}
@@ -103,3 +119,4 @@
+
diff --git a/graphics/java/android/renderscript/RenderScript.java b/graphics/java/android/renderscript/RenderScript.java
index fa9eeda..240d544 100644
--- a/graphics/java/android/renderscript/RenderScript.java
+++ b/graphics/java/android/renderscript/RenderScript.java
@@ -165,8 +165,9 @@
native void nProgramStoreDither(boolean enable);
native int nProgramStoreCreate();
- native int nProgramRasterCreate(int in, int out, boolean pointSmooth, boolean lineSmooth, boolean pointSprite);
+ native int nProgramRasterCreate(boolean pointSmooth, boolean lineSmooth, boolean pointSprite);
native void nProgramRasterSetLineWidth(int pr, float v);
+ native void nProgramRasterSetCullMode(int pr, int mode);
native void nProgramBindConstants(int pv, int slot, int mID);
native void nProgramBindTexture(int vpf, int slot, int a);
@@ -188,6 +189,10 @@
native int nMeshCreate(int vtxCount, int indexCount);
native void nMeshBindVertex(int id, int alloc, int slot);
native void nMeshBindIndex(int id, int alloc, int prim, int slot);
+ native int nMeshGetVertexBufferCount(int id);
+ native int nMeshGetIndexCount(int id);
+ native void nMeshGetVertices(int id, int[] vtxIds, int vtxIdCount);
+ native void nMeshGetIndices(int id, int[] idxIds, int[] primitives, int vtxIdCount);
native void nAnimationBegin(int attribCount, int keyframeCount);
native void nAnimationAdd(float time, float[] attribs);
@@ -357,3 +362,4 @@
}
+
diff --git a/graphics/jni/android_renderscript_RenderScript.cpp b/graphics/jni/android_renderscript_RenderScript.cpp
index 66606aa..13360c3 100644
--- a/graphics/jni/android_renderscript_RenderScript.cpp
+++ b/graphics/jni/android_renderscript_RenderScript.cpp
@@ -725,12 +725,10 @@
static int
nFileA3DGetNumIndexEntries(JNIEnv *_env, jobject _this, jint fileA3D)
{
- LOGV("______nFileA3D %u", (uint32_t) fileA3D);
RsContext con = (RsContext)(_env->GetIntField(_this, gContextId));
int32_t numEntries = 0;
rsFileA3DGetNumIndexEntries(con, &numEntries, (RsFile)fileA3D);
- LOGV("______nFileA3D NumEntries %u", (uint32_t) numEntries);
return numEntries;
}
@@ -1203,13 +1201,12 @@
// ---------------------------------------------------------------------------
static jint
-nProgramRasterCreate(JNIEnv *_env, jobject _this, jint in, jint out,
- jboolean pointSmooth, jboolean lineSmooth, jboolean pointSprite)
+nProgramRasterCreate(JNIEnv *_env, jobject _this, jboolean pointSmooth, jboolean lineSmooth, jboolean pointSprite)
{
RsContext con = (RsContext)(_env->GetIntField(_this, gContextId));
- LOG_API("nProgramRasterCreate, con(%p), in(%p), out(%p), pointSmooth(%i), lineSmooth(%i), pointSprite(%i)",
- con, (RsElement)in, (RsElement)out, pointSmooth, lineSmooth, pointSprite);
- return (jint)rsProgramRasterCreate(con, (RsElement)in, (RsElement)out, pointSmooth, lineSmooth, pointSprite);
+ LOG_API("nProgramRasterCreate, con(%p), pointSmooth(%i), lineSmooth(%i), pointSprite(%i)",
+ con, pointSmooth, lineSmooth, pointSprite);
+ return (jint)rsProgramRasterCreate(con, pointSmooth, lineSmooth, pointSprite);
}
static void
@@ -1217,7 +1214,15 @@
{
RsContext con = (RsContext)(_env->GetIntField(_this, gContextId));
LOG_API("nProgramRasterSetLineWidth, con(%p), vpf(%p), value(%f)", con, (RsProgramRaster)vpr, v);
- rsProgramRasterSetLineWidth(con, (RsProgramFragment)vpr, v);
+ rsProgramRasterSetLineWidth(con, (RsProgramRaster)vpr, v);
+}
+
+static void
+nProgramRasterSetCullMode(JNIEnv *_env, jobject _this, jint vpr, jint v)
+{
+ RsContext con = (RsContext)(_env->GetIntField(_this, gContextId));
+ LOG_API("nProgramRasterSetCullMode, con(%p), vpf(%p), value(%i)", con, (RsProgramRaster)vpr, v);
+ rsProgramRasterSetCullMode(con, (RsProgramRaster)vpr, (RsCullMode)v);
}
@@ -1352,19 +1357,75 @@
}
static void
-nMeshBindVertex(JNIEnv *_env, jobject _this, jint s, jint alloc, jint slot)
+nMeshBindVertex(JNIEnv *_env, jobject _this, jint mesh, jint alloc, jint slot)
{
RsContext con = (RsContext)(_env->GetIntField(_this, gContextId));
- LOG_API("nMeshBindVertex, con(%p), Mesh(%p), Alloc(%p), slot(%i)", con, (RsMesh)s, (RsAllocation)alloc, slot);
- rsMeshBindVertex(con, (RsMesh)s, (RsAllocation)alloc, slot);
+ LOG_API("nMeshBindVertex, con(%p), Mesh(%p), Alloc(%p), slot(%i)", con, (RsMesh)mesh, (RsAllocation)alloc, slot);
+ rsMeshBindVertex(con, (RsMesh)mesh, (RsAllocation)alloc, slot);
}
static void
-nMeshBindIndex(JNIEnv *_env, jobject _this, jint s, jint alloc, jint primID, jint slot)
+nMeshBindIndex(JNIEnv *_env, jobject _this, jint mesh, jint alloc, jint primID, jint slot)
{
RsContext con = (RsContext)(_env->GetIntField(_this, gContextId));
- LOG_API("nMeshBindIndex, con(%p), Mesh(%p), Alloc(%p)", con, (RsMesh)s, (RsAllocation)alloc);
- rsMeshBindIndex(con, (RsMesh)s, (RsAllocation)alloc, primID, slot);
+ LOG_API("nMeshBindIndex, con(%p), Mesh(%p), Alloc(%p)", con, (RsMesh)mesh, (RsAllocation)alloc);
+ rsMeshBindIndex(con, (RsMesh)mesh, (RsAllocation)alloc, primID, slot);
+}
+
+static jint
+nMeshGetVertexBufferCount(JNIEnv *_env, jobject _this, jint mesh)
+{
+ RsContext con = (RsContext)(_env->GetIntField(_this, gContextId));
+ LOG_API("nMeshGetVertexBufferCount, con(%p), Mesh(%p)", con, (RsMesh)mesh);
+ jint vtxCount = 0;
+ rsMeshGetVertexBufferCount(con, (RsMesh)mesh, &vtxCount);
+ return vtxCount;
+}
+
+static jint
+nMeshGetIndexCount(JNIEnv *_env, jobject _this, jint mesh)
+{
+ RsContext con = (RsContext)(_env->GetIntField(_this, gContextId));
+ LOG_API("nMeshGetIndexCount, con(%p), Mesh(%p)", con, (RsMesh)mesh);
+ jint idxCount = 0;
+ rsMeshGetIndexCount(con, (RsMesh)mesh, &idxCount);
+ return idxCount;
+}
+
+static void
+nMeshGetVertices(JNIEnv *_env, jobject _this, jint mesh, jintArray _ids, int numVtxIDs)
+{
+ RsContext con = (RsContext)(_env->GetIntField(_this, gContextId));
+ LOG_API("nMeshGetVertices, con(%p), Mesh(%p)", con, (RsMesh)mesh);
+
+ RsAllocation *allocs = (RsAllocation*)malloc((uint32_t)numVtxIDs * sizeof(RsAllocation));
+ rsMeshGetVertices(con, (RsMesh)mesh, allocs, (uint32_t)numVtxIDs);
+
+ for(jint i = 0; i < numVtxIDs; i ++) {
+ _env->SetIntArrayRegion(_ids, i, 1, (const jint*)&allocs[i]);
+ }
+
+ free(allocs);
+}
+
+static void
+nMeshGetIndices(JNIEnv *_env, jobject _this, jint mesh, jintArray _idxIds, jintArray _primitives, int numIndices)
+{
+ RsContext con = (RsContext)(_env->GetIntField(_this, gContextId));
+ LOG_API("nMeshGetVertices, con(%p), Mesh(%p)", con, (RsMesh)mesh);
+
+ RsAllocation *allocs = (RsAllocation*)malloc((uint32_t)numIndices * sizeof(RsAllocation));
+ uint32_t *prims = (uint32_t*)malloc((uint32_t)numIndices * sizeof(uint32_t));
+
+ rsMeshGetIndices(con, (RsMesh)mesh, allocs, prims, (uint32_t)numIndices);
+
+ for(jint i = 0; i < numIndices; i ++) {
+ _env->SetIntArrayRegion(_idxIds, i, 1, (const jint*)&allocs[i]);
+ _env->SetIntArrayRegion(_primitives, i, 1, (const jint*)&prims[i]);
+ }
+
+ free(allocs);
+ free(prims);
}
// ---------------------------------------------------------------------------
@@ -1473,8 +1534,9 @@
{"nProgramFragmentCreate", "([I)I", (void*)nProgramFragmentCreate },
{"nProgramFragmentCreate2", "(Ljava/lang/String;[I)I", (void*)nProgramFragmentCreate2 },
-{"nProgramRasterCreate", "(IIZZZ)I", (void*)nProgramRasterCreate },
+{"nProgramRasterCreate", "(ZZZ)I", (void*)nProgramRasterCreate },
{"nProgramRasterSetLineWidth", "(IF)V", (void*)nProgramRasterSetLineWidth },
+{"nProgramRasterSetCullMode", "(II)V", (void*)nProgramRasterSetCullMode },
{"nProgramVertexCreate", "(Z)I", (void*)nProgramVertexCreate },
{"nProgramVertexCreate2", "(Ljava/lang/String;[I)I", (void*)nProgramVertexCreate2 },
@@ -1500,6 +1562,11 @@
{"nMeshBindVertex", "(III)V", (void*)nMeshBindVertex },
{"nMeshBindIndex", "(IIII)V", (void*)nMeshBindIndex },
+{"nMeshGetVertexBufferCount", "(I)I", (void*)nMeshGetVertexBufferCount },
+{"nMeshGetIndexCount", "(I)I", (void*)nMeshGetIndexCount },
+{"nMeshGetVertices", "(I[II)V", (void*)nMeshGetVertices },
+{"nMeshGetIndices", "(I[I[II)V", (void*)nMeshGetIndices },
+
};
static int registerFuncs(JNIEnv *_env)
@@ -1532,3 +1599,4 @@
bail:
return result;
}
+
diff --git a/include/android_runtime/android_app_NativeActivity.h b/include/android_runtime/android_app_NativeActivity.h
index f808328..d7a9a2c 100644
--- a/include/android_runtime/android_app_NativeActivity.h
+++ b/include/android_runtime/android_app_NativeActivity.h
@@ -17,6 +17,8 @@
#ifndef _ANDROID_APP_NATIVEACTIVITY_H
#define _ANDROID_APP_NATIVEACTIVITY_H
+#include <ui/InputTransport.h>
+
#include <android/native_activity.h>
#include "jni.h"
@@ -29,7 +31,65 @@
extern void android_NativeActivity_setWindowFlags(
ANativeActivity* activity, int32_t values, int32_t mask);
+extern void android_NativeActivity_showSoftInput(
+ ANativeActivity* activity, int32_t flags);
+
+extern void android_NativeActivity_hideSoftInput(
+ ANativeActivity* activity, int32_t flags);
} // namespace android
+
+/*
+ * NDK input queue API.
+ */
+struct AInputQueue {
+public:
+ /* Creates a consumer associated with an input channel. */
+ explicit AInputQueue(const android::sp<android::InputChannel>& channel, int workWrite);
+
+ /* Destroys the consumer and releases its input channel. */
+ ~AInputQueue();
+
+ void attachLooper(ALooper* looper, ALooper_callbackFunc* callback, void* data);
+
+ void detachLooper();
+
+ int32_t hasEvents();
+
+ int32_t getEvent(AInputEvent** outEvent);
+
+ void finishEvent(AInputEvent* event, bool handled);
+
+
+ // ----------------------------------------------------------
+
+ inline android::InputConsumer& getConsumer() { return mConsumer; }
+
+ void dispatchEvent(android::KeyEvent* event);
+
+ android::KeyEvent* consumeUnhandledEvent();
+
+ int mWorkWrite;
+
+private:
+ void doDefaultKey(android::KeyEvent* keyEvent);
+
+ android::InputConsumer mConsumer;
+ android::PreallocatedInputEventFactory mInputEventFactory;
+ android::sp<android::PollLoop> mPollLoop;
+
+ int mDispatchKeyRead;
+ int mDispatchKeyWrite;
+
+ // This is only touched by the event reader thread. It is the current
+ // key events that came out of the mDispatchingKeys list and are now
+ // delivered to the app.
+ android::Vector<android::KeyEvent*> mDeliveringKeys;
+
+ android::Mutex mLock;
+ android::Vector<android::KeyEvent*> mPendingKeys;
+ android::Vector<android::KeyEvent*> mDispatchingKeys;
+};
+
#endif // _ANDROID_APP_NATIVEACTIVITY_H
diff --git a/include/binder/BinderService.h b/include/binder/BinderService.h
new file mode 100644
index 0000000..2316fef
--- /dev/null
+++ b/include/binder/BinderService.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BINDER_SERVICE_H
+#define ANDROID_BINDER_SERVICE_H
+
+#include <stdint.h>
+
+#include <utils/Errors.h>
+#include <utils/String16.h>
+
+#include <binder/IServiceManager.h>
+#include <binder/IPCThreadState.h>
+#include <binder/ProcessState.h>
+#include <binder/IServiceManager.h>
+
+// ---------------------------------------------------------------------------
+namespace android {
+
+template<typename SERVICE>
+class BinderService
+{
+public:
+ static status_t publish() {
+ sp<IServiceManager> sm(defaultServiceManager());
+ return sm->addService(String16(SERVICE::getServiceName()), new SERVICE());
+ }
+
+ static void publishAndJoinThreadPool() {
+ sp<ProcessState> proc(ProcessState::self());
+ sp<IServiceManager> sm(defaultServiceManager());
+ sm->addService(String16(SERVICE::getServiceName()), new SERVICE());
+ ProcessState::self()->startThreadPool();
+ IPCThreadState::self()->joinThreadPool();
+ }
+
+ static void instantiate() { publish(); }
+
+ static status_t shutdown() {
+ return NO_ERROR;
+ }
+};
+
+
+}; // namespace android
+// ---------------------------------------------------------------------------
+#endif // ANDROID_BINDER_SERVICE_H
diff --git a/include/binder/Parcel.h b/include/binder/Parcel.h
index 2cc4db9..3aba5f6 100644
--- a/include/binder/Parcel.h
+++ b/include/binder/Parcel.h
@@ -103,6 +103,11 @@
status_t writeObject(const flat_binder_object& val, bool nullMetaData);
+ // Like Parcel.java's writeNoException(). Just writes a zero int32.
+ // Currently the native implementation doesn't do any of the StrictMode
+ // stack gathering and serialization that the Java implementation does.
+ status_t writeNoException();
+
void remove(size_t start, size_t amt);
status_t read(void* outData, size_t len) const;
@@ -125,7 +130,14 @@
sp<IBinder> readStrongBinder() const;
wp<IBinder> readWeakBinder() const;
status_t read(Flattenable& val) const;
-
+
+ // Like Parcel.java's readExceptionCode(). Reads the first int32
+ // off of a Parcel's header, returning 0 or the negative error
+ // code on exceptions, but also deals with skipping over rich
+ // response headers. Callers should use this to read & parse the
+ // response headers rather than doing it by hand.
+ int32_t readExceptionCode() const;
+
// Retrieve native_handle from the parcel. This returns a copy of the
// parcel's native_handle (the caller takes ownership). The caller
// must free the native_handle with native_handle_close() and
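The Java-side convention that the new native writeNoException()/readExceptionCode() helpers mirror can be sketched with the existing android.os.Parcel API; this is only an illustration of the header format, not part of the change itself.

    // Sketch: the reply-header convention mirrored by the native helpers above.
    static int demoExceptionHeader() {
        android.os.Parcel reply = android.os.Parcel.obtain();
        try {
            reply.writeNoException();   // header int32 of 0 means "no exception"
            reply.writeInt(42);         // payload follows the header
            reply.setDataPosition(0);
            reply.readException();      // consumes the header; throws if it encodes an exception
            return reply.readInt();
        } finally {
            reply.recycle();
        }
    }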
diff --git a/include/gui/ISensorEventConnection.h b/include/gui/ISensorEventConnection.h
new file mode 100644
index 0000000..ed4e4cc
--- /dev/null
+++ b/include/gui/ISensorEventConnection.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_GUI_ISENSOR_EVENT_CONNECTION_H
+#define ANDROID_GUI_ISENSOR_EVENT_CONNECTION_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include <binder/IInterface.h>
+
+namespace android {
+// ----------------------------------------------------------------------------
+
+class SensorChannel;
+
+class ISensorEventConnection : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(SensorEventConnection);
+
+ virtual sp<SensorChannel> getSensorChannel() const = 0;
+ virtual status_t enableDisable(int handle, bool enabled) = 0;
+ virtual status_t setEventRate(int handle, nsecs_t ns) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnSensorEventConnection : public BnInterface<ISensorEventConnection>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+// ----------------------------------------------------------------------------
+}; // namespace android
+
+#endif // ANDROID_GUI_ISENSOR_EVENT_CONNECTION_H
diff --git a/include/gui/ISensorServer.h b/include/gui/ISensorServer.h
new file mode 100644
index 0000000..3e05076
--- /dev/null
+++ b/include/gui/ISensorServer.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_GUI_ISENSORSERVER_H
+#define ANDROID_GUI_ISENSORSERVER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include <binder/IInterface.h>
+
+namespace android {
+// ----------------------------------------------------------------------------
+
+class Sensor;
+class ISensorEventConnection;
+
+class ISensorServer : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(SensorServer);
+
+ virtual Vector<Sensor> getSensorList() = 0;
+ virtual sp<ISensorEventConnection> createSensorEventConnection() = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnSensorServer : public BnInterface<ISensorServer>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+// ----------------------------------------------------------------------------
+}; // namespace android
+
+#endif // ANDROID_GUI_ISENSORSERVER_H
diff --git a/include/gui/Sensor.h b/include/gui/Sensor.h
new file mode 100644
index 0000000..86a16f1
--- /dev/null
+++ b/include/gui/Sensor.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_GUI_SENSOR_H
+#define ANDROID_GUI_SENSOR_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/String8.h>
+#include <utils/Flattenable.h>
+
+#include <hardware/sensors.h>
+
+#include <android/sensor.h>
+
+// ----------------------------------------------------------------------------
+// Concrete types for the NDK
+struct ASensor { };
+
+// ----------------------------------------------------------------------------
+namespace android {
+// ----------------------------------------------------------------------------
+
+class Parcel;
+
+// ----------------------------------------------------------------------------
+
+class Sensor : public ASensor, public Flattenable
+{
+public:
+ enum {
+ TYPE_ACCELEROMETER = ASENSOR_TYPE_ACCELEROMETER,
+ TYPE_MAGNETIC_FIELD = ASENSOR_TYPE_MAGNETIC_FIELD,
+ TYPE_GYROSCOPE = ASENSOR_TYPE_GYROSCOPE,
+ TYPE_LIGHT = ASENSOR_TYPE_LIGHT,
+ TYPE_PROXIMITY = ASENSOR_TYPE_PROXIMITY
+ };
+
+ Sensor();
+ virtual ~Sensor();
+
+ const String8& getName() const;
+ const String8& getVendor() const;
+ int32_t getHandle() const;
+ int32_t getType() const;
+ float getMinValue() const;
+ float getMaxValue() const;
+ float getResolution() const;
+ float getPowerUsage() const;
+
+ // Flattenable interface
+ virtual size_t getFlattenedSize() const;
+ virtual size_t getFdCount() const;
+ virtual status_t flatten(void* buffer, size_t size,
+ int fds[], size_t count) const;
+ virtual status_t unflatten(void const* buffer, size_t size,
+ int fds[], size_t count);
+
+private:
+ String8 mName;
+ String8 mVendor;
+ int32_t mHandle;
+ int32_t mType;
+ float mMinValue;
+ float mMaxValue;
+ float mResolution;
+ float mPower;
+};
+
+// ----------------------------------------------------------------------------
+}; // namespace android
+
+#endif // ANDROID_GUI_SENSOR_H
diff --git a/include/gui/SensorChannel.h b/include/gui/SensorChannel.h
new file mode 100644
index 0000000..bb54618
--- /dev/null
+++ b/include/gui/SensorChannel.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_GUI_SENSOR_CHANNEL_H
+#define ANDROID_GUI_SENSOR_CHANNEL_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+
+namespace android {
+// ----------------------------------------------------------------------------
+class Parcel;
+
+class SensorChannel : public RefBase
+{
+public:
+
+ SensorChannel();
+ SensorChannel(const Parcel& data);
+ virtual ~SensorChannel();
+
+ int getFd() const;
+ ssize_t write(void const* vaddr, size_t size);
+ ssize_t read(void* vaddr, size_t size);
+
+ status_t writeToParcel(Parcel* reply) const;
+
+private:
+ int mSendFd;
+ mutable int mReceiveFd;
+};
+
+// ----------------------------------------------------------------------------
+}; // namespace android
+
+#endif // ANDROID_GUI_SENSOR_CHANNEL_H
diff --git a/include/gui/SensorEventQueue.h b/include/gui/SensorEventQueue.h
new file mode 100644
index 0000000..d8d8128
--- /dev/null
+++ b/include/gui/SensorEventQueue.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SENSOR_EVENT_QUEUE_H
+#define ANDROID_SENSOR_EVENT_QUEUE_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/Timers.h>
+
+#include <gui/SensorChannel.h>
+
+// ----------------------------------------------------------------------------
+
+struct ALooper;
+struct ASensorEvent;
+
+// Concrete types for the NDK
+struct ASensorEventQueue {
+ ALooper* looper;
+};
+
+// ----------------------------------------------------------------------------
+namespace android {
+// ----------------------------------------------------------------------------
+
+class ISensorEventConnection;
+class Sensor;
+
+// ----------------------------------------------------------------------------
+
+class SensorEventQueue : public ASensorEventQueue, public RefBase
+{
+public:
+ SensorEventQueue(const sp<ISensorEventConnection>& connection);
+ virtual ~SensorEventQueue();
+ virtual void onFirstRef();
+
+ int getFd() const;
+ ssize_t write(ASensorEvent const* events, size_t numEvents);
+ ssize_t read(ASensorEvent* events, size_t numEvents);
+
+ status_t enableSensor(Sensor const* sensor) const;
+ status_t disableSensor(Sensor const* sensor) const;
+ status_t setEventRate(Sensor const* sensor, nsecs_t ns) const;
+
+private:
+ sp<ISensorEventConnection> mSensorEventConnection;
+ sp<SensorChannel> mSensorChannel;
+};
+
+// ----------------------------------------------------------------------------
+}; // namespace android
+
+#endif // ANDROID_SENSOR_EVENT_QUEUE_H
diff --git a/include/gui/SensorManager.h b/include/gui/SensorManager.h
new file mode 100644
index 0000000..0d65334
--- /dev/null
+++ b/include/gui/SensorManager.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_GUI_SENSOR_MANAGER_H
+#define ANDROID_GUI_SENSOR_MANAGER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/Singleton.h>
+#include <utils/Vector.h>
+
+#include <gui/SensorEventQueue.h>
+
+// ----------------------------------------------------------------------------
+// Concrete types for the NDK
+struct ASensorManager { };
+
+// ----------------------------------------------------------------------------
+namespace android {
+// ----------------------------------------------------------------------------
+
+class ISensorServer;
+class Sensor;
+class SensorEventQueue;
+
+// ----------------------------------------------------------------------------
+
+class SensorManager : public ASensorManager, public Singleton<SensorManager>
+{
+public:
+ SensorManager();
+ ~SensorManager();
+
+ ssize_t getSensorList(Sensor**) const;
+ Sensor* getDefaultSensor(int type);
+ sp<SensorEventQueue> createEventQueue();
+
+private:
+ sp<ISensorServer> mSensorServer;
+ Sensor* mSensorList;
+ Vector<Sensor> mSensors;
+};
+
+// ----------------------------------------------------------------------------
+}; // namespace android
+
+#endif // ANDROID_GUI_SENSOR_MANAGER_H
diff --git a/include/media/EffectApi.h b/include/media/EffectApi.h
index b4d738c..9f3d0b6 100644
--- a/include/media/EffectApi.h
+++ b/include/media/EffectApi.h
@@ -223,6 +223,11 @@
// samples as specified in output buffer descriptor. If the buffer descriptor
// is not specified the function must use either the buffer or the
// buffer provider function installed by the EFFECT_CMD_CONFIGURE command.
+// The effect framework will call the process() function after the EFFECT_CMD_ENABLE
+// command is received and until the EFFECT_CMD_DISABLE command is received. When the engine
+// receives the EFFECT_CMD_DISABLE command it should turn off the effect gracefully
+// and when done indicate that it is OK to stop calling the process() function by
+// returning the -ENODATA status.
//
// NOTE: the process() function implementation should be "real-time safe" that is
// it should not perform blocking calls: malloc/free, sleep, read/write/open/close,
@@ -239,6 +244,8 @@
//
// Output:
// returned value: 0 successful operation
+// -ENODATA the engine has finished the disable phase and the framework
+// can stop calling process()
// -EINVAL invalid interface handle or
// invalid input/output buffer description
////////////////////////////////////////////////////////////////////////////////
diff --git a/include/media/EffectEqualizerApi.h b/include/media/EffectEqualizerApi.h
index e3069d5..cb05b32 100644
--- a/include/media/EffectEqualizerApi.h
+++ b/include/media/EffectEqualizerApi.h
@@ -19,14 +19,13 @@
#include <media/EffectApi.h>
+// for the definition of SL_IID_EQUALIZER
+#include "OpenSLES.h"
+
#if __cplusplus
extern "C" {
#endif
-//TODO replace by openSL ES include when available
-static const effect_uuid_t SL_IID_EQUALIZER_ = { 0x0bed4300, 0xddd6, 0x11db, 0x8f34, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } };
-const effect_uuid_t * const SL_IID_EQUALIZER = &SL_IID_EQUALIZER_;
-
/* enumerated parameters for Equalizer effect */
typedef enum
{
diff --git a/include/ui/Input.h b/include/ui/Input.h
index a2e0ba06..a7d23d4 100644
--- a/include/ui/Input.h
+++ b/include/ui/Input.h
@@ -43,7 +43,9 @@
/*
* Declare a concrete type for the NDK's input event forward declaration.
*/
-struct AInputEvent { };
+struct AInputEvent {
+ virtual ~AInputEvent() { }
+};
namespace android {
diff --git a/include/ui/InputDevice.h b/include/ui/InputDevice.h
new file mode 100644
index 0000000..4420600
--- /dev/null
+++ b/include/ui/InputDevice.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _UI_INPUT_DEVICE_H
+#define _UI_INPUT_DEVICE_H
+
+#include <ui/EventHub.h>
+#include <ui/Input.h>
+#include <utils/KeyedVector.h>
+#include <utils/threads.h>
+#include <utils/Timers.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/BitSet.h>
+
+#include <stddef.h>
+#include <unistd.h>
+
+/* Maximum pointer id value supported.
+ * (This is limited by our use of BitSet32 to track pointer assignments.) */
+#define MAX_POINTER_ID 31
+
+/* Maximum number of historical samples to average. */
+#define AVERAGING_HISTORY_SIZE 5
+
+
+namespace android {
+
+extern int32_t updateMetaState(int32_t keyCode, bool down, int32_t oldMetaState);
+extern int32_t rotateKeyCode(int32_t keyCode, int32_t orientation);
+
+/*
+ * An input device structure tracks the state of a single input device.
+ *
+ * This structure is only used by ReaderThread and is not intended to be shared with
+ * DispatcherThread (because that would require locking). This works out fine because
+ * DispatcherThread is only interested in cooked event data anyways and does not need
+ * any of the low-level data from InputDevice.
+ */
+struct InputDevice {
+ struct AbsoluteAxisInfo {
+ bool valid; // set to true if axis parameters are known, false otherwise
+
+ int32_t minValue; // minimum value
+ int32_t maxValue; // maximum value
+ int32_t range; // range of values, equal to maxValue - minValue
+ int32_t flat; // center flat position, eg. flat == 8 means center is between -8 and 8
+ int32_t fuzz; // error tolerance, eg. fuzz == 4 means value is +/- 4 due to noise
+ };
+
+ struct VirtualKey {
+ int32_t keyCode;
+ int32_t scanCode;
+ uint32_t flags;
+
+ // computed hit box, specified in touch screen coords based on known display size
+ int32_t hitLeft;
+ int32_t hitTop;
+ int32_t hitRight;
+ int32_t hitBottom;
+
+ inline bool isHit(int32_t x, int32_t y) const {
+ return x >= hitLeft && x <= hitRight && y >= hitTop && y <= hitBottom;
+ }
+ };
+
+ struct KeyboardState {
+ struct Current {
+ int32_t metaState;
+ nsecs_t downTime; // time of most recent key down
+ } current;
+
+ void reset();
+ };
+
+ struct TrackballState {
+ struct Accumulator {
+ enum {
+ FIELD_BTN_MOUSE = 1,
+ FIELD_REL_X = 2,
+ FIELD_REL_Y = 4
+ };
+
+ uint32_t fields;
+
+ bool btnMouse;
+ int32_t relX;
+ int32_t relY;
+
+ inline void clear() {
+ fields = 0;
+ }
+
+ inline bool isDirty() {
+ return fields != 0;
+ }
+ } accumulator;
+
+ struct Current {
+ bool down;
+ nsecs_t downTime;
+ } current;
+
+ struct Precalculated {
+ float xScale;
+ float yScale;
+ float xPrecision;
+ float yPrecision;
+ } precalculated;
+
+ void reset();
+ };
+
+ struct SingleTouchScreenState {
+ struct Accumulator {
+ enum {
+ FIELD_BTN_TOUCH = 1,
+ FIELD_ABS_X = 2,
+ FIELD_ABS_Y = 4,
+ FIELD_ABS_PRESSURE = 8,
+ FIELD_ABS_TOOL_WIDTH = 16
+ };
+
+ uint32_t fields;
+
+ bool btnTouch;
+ int32_t absX;
+ int32_t absY;
+ int32_t absPressure;
+ int32_t absToolWidth;
+
+ inline void clear() {
+ fields = 0;
+ }
+
+ inline bool isDirty() {
+ return fields != 0;
+ }
+ } accumulator;
+
+ struct Current {
+ bool down;
+ int32_t x;
+ int32_t y;
+ int32_t pressure;
+ int32_t size;
+ } current;
+
+ void reset();
+ };
+
+ struct MultiTouchScreenState {
+ struct Accumulator {
+ enum {
+ FIELD_ABS_MT_POSITION_X = 1,
+ FIELD_ABS_MT_POSITION_Y = 2,
+ FIELD_ABS_MT_TOUCH_MAJOR = 4,
+ FIELD_ABS_MT_WIDTH_MAJOR = 8,
+ FIELD_ABS_MT_TRACKING_ID = 16
+ };
+
+ uint32_t pointerCount;
+ struct Pointer {
+ uint32_t fields;
+
+ int32_t absMTPositionX;
+ int32_t absMTPositionY;
+ int32_t absMTTouchMajor;
+ int32_t absMTWidthMajor;
+ int32_t absMTTrackingId;
+
+ inline void clear() {
+ fields = 0;
+ }
+ } pointers[MAX_POINTERS + 1]; // + 1 to remove the need for extra range checks
+
+ inline void clear() {
+ pointerCount = 0;
+ pointers[0].clear();
+ }
+
+ inline bool isDirty() {
+ return pointerCount != 0;
+ }
+ } accumulator;
+
+ void reset();
+ };
+
+ struct PointerData {
+ uint32_t id;
+ int32_t x;
+ int32_t y;
+ int32_t pressure;
+ int32_t size;
+ };
+
+ struct TouchData {
+ uint32_t pointerCount;
+ PointerData pointers[MAX_POINTERS];
+ BitSet32 idBits;
+ uint32_t idToIndex[MAX_POINTER_ID + 1];
+
+ void copyFrom(const TouchData& other);
+
+ inline void clear() {
+ pointerCount = 0;
+ idBits.clear();
+ }
+ };
+
+ // common state used for both single-touch and multi-touch screens after the initial
+ // touch decoding has been performed
+ struct TouchScreenState {
+ Vector<VirtualKey> virtualKeys;
+
+ struct Parameters {
+ bool useBadTouchFilter;
+ bool useJumpyTouchFilter;
+ bool useAveragingTouchFilter;
+
+ AbsoluteAxisInfo xAxis;
+ AbsoluteAxisInfo yAxis;
+ AbsoluteAxisInfo pressureAxis;
+ AbsoluteAxisInfo sizeAxis;
+ } parameters;
+
+ // The touch data of the current sample being processed.
+ TouchData currentTouch;
+
+ // The touch data of the previous sample that was processed. This is updated
+ // incrementally while the current sample is being processed.
+ TouchData lastTouch;
+
+ // The time the primary pointer last went down.
+ nsecs_t downTime;
+
+ struct CurrentVirtualKeyState {
+ enum Status {
+ STATUS_UP,
+ STATUS_DOWN,
+ STATUS_CANCELED
+ };
+
+ Status status;
+ nsecs_t downTime;
+ int32_t keyCode;
+ int32_t scanCode;
+ } currentVirtualKey;
+
+ struct AveragingTouchFilterState {
+ // Individual history tracks are stored by pointer id
+ uint32_t historyStart[MAX_POINTERS];
+ uint32_t historyEnd[MAX_POINTERS];
+ struct {
+ struct {
+ int32_t x;
+ int32_t y;
+ int32_t pressure;
+ } pointers[MAX_POINTERS];
+ } historyData[AVERAGING_HISTORY_SIZE];
+ } averagingTouchFilter;
+
+ struct JumpTouchFilterState {
+ int32_t jumpyPointsDropped;
+ } jumpyTouchFilter;
+
+ struct Precalculated {
+ int32_t xOrigin;
+ float xScale;
+
+ int32_t yOrigin;
+ float yScale;
+
+ int32_t pressureOrigin;
+ float pressureScale;
+
+ int32_t sizeOrigin;
+ float sizeScale;
+ } precalculated;
+
+ void reset();
+
+ bool applyBadTouchFilter();
+ bool applyJumpyTouchFilter();
+ void applyAveragingTouchFilter();
+ void calculatePointerIds();
+
+ bool isPointInsideDisplay(int32_t x, int32_t y) const;
+ const InputDevice::VirtualKey* findVirtualKeyHit() const;
+ };
+
+ InputDevice(int32_t id, uint32_t classes, String8 name);
+
+ int32_t id;
+ uint32_t classes;
+ String8 name;
+ bool ignored;
+
+ KeyboardState keyboard;
+ TrackballState trackball;
+ TouchScreenState touchScreen;
+ union {
+ SingleTouchScreenState singleTouchScreen;
+ MultiTouchScreenState multiTouchScreen;
+ };
+
+ void reset();
+
+ inline bool isKeyboard() const { return classes & INPUT_DEVICE_CLASS_KEYBOARD; }
+ inline bool isAlphaKey() const { return classes & INPUT_DEVICE_CLASS_ALPHAKEY; }
+ inline bool isTrackball() const { return classes & INPUT_DEVICE_CLASS_TRACKBALL; }
+ inline bool isDPad() const { return classes & INPUT_DEVICE_CLASS_DPAD; }
+ inline bool isSingleTouchScreen() const { return (classes
+ & (INPUT_DEVICE_CLASS_TOUCHSCREEN | INPUT_DEVICE_CLASS_TOUCHSCREEN_MT))
+ == INPUT_DEVICE_CLASS_TOUCHSCREEN; }
+ inline bool isMultiTouchScreen() const { return classes
+ & INPUT_DEVICE_CLASS_TOUCHSCREEN_MT; }
+ inline bool isTouchScreen() const { return classes
+ & (INPUT_DEVICE_CLASS_TOUCHSCREEN | INPUT_DEVICE_CLASS_TOUCHSCREEN_MT); }
+};
+
+} // namespace android
+
+#endif // _UI_INPUT_DEVICE_H
diff --git a/include/ui/InputReader.h b/include/ui/InputReader.h
index 03c8112..85a0084 100644
--- a/include/ui/InputReader.h
+++ b/include/ui/InputReader.h
@@ -19,6 +19,7 @@
#include <ui/EventHub.h>
#include <ui/Input.h>
+#include <ui/InputDevice.h>
#include <ui/InputDispatcher.h>
#include <utils/KeyedVector.h>
#include <utils/threads.h>
@@ -30,311 +31,8 @@
#include <stddef.h>
#include <unistd.h>
-/* Maximum pointer id value supported.
- * (This is limited by our use of BitSet32 to track pointer assignments.) */
-#define MAX_POINTER_ID 32
-
-/* Maximum number of historical samples to average. */
-#define AVERAGING_HISTORY_SIZE 5
-
-
namespace android {
-extern int32_t updateMetaState(int32_t keyCode, bool down, int32_t oldMetaState);
-extern int32_t rotateKeyCode(int32_t keyCode, int32_t orientation);
-
-/*
- * An input device structure tracks the state of a single input device.
- *
- * This structure is only used by ReaderThread and is not intended to be shared with
- * DispatcherThread (because that would require locking). This works out fine because
- * DispatcherThread is only interested in cooked event data anyways and does not need
- * any of the low-level data from InputDevice.
- */
-struct InputDevice {
- struct AbsoluteAxisInfo {
- bool valid; // set to true if axis parameters are known, false otherwise
-
- int32_t minValue; // minimum value
- int32_t maxValue; // maximum value
- int32_t range; // range of values, equal to maxValue - minValue
- int32_t flat; // center flat position, eg. flat == 8 means center is between -8 and 8
- int32_t fuzz; // error tolerance, eg. fuzz == 4 means value is +/- 4 due to noise
- };
-
- struct VirtualKey {
- int32_t keyCode;
- int32_t scanCode;
- uint32_t flags;
-
- // computed hit box, specified in touch screen coords based on known display size
- int32_t hitLeft;
- int32_t hitTop;
- int32_t hitRight;
- int32_t hitBottom;
-
- inline bool isHit(int32_t x, int32_t y) const {
- return x >= hitLeft && x <= hitRight && y >= hitTop && y <= hitBottom;
- }
- };
-
- struct KeyboardState {
- struct Current {
- int32_t metaState;
- nsecs_t downTime; // time of most recent key down
- } current;
-
- void reset();
- };
-
- struct TrackballState {
- struct Accumulator {
- enum {
- FIELD_BTN_MOUSE = 1,
- FIELD_REL_X = 2,
- FIELD_REL_Y = 4
- };
-
- uint32_t fields;
-
- bool btnMouse;
- int32_t relX;
- int32_t relY;
-
- inline void clear() {
- fields = 0;
- }
-
- inline bool isDirty() {
- return fields != 0;
- }
- } accumulator;
-
- struct Current {
- bool down;
- nsecs_t downTime;
- } current;
-
- struct Precalculated {
- float xScale;
- float yScale;
- float xPrecision;
- float yPrecision;
- } precalculated;
-
- void reset();
- };
-
- struct SingleTouchScreenState {
- struct Accumulator {
- enum {
- FIELD_BTN_TOUCH = 1,
- FIELD_ABS_X = 2,
- FIELD_ABS_Y = 4,
- FIELD_ABS_PRESSURE = 8,
- FIELD_ABS_TOOL_WIDTH = 16
- };
-
- uint32_t fields;
-
- bool btnTouch;
- int32_t absX;
- int32_t absY;
- int32_t absPressure;
- int32_t absToolWidth;
-
- inline void clear() {
- fields = 0;
- }
-
- inline bool isDirty() {
- return fields != 0;
- }
- } accumulator;
-
- struct Current {
- bool down;
- int32_t x;
- int32_t y;
- int32_t pressure;
- int32_t size;
- } current;
-
- void reset();
- };
-
- struct MultiTouchScreenState {
- struct Accumulator {
- enum {
- FIELD_ABS_MT_POSITION_X = 1,
- FIELD_ABS_MT_POSITION_Y = 2,
- FIELD_ABS_MT_TOUCH_MAJOR = 4,
- FIELD_ABS_MT_WIDTH_MAJOR = 8,
- FIELD_ABS_MT_TRACKING_ID = 16
- };
-
- uint32_t pointerCount;
- struct Pointer {
- uint32_t fields;
-
- int32_t absMTPositionX;
- int32_t absMTPositionY;
- int32_t absMTTouchMajor;
- int32_t absMTWidthMajor;
- int32_t absMTTrackingId;
-
- inline void clear() {
- fields = 0;
- }
- } pointers[MAX_POINTERS + 1]; // + 1 to remove the need for extra range checks
-
- inline void clear() {
- pointerCount = 0;
- pointers[0].clear();
- }
-
- inline bool isDirty() {
- return pointerCount != 0;
- }
- } accumulator;
-
- void reset();
- };
-
- struct PointerData {
- uint32_t id;
- int32_t x;
- int32_t y;
- int32_t pressure;
- int32_t size;
- };
-
- struct TouchData {
- uint32_t pointerCount;
- PointerData pointers[MAX_POINTERS];
- BitSet32 idBits;
- uint32_t idToIndex[MAX_POINTER_ID];
-
- void copyFrom(const TouchData& other);
-
- inline void clear() {
- pointerCount = 0;
- idBits.clear();
- }
- };
-
- // common state used for both single-touch and multi-touch screens after the initial
- // touch decoding has been performed
- struct TouchScreenState {
- Vector<VirtualKey> virtualKeys;
-
- struct Parameters {
- bool useBadTouchFilter;
- bool useJumpyTouchFilter;
- bool useAveragingTouchFilter;
-
- AbsoluteAxisInfo xAxis;
- AbsoluteAxisInfo yAxis;
- AbsoluteAxisInfo pressureAxis;
- AbsoluteAxisInfo sizeAxis;
- } parameters;
-
- // The touch data of the current sample being processed.
- TouchData currentTouch;
-
- // The touch data of the previous sample that was processed. This is updated
- // incrementally while the current sample is being processed.
- TouchData lastTouch;
-
- // The time the primary pointer last went down.
- nsecs_t downTime;
-
- struct CurrentVirtualKeyState {
- enum Status {
- STATUS_UP,
- STATUS_DOWN,
- STATUS_CANCELED
- };
-
- Status status;
- nsecs_t downTime;
- int32_t keyCode;
- int32_t scanCode;
- } currentVirtualKey;
-
- struct AveragingTouchFilterState {
- // Individual history tracks are stored by pointer id
- uint32_t historyStart[MAX_POINTERS];
- uint32_t historyEnd[MAX_POINTERS];
- struct {
- struct {
- int32_t x;
- int32_t y;
- int32_t pressure;
- } pointers[MAX_POINTERS];
- } historyData[AVERAGING_HISTORY_SIZE];
- } averagingTouchFilter;
-
- struct JumpTouchFilterState {
- int32_t jumpyPointsDropped;
- } jumpyTouchFilter;
-
- struct Precalculated {
- int32_t xOrigin;
- float xScale;
-
- int32_t yOrigin;
- float yScale;
-
- int32_t pressureOrigin;
- float pressureScale;
-
- int32_t sizeOrigin;
- float sizeScale;
- } precalculated;
-
- void reset();
-
- bool applyBadTouchFilter();
- bool applyJumpyTouchFilter();
- void applyAveragingTouchFilter();
- void calculatePointerIds();
-
- bool isPointInsideDisplay(int32_t x, int32_t y) const;
- const InputDevice::VirtualKey* findVirtualKeyHit() const;
- };
-
- InputDevice(int32_t id, uint32_t classes, String8 name);
-
- int32_t id;
- uint32_t classes;
- String8 name;
- bool ignored;
-
- KeyboardState keyboard;
- TrackballState trackball;
- TouchScreenState touchScreen;
- union {
- SingleTouchScreenState singleTouchScreen;
- MultiTouchScreenState multiTouchScreen;
- };
-
- void reset();
-
- inline bool isKeyboard() const { return classes & INPUT_DEVICE_CLASS_KEYBOARD; }
- inline bool isAlphaKey() const { return classes & INPUT_DEVICE_CLASS_ALPHAKEY; }
- inline bool isTrackball() const { return classes & INPUT_DEVICE_CLASS_TRACKBALL; }
- inline bool isDPad() const { return classes & INPUT_DEVICE_CLASS_DPAD; }
- inline bool isSingleTouchScreen() const { return (classes
- & (INPUT_DEVICE_CLASS_TOUCHSCREEN | INPUT_DEVICE_CLASS_TOUCHSCREEN_MT))
- == INPUT_DEVICE_CLASS_TOUCHSCREEN; }
- inline bool isMultiTouchScreen() const { return classes
- & INPUT_DEVICE_CLASS_TOUCHSCREEN_MT; }
- inline bool isTouchScreen() const { return classes
- & (INPUT_DEVICE_CLASS_TOUCHSCREEN | INPUT_DEVICE_CLASS_TOUCHSCREEN_MT); }
-};
-
-
/*
* Input reader policy interface.
*
diff --git a/include/ui/InputTransport.h b/include/ui/InputTransport.h
index 11714d5..226d1d5 100644
--- a/include/ui/InputTransport.h
+++ b/include/ui/InputTransport.h
@@ -331,30 +331,4 @@
} // namespace android
-/*
- * NDK input queue API.
- */
-struct AInputQueue {
-public:
- /* Creates a consumer associated with an input channel. */
- explicit AInputQueue(const android::sp<android::InputChannel>& channel);
-
- /* Destroys the consumer and releases its input channel. */
- virtual ~AInputQueue();
-
- inline android::InputConsumer& getConsumer() { return mConsumer; }
-
- android::status_t consume(android::InputEvent** event);
-
- void setPollLoop(const android::sp<android::PollLoop>& pollLoop) { mPollLoop = pollLoop; }
- const android::sp<android::PollLoop> getPollLoop() const { return mPollLoop; }
-
- virtual void doDefaultKey(android::KeyEvent* keyEvent) = 0;
-
-private:
- android::InputConsumer mConsumer;
- android::PreallocatedInputEventFactory mInputEventFactory;
- android::sp<android::PollLoop> mPollLoop;
-};
-
#endif // _UI_INPUT_TRANSPORT_H
diff --git a/include/ui/KeycodeLabels.h b/include/ui/KeycodeLabels.h
index e81d0f9..c8d6ffc 100755
--- a/include/ui/KeycodeLabels.h
+++ b/include/ui/KeycodeLabels.h
@@ -17,6 +17,8 @@
#ifndef _UI_KEYCODE_LABELS_H
#define _UI_KEYCODE_LABELS_H
+#include <android/keycodes.h>
+
struct KeycodeLabel {
const char *literal;
int value;
@@ -118,117 +120,28 @@
{ "PAGE_DOWN", 93 },
{ "PICTSYMBOLS", 94 },
{ "SWITCH_CHARSET", 95 },
+ { "BUTTON_A", 96 },
+ { "BUTTON_B", 97 },
+ { "BUTTON_C", 98 },
+ { "BUTTON_X", 99 },
+ { "BUTTON_Y", 100 },
+ { "BUTTON_Z", 101 },
+ { "BUTTON_L1", 102 },
+ { "BUTTON_R1", 103 },
+ { "BUTTON_L2", 104 },
+ { "BUTTON_R2", 105 },
+ { "BUTTON_THUMBL", 106 },
+ { "BUTTON_THUMBR", 107 },
+ { "BUTTON_START", 108 },
+ { "BUTTON_SELECT", 109 },
+ { "BUTTON_MODE", 110 },
- // NOTE: If you add a new keycode here you must also add it to:
- // (enum KeyCode, in this file)
- // frameworks/base/core/java/android/view/KeyEvent.java
- // tools/puppet_master/PuppetMaster.nav_keys.py
- // frameworks/base/core/res/res/values/attrs.xml
+ // NOTE: If you add a new keycode here you must also add it to several other files.
+ // Refer to frameworks/base/core/java/android/view/KeyEvent.java for the full list.
{ NULL, 0 }
};
-// These constants need to match the above mappings.
-typedef enum KeyCode {
- kKeyCodeUnknown = 0,
-
- kKeyCodeSoftLeft = 1,
- kKeyCodeSoftRight = 2,
- kKeyCodeHome = 3,
- kKeyCodeBack = 4,
- kKeyCodeCall = 5,
- kKeyCodeEndCall = 6,
- kKeyCode0 = 7,
- kKeyCode1 = 8,
- kKeyCode2 = 9,
- kKeyCode3 = 10,
- kKeyCode4 = 11,
- kKeyCode5 = 12,
- kKeyCode6 = 13,
- kKeyCode7 = 14,
- kKeyCode8 = 15,
- kKeyCode9 = 16,
- kKeyCodeStar = 17,
- kKeyCodePound = 18,
- kKeyCodeDpadUp = 19,
- kKeyCodeDpadDown = 20,
- kKeyCodeDpadLeft = 21,
- kKeyCodeDpadRight = 22,
- kKeyCodeDpadCenter = 23,
- kKeyCodeVolumeUp = 24,
- kKeyCodeVolumeDown = 25,
- kKeyCodePower = 26,
- kKeyCodeCamera = 27,
- kKeyCodeClear = 28,
- kKeyCodeA = 29,
- kKeyCodeB = 30,
- kKeyCodeC = 31,
- kKeyCodeD = 32,
- kKeyCodeE = 33,
- kKeyCodeF = 34,
- kKeyCodeG = 35,
- kKeyCodeH = 36,
- kKeyCodeI = 37,
- kKeyCodeJ = 38,
- kKeyCodeK = 39,
- kKeyCodeL = 40,
- kKeyCodeM = 41,
- kKeyCodeN = 42,
- kKeyCodeO = 43,
- kKeyCodeP = 44,
- kKeyCodeQ = 45,
- kKeyCodeR = 46,
- kKeyCodeS = 47,
- kKeyCodeT = 48,
- kKeyCodeU = 49,
- kKeyCodeV = 50,
- kKeyCodeW = 51,
- kKeyCodeX = 52,
- kKeyCodeY = 53,
- kKeyCodeZ = 54,
- kKeyCodeComma = 55,
- kKeyCodePeriod = 56,
- kKeyCodeAltLeft = 57,
- kKeyCodeAltRight = 58,
- kKeyCodeShiftLeft = 59,
- kKeyCodeShiftRight = 60,
- kKeyCodeTab = 61,
- kKeyCodeSpace = 62,
- kKeyCodeSym = 63,
- kKeyCodeExplorer = 64,
- kKeyCodeEnvelope = 65,
- kKeyCodeNewline = 66,
- kKeyCodeDel = 67,
- kKeyCodeGrave = 68,
- kKeyCodeMinus = 69,
- kKeyCodeEquals = 70,
- kKeyCodeLeftBracket = 71,
- kKeyCodeRightBracket = 72,
- kKeyCodeBackslash = 73,
- kKeyCodeSemicolon = 74,
- kKeyCodeApostrophe = 75,
- kKeyCodeSlash = 76,
- kKeyCodeAt = 77,
- kKeyCodeNum = 78,
- kKeyCodeHeadSetHook = 79,
- kKeyCodeFocus = 80,
- kKeyCodePlus = 81,
- kKeyCodeMenu = 82,
- kKeyCodeNotification = 83,
- kKeyCodeSearch = 84,
- kKeyCodePlayPause = 85,
- kKeyCodeStop = 86,
- kKeyCodeNextSong = 87,
- kKeyCodePreviousSong = 88,
- kKeyCodeRewind = 89,
- kKeyCodeForward = 90,
- kKeyCodeMute = 91,
- kKeyCodePageUp = 92,
- kKeyCodePageDown = 93,
- kKeyCodePictSymbols = 94,
- kKeyCodeSwitchCharset = 95
-} KeyCode;
-
static const KeycodeLabel FLAGS[] = {
{ "WAKE", 0x00000001 },
{ "WAKE_DROPPED", 0x00000002 },
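
The KEYCODES and FLAGS tables are NULL-terminated name/value arrays that the key layout (.kl) parser
scans by literal name, which is why the new BUTTON_* entries only need to be appended before the
terminator. A hypothetical lookup helper of the kind such a parser performs (not part of this change):

    #include <string.h>

    struct KeycodeLabel {
        const char *literal;
        int value;
    };

    // Hypothetical helper: return the value for a label such as "BUTTON_A",
    // or -1 if the label is not present in the NULL-terminated table.
    static int lookupLabel(const KeycodeLabel *table, const char *name) {
        for (const KeycodeLabel *p = table; p->literal != NULL; ++p) {
            if (strcmp(p->literal, name) == 0) {
                return p->value;
            }
        }
        return -1;
    }
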
diff --git a/libs/binder/IPermissionController.cpp b/libs/binder/IPermissionController.cpp
index bff4c9b..e13036f 100644
--- a/libs/binder/IPermissionController.cpp
+++ b/libs/binder/IPermissionController.cpp
@@ -36,7 +36,7 @@
: BpInterface<IPermissionController>(impl)
{
}
-
+
virtual bool checkPermission(const String16& permission, int32_t pid, int32_t uid)
{
Parcel data, reply;
@@ -46,7 +46,7 @@
data.writeInt32(uid);
remote()->transact(CHECK_PERMISSION_TRANSACTION, data, &reply);
// fail on exception
- if (reply.readInt32() != 0) return 0;
+ if (reply.readExceptionCode() != 0) return 0;
return reply.readInt32() != 0;
}
};
@@ -66,8 +66,7 @@
int32_t pid = data.readInt32();
int32_t uid = data.readInt32();
bool res = checkPermission(permission, pid, uid);
- // write exception
- reply->writeInt32(0);
+ reply->writeNoException();
reply->writeInt32(res ? 1 : 0);
return NO_ERROR;
} break;
@@ -77,4 +76,3 @@
}
}; // namespace android
-
diff --git a/libs/binder/IServiceManager.cpp b/libs/binder/IServiceManager.cpp
index a3a3f0e..1fa4c35 100644
--- a/libs/binder/IServiceManager.cpp
+++ b/libs/binder/IServiceManager.cpp
@@ -158,7 +158,7 @@
data.writeString16(name);
data.writeStrongBinder(service);
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
- return err == NO_ERROR ? reply.readInt32() : err;
+ return err == NO_ERROR ? reply.readExceptionCode() : err;
}
virtual Vector<String16> listServices()
diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index c2574bd..47be1bf 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -754,6 +754,11 @@
goto restart_write;
}
+status_t Parcel::writeNoException()
+{
+ return writeInt32(0);
+}
+
void Parcel::remove(size_t start, size_t amt)
{
LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
@@ -942,6 +947,12 @@
return val;
}
+int32_t Parcel::readExceptionCode() const
+{
+ int32_t exception_code = readAligned<int32_t>();
+ // TODO: skip over the response header here, once that's in.
+ return exception_code;
+}
native_handle* Parcel::readNativeHandle() const
{
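
Parcel::writeNoException() and Parcel::readExceptionCode() give native code the same reply convention
the Java binder layer uses: a leading 32-bit exception code, zero on success. The IPermissionController
and IServiceManager changes above switch to these helpers. A hedged sketch of the pattern from both
sides of a call (the transaction code and reply payload are illustrative only):

    #include <binder/Parcel.h>
    #include <binder/IBinder.h>

    using namespace android;

    // Service side, inside onTransact(): write the exception header, then the result.
    static status_t replyWithBool(Parcel* reply, bool result) {
        reply->writeNoException();            // same as writeInt32(0)
        reply->writeInt32(result ? 1 : 0);
        return NO_ERROR;
    }

    // Proxy side: treat a non-zero exception code as failure before reading the result.
    static bool callAndReadBool(const sp<IBinder>& binder, uint32_t code) {
        Parcel data, reply;
        binder->transact(code, data, &reply);
        if (reply.readExceptionCode() != 0) return false;
        return reply.readInt32() != 0;
    }
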
diff --git a/libs/gui/Android.mk b/libs/gui/Android.mk
new file mode 100644
index 0000000..249558a
--- /dev/null
+++ b/libs/gui/Android.mk
@@ -0,0 +1,25 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ ISensorEventConnection.cpp \
+ ISensorServer.cpp \
+ Sensor.cpp \
+ SensorChannel.cpp \
+ SensorEventQueue.cpp \
+ SensorManager.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libutils \
+ libbinder \
+ libhardware \
+ libhardware_legacy
+
+LOCAL_MODULE:= libgui
+
+ifeq ($(TARGET_SIMULATOR),true)
+ LOCAL_LDLIBS += -lpthread
+endif
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/libs/gui/ISensorEventConnection.cpp b/libs/gui/ISensorEventConnection.cpp
new file mode 100644
index 0000000..3e9d456
--- /dev/null
+++ b/libs/gui/ISensorEventConnection.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/Timers.h>
+
+#include <binder/Parcel.h>
+#include <binder/IInterface.h>
+
+#include <gui/ISensorEventConnection.h>
+#include <gui/SensorChannel.h>
+
+namespace android {
+// ----------------------------------------------------------------------------
+
+enum {
+ GET_SENSOR_CHANNEL = IBinder::FIRST_CALL_TRANSACTION,
+ ENABLE_DISABLE,
+ SET_EVENT_RATE
+};
+
+class BpSensorEventConnection : public BpInterface<ISensorEventConnection>
+{
+public:
+ BpSensorEventConnection(const sp<IBinder>& impl)
+ : BpInterface<ISensorEventConnection>(impl)
+ {
+ }
+
+ virtual sp<SensorChannel> getSensorChannel() const
+ {
+ Parcel data, reply;
+ remote()->transact(GET_SENSOR_CHANNEL, data, &reply);
+ return new SensorChannel(reply);
+ }
+
+ virtual status_t enableDisable(int handle, bool enabled)
+ {
+ Parcel data, reply;
+ data.writeInt32(handle);
+ data.writeInt32(enabled);
+ remote()->transact(ENABLE_DISABLE, data, &reply);
+ return reply.readInt32();
+ }
+
+ virtual status_t setEventRate(int handle, nsecs_t ns)
+ {
+ Parcel data, reply;
+ data.writeInt32(handle);
+ data.writeInt64(ns);
+ remote()->transact(SET_EVENT_RATE, data, &reply);
+ return reply.readInt32();
+ }
+};
+
+IMPLEMENT_META_INTERFACE(SensorEventConnection, "android.gui.SensorEventConnection");
+
+// ----------------------------------------------------------------------------
+
+status_t BnSensorEventConnection::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ switch(code) {
+ case GET_SENSOR_CHANNEL: {
+ CHECK_INTERFACE(ISensorEventConnection, data, reply);
+ sp<SensorChannel> channel(getSensorChannel());
+ channel->writeToParcel(reply);
+ return NO_ERROR;
+ } break;
+ case ENABLE_DISABLE: {
+ CHECK_INTERFACE(ISensorEventConnection, data, reply);
+ int handle = data.readInt32();
+ int enabled = data.readInt32();
+ status_t result = enableDisable(handle, enabled);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+ case SET_EVENT_RATE: {
+ CHECK_INTERFACE(ISensorEventConnection, data, reply);
+ int handle = data.readInt32();
+ nsecs_t ns = data.readInt64();
+ status_t result = setEventRate(handle, ns);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+ }
+ return BBinder::onTransact(code, data, reply, flags);
+}
+
+// ----------------------------------------------------------------------------
+}; // namespace android
diff --git a/libs/gui/ISensorServer.cpp b/libs/gui/ISensorServer.cpp
new file mode 100644
index 0000000..c6177bc
--- /dev/null
+++ b/libs/gui/ISensorServer.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/Vector.h>
+#include <utils/Timers.h>
+
+#include <binder/Parcel.h>
+#include <binder/IInterface.h>
+
+#include <gui/Sensor.h>
+#include <gui/ISensorServer.h>
+#include <gui/ISensorEventConnection.h>
+
+namespace android {
+// ----------------------------------------------------------------------------
+
+enum {
+ GET_SENSOR_LIST = IBinder::FIRST_CALL_TRANSACTION,
+ CREATE_SENSOR_EVENT_CONNECTION,
+};
+
+class BpSensorServer : public BpInterface<ISensorServer>
+{
+public:
+ BpSensorServer(const sp<IBinder>& impl)
+ : BpInterface<ISensorServer>(impl)
+ {
+ }
+
+ virtual Vector<Sensor> getSensorList()
+ {
+ Parcel data, reply;
+ remote()->transact(GET_SENSOR_LIST, data, &reply);
+ Sensor s;
+ Vector<Sensor> v;
+ int32_t n = reply.readInt32();
+ v.setCapacity(n);
+ while (n--) {
+ reply.read(static_cast<Flattenable&>(s));
+ v.add(s);
+ }
+ return v;
+ }
+
+ virtual sp<ISensorEventConnection> createSensorEventConnection()
+ {
+ Parcel data, reply;
+ remote()->transact(CREATE_SENSOR_EVENT_CONNECTION, data, &reply);
+ return interface_cast<ISensorEventConnection>(reply.readStrongBinder());
+ }
+};
+
+IMPLEMENT_META_INTERFACE(SensorServer, "android.gui.SensorServer");
+
+// ----------------------------------------------------------------------
+
+status_t BnSensorServer::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ switch(code) {
+ case GET_SENSOR_LIST: {
+ CHECK_INTERFACE(ISensorServer, data, reply);
+ Vector<Sensor> v(getSensorList());
+ size_t n = v.size();
+ reply->writeInt32(n);
+ for (size_t i=0 ; i<n ; i++) {
+ reply->write(static_cast<const Flattenable&>(v[i]));
+ }
+ return NO_ERROR;
+ } break;
+ case CREATE_SENSOR_EVENT_CONNECTION: {
+ CHECK_INTERFACE(ISensorServer, data, reply);
+ sp<ISensorEventConnection> connection(createSensorEventConnection());
+ reply->writeStrongBinder(connection->asBinder());
+ return NO_ERROR;
+ } break;
+ }
+ return BBinder::onTransact(code, data, reply, flags);
+}
+
+// ----------------------------------------------------------------------------
+}; // namespace android
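
For context, a native client would typically reach ISensorServer through the service manager and then
ask it for a connection; a sketch under the assumption that the server registers itself under the name
"sensorservice" (the registration is not part of this change):

    #include <binder/IServiceManager.h>
    #include <gui/ISensorServer.h>
    #include <gui/ISensorEventConnection.h>

    using namespace android;

    static sp<ISensorEventConnection> openSensorConnection() {
        // Assumed service name; the server-side registration happens elsewhere.
        sp<IBinder> binder = defaultServiceManager()->getService(String16("sensorservice"));
        sp<ISensorServer> server = interface_cast<ISensorServer>(binder);
        if (server == NULL) return NULL;
        return server->createSensorEventConnection();
    }
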
diff --git a/libs/gui/Sensor.cpp b/libs/gui/Sensor.cpp
new file mode 100644
index 0000000..1fdd285
--- /dev/null
+++ b/libs/gui/Sensor.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/String8.h>
+#include <utils/Flattenable.h>
+
+#include <hardware/sensors.h>
+
+#include <gui/Sensor.h>
+
+// ----------------------------------------------------------------------------
+namespace android {
+// ----------------------------------------------------------------------------
+
+Sensor::Sensor()
+ : mHandle(0), mType(0),
+ mMinValue(0), mMaxValue(0), mResolution(0),
+ mPower(0)
+{
+}
+
+Sensor::~Sensor()
+{
+}
+
+const String8& Sensor::getName() const {
+ return mName;
+}
+
+const String8& Sensor::getVendor() const {
+ return mVendor;
+}
+
+int32_t Sensor::getHandle() const {
+ return mHandle;
+}
+
+int32_t Sensor::getType() const {
+ return mType;
+}
+
+float Sensor::getMinValue() const {
+ return mMinValue;
+}
+
+float Sensor::getMaxValue() const {
+ return mMaxValue;
+}
+
+float Sensor::getResolution() const {
+ return mResolution;
+}
+
+float Sensor::getPowerUsage() const {
+ return mPower;
+}
+
+size_t Sensor::getFlattenedSize() const
+{
+ return sizeof(int32_t) + ((mName.length() + 3) & ~3) +
+ sizeof(int32_t) + ((mVendor.length() + 3) & ~3) +
+ sizeof(int32_t) * 2 +
+ sizeof(float) * 3;
+}
+
+size_t Sensor::getFdCount() const
+{
+ return 0;
+}
+
+static inline
+size_t write(void* buffer, size_t offset, const String8& value) {
+ memcpy(static_cast<char*>(buffer) + offset, value.string(), value.length());
+ return (value.length() + 3) & ~3;
+}
+
+static inline
+size_t write(void* buffer, size_t offset, float value) {
+ *reinterpret_cast<float*>(static_cast<char*>(buffer) + offset) = value;
+ return sizeof(float);
+}
+
+static inline
+size_t write(void* buffer, size_t offset, int32_t value) {
+ *reinterpret_cast<int32_t*>(static_cast<char*>(buffer) + offset) = value;
+ return sizeof(int32_t);
+}
+
+status_t Sensor::flatten(void* buffer, size_t size,
+ int fds[], size_t count) const
+{
+ if (size < Sensor::getFlattenedSize())
+ return -ENOMEM;
+
+ size_t offset = 0;
+ offset += write(buffer, offset, int32_t(mName.length()));
+ offset += write(buffer, offset, mName);
+ offset += write(buffer, offset, int32_t(mVendor.length()));
+ offset += write(buffer, offset, mVendor);
+ offset += write(buffer, offset, mHandle);
+ offset += write(buffer, offset, mType);
+ offset += write(buffer, offset, mMinValue);
+ offset += write(buffer, offset, mMaxValue);
+ offset += write(buffer, offset, mResolution);
+ offset += write(buffer, offset, mPower);
+
+ return NO_ERROR;
+}
+
+static inline
+size_t read(void const* buffer, size_t offset, String8* value, int32_t len) {
+ value->setTo(static_cast<char const*>(buffer) + offset, len);
+ return (len + 3) & ~3;
+}
+
+static inline
+size_t read(void const* buffer, size_t offset, float* value) {
+ *value = *reinterpret_cast<float const*>(static_cast<char const*>(buffer) + offset);
+ return sizeof(float);
+}
+
+static inline
+size_t read(void const* buffer, size_t offset, int32_t* value) {
+ *value = *reinterpret_cast<int32_t const*>(static_cast<char const*>(buffer) + offset);
+ return sizeof(int32_t);
+}
+
+status_t Sensor::unflatten(void const* buffer, size_t size,
+ int fds[], size_t count)
+{
+ int32_t len;
+ size_t offset = 0;
+ offset += read(buffer, offset, &len);
+ offset += read(buffer, offset, &mName, len);
+ offset += read(buffer, offset, &len);
+ offset += read(buffer, offset, &mVendor, len);
+ offset += read(buffer, offset, &mHandle);
+ offset += read(buffer, offset, &mType);
+ offset += read(buffer, offset, &mMinValue);
+ offset += read(buffer, offset, &mMaxValue);
+ offset += read(buffer, offset, &mResolution);
+ offset += read(buffer, offset, &mPower);
+
+ return NO_ERROR;
+}
+
+// ----------------------------------------------------------------------------
+}; // namespace android
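
Sensor implements the Flattenable protocol, so the flatten()/unflatten() methods above are driven
through Parcel::write()/read(), which is exactly how ISensorServer marshals the sensor list. A small
hedged round-trip sketch:

    #include <binder/Parcel.h>
    #include <gui/Sensor.h>

    using namespace android;

    // Serialize a Sensor into a Parcel and read it back; write()/read() call the
    // flatten()/unflatten() methods defined above. Both strings are stored with a
    // length prefix and padded to a 4-byte boundary.
    static void roundTripSensor(const Sensor& in, Sensor* out) {
        Parcel p;
        p.write(static_cast<const Flattenable&>(in));
        p.setDataPosition(0);
        p.read(static_cast<Flattenable&>(*out));
    }
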
diff --git a/libs/gui/SensorChannel.cpp b/libs/gui/SensorChannel.cpp
new file mode 100644
index 0000000..147e1c2
--- /dev/null
+++ b/libs/gui/SensorChannel.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <utils/Errors.h>
+
+#include <binder/Parcel.h>
+
+#include <gui/SensorChannel.h>
+
+namespace android {
+// ----------------------------------------------------------------------------
+
+SensorChannel::SensorChannel()
+ : mSendFd(-1), mReceiveFd(-1)
+{
+ int fds[2];
+ if (pipe(fds) == 0) {
+ mReceiveFd = fds[0];
+ mSendFd = fds[1];
+ fcntl(mReceiveFd, F_SETFL, O_NONBLOCK);
+ fcntl(mSendFd, F_SETFL, O_NONBLOCK);
+ }
+}
+
+SensorChannel::SensorChannel(const Parcel& data)
+ : mSendFd(-1), mReceiveFd(-1)
+{
+ mReceiveFd = dup(data.readFileDescriptor());
+ fcntl(mReceiveFd, F_SETFL, O_NONBLOCK);
+}
+
+SensorChannel::~SensorChannel()
+{
+ if (mSendFd >= 0)
+ close(mSendFd);
+
+ if (mReceiveFd >= 0)
+ close(mReceiveFd);
+}
+
+int SensorChannel::getFd() const
+{
+ return mReceiveFd;
+}
+
+ssize_t SensorChannel::write(void const* vaddr, size_t size)
+{
+ ssize_t len = ::write(mSendFd, vaddr, size);
+ if (len < 0)
+ return -errno;
+ return len;
+}
+
+ssize_t SensorChannel::read(void* vaddr, size_t size)
+{
+ ssize_t len = ::read(mReceiveFd, vaddr, size);
+ if (len < 0)
+ return -errno;
+ return len;
+}
+
+status_t SensorChannel::writeToParcel(Parcel* reply) const
+{
+ if (mReceiveFd < 0)
+ return -EINVAL;
+
+ status_t result = reply->writeDupFileDescriptor(mReceiveFd);
+ close(mReceiveFd);
+ mReceiveFd = -1;
+ return result;
+}
+
+// ----------------------------------------------------------------------------
+}; // namespace android
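
SensorChannel is a thin wrapper around a non-blocking pipe: the creating side keeps the write end, and
writeToParcel() hands a dup of the read end to the remote process. A hedged sketch of moving fixed-size
event records across it (ASensorEvent is the record type used by SensorEventQueue below):

    #include <gui/SensorChannel.h>
    #include <android/sensor.h>

    using namespace android;

    // Producer side: push one event; write() may return -EAGAIN when the pipe is full.
    static ssize_t pushEvent(SensorChannel& channel, const ASensorEvent& event) {
        return channel.write(&event, sizeof(event));
    }

    // Consumer side: pull one event; returns true only if a whole record was read.
    static bool pullEvent(SensorChannel& channel, ASensorEvent* event) {
        return channel.read(event, sizeof(*event)) == (ssize_t) sizeof(*event);
    }
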
diff --git a/libs/gui/SensorEventQueue.cpp b/libs/gui/SensorEventQueue.cpp
new file mode 100644
index 0000000..f922ac4
--- /dev/null
+++ b/libs/gui/SensorEventQueue.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include <gui/Sensor.h>
+#include <gui/SensorChannel.h>
+#include <gui/SensorEventQueue.h>
+#include <gui/ISensorEventConnection.h>
+
+#include <android/sensor.h>
+
+// ----------------------------------------------------------------------------
+namespace android {
+// ----------------------------------------------------------------------------
+
+SensorEventQueue::SensorEventQueue(const sp<ISensorEventConnection>& connection)
+ : mSensorEventConnection(connection)
+{
+}
+
+SensorEventQueue::~SensorEventQueue()
+{
+}
+
+void SensorEventQueue::onFirstRef()
+{
+ mSensorChannel = mSensorEventConnection->getSensorChannel();
+}
+
+int SensorEventQueue::getFd() const
+{
+ return mSensorChannel->getFd();
+}
+
+ssize_t SensorEventQueue::write(ASensorEvent const* events, size_t numEvents)
+{
+ ssize_t size = mSensorChannel->write(events, numEvents * sizeof(events[0]));
+ if (size >= 0) {
+ if (size % sizeof(events[0])) {
+ // partial write!!! should never happen.
+ return -EINVAL;
+ }
+ // returns number of events written
+ size /= sizeof(events[0]);
+ }
+ return size;
+}
+
+ssize_t SensorEventQueue::read(ASensorEvent* events, size_t numEvents)
+{
+ ssize_t size = mSensorChannel->read(events, numEvents*sizeof(events[0]));
+ if (size >= 0) {
+ if (size % sizeof(events[0])) {
+ // partial read!!! should never happen.
+ return -EINVAL;
+ }
+ // returns number of events read
+ size /= sizeof(events[0]);
+ }
+ return size;
+}
+
+status_t SensorEventQueue::enableSensor(Sensor const* sensor) const
+{
+ return mSensorEventConnection->enableDisable(sensor->getHandle(), true);
+}
+
+status_t SensorEventQueue::disableSensor(Sensor const* sensor) const
+{
+ return mSensorEventConnection->enableDisable(sensor->getHandle(), false);
+}
+
+status_t SensorEventQueue::setEventRate(Sensor const* sensor, nsecs_t ns) const
+{
+ return mSensorEventConnection->setEventRate(sensor->getHandle(), ns);
+}
+
+// ----------------------------------------------------------------------------
+}; // namespace android
+
diff --git a/libs/gui/SensorManager.cpp b/libs/gui/SensorManager.cpp
new file mode 100644
index 0000000..cd89285
--- /dev/null
+++ b/libs/gui/SensorManager.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/Singleton.h>
+
+#include <gui/ISensorServer.h>
+#include <gui/ISensorEventConnection.h>
+#include <gui/Sensor.h>
+#include <gui/SensorManager.h>
+#include <gui/SensorEventQueue.h>
+
+// ----------------------------------------------------------------------------
+namespace android {
+// ----------------------------------------------------------------------------
+
+ANDROID_SINGLETON_STATIC_INSTANCE(SensorManager)
+
+SensorManager::SensorManager()
+ : mSensorList(0)
+{
+ mSensors = mSensorServer->getSensorList();
+ // TODO: needs implementation
+}
+
+SensorManager::~SensorManager()
+{
+ // TODO: needs implementation
+}
+
+ssize_t SensorManager::getSensorList(Sensor** list) const
+{
+ *list = mSensorList;
+ return mSensors.size();
+}
+
+Sensor* SensorManager::getDefaultSensor(int type)
+{
+ // TODO: needs implementation
+ return mSensorList;
+}
+
+sp<SensorEventQueue> SensorManager::createEventQueue()
+{
+ sp<SensorEventQueue> result = new SensorEventQueue(
+ mSensorServer->createSensorEventConnection());
+ return result;
+}
+
+// ----------------------------------------------------------------------------
+}; // namespace android
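
Taken together with SensorEventQueue, the intended client flow (once the TODOs above are filled in) is:
look up a sensor, create an event queue, enable the sensor, then poll the queue's fd and read whole
events. A hedged sketch; getInstance() comes from the Singleton<> base brought in by
ANDROID_SINGLETON_STATIC_INSTANCE, and ASENSOR_TYPE_ACCELEROMETER is the NDK sensor type constant:

    #include <poll.h>

    #include <gui/Sensor.h>
    #include <gui/SensorManager.h>
    #include <gui/SensorEventQueue.h>
    #include <android/sensor.h>

    using namespace android;

    static void readAccelerometerOnce() {
        SensorManager& mgr = SensorManager::getInstance();
        Sensor* sensor = mgr.getDefaultSensor(ASENSOR_TYPE_ACCELEROMETER);
        sp<SensorEventQueue> queue = mgr.createEventQueue();
        queue->enableSensor(sensor);

        struct pollfd fds = { queue->getFd(), POLLIN, 0 };
        if (poll(&fds, 1, 1000 /* ms */) > 0) {
            ASensorEvent events[16];
            ssize_t n = queue->read(events, 16);   // n is a count of events, not bytes
            (void) n;
        }
        queue->disableSensor(sensor);
    }
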
diff --git a/libs/hwui/Matrix.h b/libs/hwui/Matrix.h
index 40c80fa..ba5be03 100644
--- a/libs/hwui/Matrix.h
+++ b/libs/hwui/Matrix.h
@@ -88,6 +88,9 @@
void copyTo(float* v) const;
void copyTo(SkMatrix& v) const;
+ /**
+ * Does not apply rotations!
+ */
void mapRect(Rect& r) const;
float getTranslateX();
diff --git a/libs/hwui/OpenGLRenderer.cpp b/libs/hwui/OpenGLRenderer.cpp
index 6d041ae..12b0dea 100644
--- a/libs/hwui/OpenGLRenderer.cpp
+++ b/libs/hwui/OpenGLRenderer.cpp
@@ -118,6 +118,7 @@
mDrawColorShader = new DrawColorProgram;
mDrawTextureShader = new DrawTextureProgram;
+ mCurrentShader = mDrawTextureShader;
memcpy(mDrawTextureVertices, gDrawTextureVertices, sizeof(gDrawTextureVertices));
}
@@ -127,6 +128,7 @@
mTextureCache.clear();
mLayerCache.clear();
+ mPatchCache.clear();
}
///////////////////////////////////////////////////////////////////////////////
@@ -136,9 +138,7 @@
void OpenGLRenderer::setViewport(int width, int height) {
glViewport(0, 0, width, height);
- mat4 ortho;
- ortho.loadOrtho(0, width, height, 0, -1, 1);
- ortho.copyTo(mOrthoMatrix);
+ mOrthoMatrix.loadOrtho(0, width, height, 0, -1, 1);
mWidth = width;
mHeight = height;
@@ -208,7 +208,7 @@
sp<Snapshot> previous = mSnapshot->previous;
if (restoreOrtho) {
- memcpy(mOrthoMatrix, current->orthoMatrix, sizeof(mOrthoMatrix));
+ mOrthoMatrix.load(current->orthoMatrix);
}
if (restoreLayer) {
@@ -333,12 +333,10 @@
mSnapshot->flags = Snapshot::kFlagDirtyTransform | Snapshot::kFlagDirtyOrtho |
Snapshot::kFlagClipSet;
- memcpy(mSnapshot->orthoMatrix, mOrthoMatrix, sizeof(mOrthoMatrix));
+ mSnapshot->orthoMatrix.load(mOrthoMatrix);
// Change the ortho projection
- mat4 ortho;
- ortho.loadOrtho(0.0f, right - left, bottom - top, 0.0f, 0.0f, 1.0f);
- ortho.copyTo(mOrthoMatrix);
+ mOrthoMatrix.loadOrtho(0.0f, right - left, bottom - top, 0.0f, 0.0f, 1.0f);
return true;
}
@@ -408,7 +406,7 @@
}
bool OpenGLRenderer::clipRect(float left, float top, float right, float bottom) {
- bool clipped = mSnapshot->clipRect.intersect(left, top, right, bottom);
+ bool clipped = mSnapshot->clip(left, top, right, bottom);
if (clipped) {
mSnapshot->flags |= Snapshot::kFlagClipSet;
setScissorFromClip();
@@ -421,8 +419,15 @@
///////////////////////////////////////////////////////////////////////////////
void OpenGLRenderer::drawBitmap(SkBitmap* bitmap, float left, float top, const SkPaint* paint) {
+ const float right = left + bitmap->width();
+ const float bottom = top + bitmap->height();
+
+ if (quickReject(left, top, right, bottom)) {
+ return;
+ }
+
const Texture* texture = mTextureCache.get(bitmap);
- drawTextureRect(left, top, left + texture->width, top + texture->height, texture, paint);
+ drawTextureRect(left, top, right, bottom, texture, paint);
}
void OpenGLRenderer::drawBitmap(SkBitmap* bitmap, const SkMatrix* matrix, const SkPaint* paint) {
@@ -430,6 +435,10 @@
const mat4 transform(*matrix);
transform.mapRect(r);
+ if (quickReject(r.left, r.top, r.right, r.bottom)) {
+ return;
+ }
+
const Texture* texture = mTextureCache.get(bitmap);
drawTextureRect(r.left, r.top, r.right, r.bottom, texture, paint);
}
@@ -438,6 +447,10 @@
float srcLeft, float srcTop, float srcRight, float srcBottom,
float dstLeft, float dstTop, float dstRight, float dstBottom,
const SkPaint* paint) {
+ if (quickReject(dstLeft, dstTop, dstRight, dstBottom)) {
+ return;
+ }
+
const Texture* texture = mTextureCache.get(bitmap);
const float width = texture->width;
@@ -457,6 +470,10 @@
void OpenGLRenderer::drawPatch(SkBitmap* bitmap, Res_png_9patch* patch,
float left, float top, float right, float bottom, const SkPaint* paint) {
+ if (quickReject(left, top, right, bottom)) {
+ return;
+ }
+
const Texture* texture = mTextureCache.get(bitmap);
int alpha;
@@ -469,17 +486,21 @@
// Specify right and bottom as +1.0f from left/top to prevent scaling since the
// patch mesh already defines the final size
- drawTextureMesh(left, top, left + 1.0f, top + 1.0f, texture->id, alpha / 255.0f, mode,
- texture->blend, true, &mesh->vertices[0].position[0],
+ drawTextureMesh(left, top, left + 1.0f, top + 1.0f, texture->id, alpha / 255.0f,
+ mode, texture->blend, &mesh->vertices[0].position[0],
&mesh->vertices[0].texture[0], mesh->indices, mesh->indicesCount);
}
void OpenGLRenderer::drawColor(int color, SkXfermode::Mode mode) {
- const Rect& clip = mSnapshot->clipRect;
- drawColorRect(clip.left, clip.top, clip.right, clip.bottom, color, mode);
+ const Rect& clip = mSnapshot->getMappedClip();
+ drawColorRect(clip.left, clip.top, clip.right, clip.bottom, color, mode, true);
}
void OpenGLRenderer::drawRect(float left, float top, float right, float bottom, const SkPaint* p) {
+ if (quickReject(left, top, right, bottom)) {
+ return;
+ }
+
SkXfermode::Mode mode;
const bool isMode = SkXfermode::IsMode(p->getXfermode(), &mode);
@@ -498,73 +519,79 @@
drawColorRect(left, top, right, bottom, color, mode);
}
+///////////////////////////////////////////////////////////////////////////////
+// Drawing implementation
+///////////////////////////////////////////////////////////////////////////////
+
void OpenGLRenderer::drawColorRect(float left, float top, float right, float bottom,
- int color, SkXfermode::Mode mode) {
+ int color, SkXfermode::Mode mode, bool ignoreTransform) {
const int alpha = (color >> 24) & 0xFF;
const GLfloat a = alpha / 255.0f;
const GLfloat r = ((color >> 16) & 0xFF) / 255.0f;
const GLfloat g = ((color >> 8) & 0xFF) / 255.0f;
const GLfloat b = ((color ) & 0xFF) / 255.0f;
- chooseBlending(alpha < 255, mode, true);
+ // Pre-multiplication happens when setting the shader color
+ chooseBlending(alpha < 255, mode);
mModelView.loadTranslate(left, top, 0.0f);
mModelView.scale(right - left, bottom - top, 1.0f);
- mDrawColorShader->use(&mOrthoMatrix[0], &mModelView.data[0], &mSnapshot->transform.data[0]);
+ const bool inUse = useShader(mDrawColorShader);
+ if (!ignoreTransform) {
+ mDrawColorShader->set(mOrthoMatrix, mModelView, mSnapshot->transform);
+ } else {
+ mat4 identity;
+ mDrawColorShader->set(mOrthoMatrix, mModelView, identity);
+ }
- const GLvoid* p = &gDrawColorVertices[0].position[0];
-
- glEnableVertexAttribArray(mDrawColorShader->position);
- glVertexAttribPointer(mDrawColorShader->position, 2, GL_FLOAT, GL_FALSE,
- gDrawColorVertexStride, p);
- glUniform4f(mDrawColorShader->color, r, g, b, a);
+ if (!inUse) {
+ const GLvoid* p = &gDrawColorVertices[0].position[0];
+ glVertexAttribPointer(mDrawColorShader->position, 2, GL_FLOAT, GL_FALSE,
+ gDrawColorVertexStride, p);
+ }
+ // Render using pre-multiplied alpha
+ glUniform4f(mDrawColorShader->color, r * a, g * a, b * a, a);
glDrawArrays(GL_TRIANGLE_STRIP, 0, gDrawColorVertexCount);
-
- glDisableVertexAttribArray(mDrawColorShader->position);
}
void OpenGLRenderer::drawTextureRect(float left, float top, float right, float bottom,
- const Texture* texture, const SkPaint* paint, bool isPremultiplied) {
+ const Texture* texture, const SkPaint* paint) {
int alpha;
SkXfermode::Mode mode;
getAlphaAndMode(paint, &alpha, &mode);
drawTextureMesh(left, top, right, bottom, texture->id, alpha / 255.0f, mode, texture->blend,
- isPremultiplied, &mDrawTextureVertices[0].position[0],
- &mDrawTextureVertices[0].texture[0], NULL);
+ &mDrawTextureVertices[0].position[0], &mDrawTextureVertices[0].texture[0], NULL);
}
void OpenGLRenderer::drawTextureRect(float left, float top, float right, float bottom,
- GLuint texture, float alpha, SkXfermode::Mode mode, bool blend, bool isPremultiplied) {
- drawTextureMesh(left, top, right, bottom, texture, alpha, mode, blend, isPremultiplied,
+ GLuint texture, float alpha, SkXfermode::Mode mode, bool blend) {
+ drawTextureMesh(left, top, right, bottom, texture, alpha, mode, blend,
&mDrawTextureVertices[0].position[0], &mDrawTextureVertices[0].texture[0], NULL);
}
void OpenGLRenderer::drawTextureMesh(float left, float top, float right, float bottom,
- GLuint texture, float alpha, SkXfermode::Mode mode, bool blend, bool isPremultiplied,
+ GLuint texture, float alpha, SkXfermode::Mode mode, bool blend,
GLvoid* vertices, GLvoid* texCoords, GLvoid* indices, GLsizei elementsCount) {
mModelView.loadTranslate(left, top, 0.0f);
mModelView.scale(right - left, bottom - top, 1.0f);
- mDrawTextureShader->use(&mOrthoMatrix[0], &mModelView.data[0], &mSnapshot->transform.data[0]);
+ useShader(mDrawTextureShader);
+ mDrawTextureShader->set(mOrthoMatrix, mModelView, mSnapshot->transform);
- chooseBlending(blend || alpha < 1.0f, mode, isPremultiplied);
+ chooseBlending(blend || alpha < 1.0f, mode);
glBindTexture(GL_TEXTURE_2D, texture);
// TODO handle tiling and filtering here
- glActiveTexture(GL_TEXTURE0);
- glUniform1i(mDrawTextureShader->sampler, 0);
- glUniform4f(mDrawTextureShader->color, 1.0f, 1.0f, 1.0f, alpha);
+ // Always premultiplied
+ glUniform4f(mDrawTextureShader->color, alpha, alpha, alpha, alpha);
- glEnableVertexAttribArray(mDrawTextureShader->position);
glVertexAttribPointer(mDrawTextureShader->position, 2, GL_FLOAT, GL_FALSE,
gDrawTextureVertexStride, vertices);
-
- glEnableVertexAttribArray(mDrawTextureShader->texCoords);
glVertexAttribPointer(mDrawTextureShader->texCoords, 2, GL_FLOAT, GL_FALSE,
gDrawTextureVertexStride, texCoords);
@@ -574,9 +601,6 @@
glDrawElements(GL_TRIANGLES, elementsCount, GL_UNSIGNED_SHORT, indices);
}
- glDisableVertexAttribArray(mDrawTextureShader->position);
- glDisableVertexAttribArray(mDrawTextureShader->texCoords);
-
glBindTexture(GL_TEXTURE_2D, 0);
}
@@ -606,6 +630,16 @@
mBlend = blend;
}
+bool OpenGLRenderer::useShader(const sp<Program>& shader) {
+ if (!shader->isInUse()) {
+ mCurrentShader->remove();
+ shader->use();
+ mCurrentShader = shader;
+ return false;
+ }
+ return true;
+}
+
void OpenGLRenderer::resetDrawTextureTexCoords(float u1, float v1, float u2, float v2) {
TextureVertex* v = &mDrawTextureVertices[0];
TextureVertex::setUV(v++, u1, v1);
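
The new early-outs in drawBitmap(), drawPatch() and drawRect() rely on quickReject(), whose body is not
part of these hunks. A plausible sketch of what such a check does, assuming it maps the draw bounds
through the current transform and tests them against the snapshot's mapped clip (not necessarily the
actual implementation):

    // Hedged sketch only: reject draws whose transformed bounds cannot intersect
    // the current mapped clip. Note that mapRect() does not apply rotations (see
    // the comment added in Matrix.h), so this is a conservative axis-aligned test.
    bool OpenGLRenderer::quickReject(float left, float top, float right, float bottom) {
        Rect r(left, top, right, bottom);
        mSnapshot->transform.mapRect(r);

        const Rect& clip = mSnapshot->getMappedClip();
        return r.right <= clip.left || r.bottom <= clip.top ||
               r.left >= clip.right || r.top >= clip.bottom;
    }
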
diff --git a/libs/hwui/OpenGLRenderer.h b/libs/hwui/OpenGLRenderer.h
index 76d6e06..8083038 100644
--- a/libs/hwui/OpenGLRenderer.h
+++ b/libs/hwui/OpenGLRenderer.h
@@ -160,9 +160,10 @@
* @param bottom The bottom coordinate of the rectangle
* @param color The rectangle's ARGB color, defined as a packed 32 bits word
* @param mode The Skia xfermode to use
+ * @param ignoreTransform True if the current transform should be ignored
*/
void drawColorRect(float left, float top, float right, float bottom,
- int color, SkXfermode::Mode mode);
+ int color, SkXfermode::Mode mode, bool ignoreTransform = false);
/**
* Draws a textured rectangle with the specified texture. The specified coordinates
@@ -176,10 +177,9 @@
* @param alpha An additional translucency parameter, between 0.0f and 1.0f
* @param mode The blending mode
* @param blend True if the texture contains an alpha channel
- * @param isPremultiplied Indicates whether the texture has premultiplied alpha
*/
void drawTextureRect(float left, float top, float right, float bottom, GLuint texture,
- float alpha, SkXfermode::Mode mode, bool blend, bool isPremultiplied = true);
+ float alpha, SkXfermode::Mode mode, bool blend);
/**
* Draws a textured rectangle with the specified texture. The specified coordinates
@@ -191,10 +191,9 @@
* @param bottom The bottom coordinate of the rectangle
* @param texture The texture to use
* @param paint The paint containing the alpha, blending mode, etc.
- * @param isPremultiplied Indicates whether the texture has premultiplied alpha
*/
- void drawTextureRect(float left, float top, float right, float bottom, const Texture* texture,
- const SkPaint* paint, bool isPremultiplied = true);
+ void drawTextureRect(float left, float top, float right, float bottom,
+ const Texture* texture, const SkPaint* paint);
/**
* Draws a textured mesh with the specified texture. If the indices are omitted, the
@@ -208,14 +207,13 @@
* @param alpha An additional translucency parameter, between 0.0f and 1.0f
* @param mode The blending mode
* @param blend True if the texture contains an alpha channel
- * @param isPremultiplied Indicates whether the texture has premultiplied alpha
* @param vertices The vertices that define the mesh
* @param texCoords The texture coordinates of each vertex
* @param indices The indices of the vertices, can be NULL
* @param elementsCount The number of elements in the mesh, required by indices
*/
void drawTextureMesh(float left, float top, float right, float bottom, GLuint texture,
- float alpha, SkXfermode::Mode mode, bool blend, bool isPremultiplied,
+ float alpha, SkXfermode::Mode mode, bool blend,
GLvoid* vertices, GLvoid* texCoords, GLvoid* indices, GLsizei elementsCount = 0);
/**
@@ -245,13 +243,23 @@
* Enable or disable blending as necessary. This function sets the appropriate
* blend function based on the specified xfermode.
*/
- inline void chooseBlending(bool blend, SkXfermode::Mode mode, bool isPremultiplied);
+ inline void chooseBlending(bool blend, SkXfermode::Mode mode, bool isPremultiplied = true);
+
+ /**
+ * Uses the specified shader with the current GL context. If the shader is
+ * already in use, it is not bound again; otherwise the current shader is
+ * marked unused, the specified shader is bound, and it becomes the new
+ * current shader.
+ *
+ * @return true if the specified shader was already in use, false otherwise.
+ */
+ inline bool useShader(const sp<Program>& shader);
// Dimensions of the drawing surface
int mWidth, mHeight;
// Matrix used for ortho projection in shaders
- float mOrthoMatrix[16];
+ mat4 mOrthoMatrix;
// Model-view matrix used to position/size objects
mat4 mModelView;
@@ -264,6 +272,7 @@
sp<Snapshot> mSnapshot;
// Shaders
+ sp<Program> mCurrentShader;
sp<DrawColorProgram> mDrawColorShader;
sp<DrawTextureProgram> mDrawTextureShader;
diff --git a/libs/hwui/Program.cpp b/libs/hwui/Program.cpp
index 819e736..609b28a 100644
--- a/libs/hwui/Program.cpp
+++ b/libs/hwui/Program.cpp
@@ -59,6 +59,8 @@
}
glDeleteProgram(id);
}
+
+ mUse = false;
}
Program::~Program() {
@@ -69,6 +71,11 @@
void Program::use() {
glUseProgram(id);
+ mUse = true;
+}
+
+void Program::remove() {
+ mUse = false;
}
int Program::addAttrib(const char* name) {
@@ -127,17 +134,26 @@
void DrawColorProgram::getAttribsAndUniforms() {
position = addAttrib("position");
color = addUniform("color");
- projection = addUniform("projection");
- modelView = addUniform("modelView");
transform = addUniform("transform");
}
-void DrawColorProgram::use(const GLfloat* projectionMatrix, const GLfloat* modelViewMatrix,
- const GLfloat* transformMatrix) {
+void DrawColorProgram::set(const mat4& projectionMatrix, const mat4& modelViewMatrix,
+ const mat4& transformMatrix) {
+ mat4 t(projectionMatrix);
+ t.multiply(transformMatrix);
+ t.multiply(modelViewMatrix);
+
+ glUniformMatrix4fv(transform, 1, GL_FALSE, &t.data[0]);
+}
+
+void DrawColorProgram::use() {
Program::use();
- glUniformMatrix4fv(projection, 1, GL_FALSE, projectionMatrix);
- glUniformMatrix4fv(modelView, 1, GL_FALSE, modelViewMatrix);
- glUniformMatrix4fv(transform, 1, GL_FALSE, transformMatrix);
+ glEnableVertexAttribArray(position);
+}
+
+void DrawColorProgram::remove() {
+ Program::remove();
+ glDisableVertexAttribArray(position);
}
///////////////////////////////////////////////////////////////////////////////
@@ -150,5 +166,17 @@
sampler = addUniform("sampler");
}
+void DrawTextureProgram::use() {
+ DrawColorProgram::use();
+ glActiveTexture(GL_TEXTURE0);
+ glUniform1i(sampler, 0);
+ glEnableVertexAttribArray(texCoords);
+}
+
+void DrawTextureProgram::remove() {
+ DrawColorProgram::remove();
+ glDisableVertexAttribArray(texCoords);
+}
+
}; // namespace uirenderer
}; // namespace android
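
The reworked Program API splits binding from uniform upload: use() binds the GL program and enables its
vertex attributes once, remove() disables them, and set() uploads a single matrix computed as
projection * transform * modelView, which is what the simplified vertex shaders now expect. A hedged
sketch of the per-draw sequence OpenGLRenderer follows (the vertex pointer setup is assumed to have
happened already):

    #include <GLES2/gl2.h>

    #include "Program.h"
    #include "Matrix.h"

    using namespace android;
    using namespace android::uirenderer;

    static void drawSolidQuad(const sp<DrawColorProgram>& program,
            const mat4& ortho, const mat4& modelView, const mat4& transform) {
        if (!program->isInUse()) {
            program->use();                        // binds and enables the position attribute
        }
        program->set(ortho, modelView, transform); // uploads projection * transform * modelView
        glUniform4f(program->color, 1.0f, 0.0f, 0.0f, 1.0f);   // premultiplied opaque red
        glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    }
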
diff --git a/libs/hwui/Program.h b/libs/hwui/Program.h
index ee16a92..d90bcaf 100644
--- a/libs/hwui/Program.h
+++ b/libs/hwui/Program.h
@@ -23,6 +23,8 @@
#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
+#include "Matrix.h"
+
namespace android {
namespace uirenderer {
@@ -37,12 +39,26 @@
* shaders sources.
*/
Program(const char* vertex, const char* fragment);
- ~Program();
+ virtual ~Program();
/**
* Binds this program to the GL context.
*/
- void use();
+ virtual void use();
+
+ /**
+ * Marks this program as unused. This will not unbind
+ * the program from the GL context.
+ */
+ virtual void remove();
+
+ /**
+ * Indicates whether this program is currently in use with
+ * the GL context.
+ */
+ inline bool isInUse() const {
+ return mUse;
+ }
protected:
/**
@@ -85,6 +101,8 @@
// Keeps track of attributes and uniforms slots
KeyedVector<const char*, int> attributes;
KeyedVector<const char*, int> uniforms;
+
+ bool mUse;
}; // class Program
/**
@@ -107,26 +125,29 @@
* Binds the program with the specified projection, modelView and
* transform matrices.
*/
- void use(const GLfloat* projectionMatrix, const GLfloat* modelViewMatrix,
- const GLfloat* transformMatrix);
+ void set(const mat4& projectionMatrix, const mat4& modelViewMatrix,
+ const mat4& transformMatrix);
+
+ /**
+ * Binds this program to the GL context.
+ */
+ virtual void use();
+
+ /**
+ * Marks this program as unused. This will not unbind
+ * the program from the GL context.
+ */
+ virtual void remove();
/**
* Name of the position attribute.
*/
int position;
- /**
- * Name of the color attribute.
- */
- int color;
/**
- * Name of the projection uniform.
+ * Name of the color uniform.
*/
- int projection;
- /**
- * Name of the modelView uniform.
- */
- int modelView;
+ int color;
/**
* Name of the transform uniform.
*/
@@ -146,7 +167,25 @@
public:
DrawTextureProgram();
+ /**
+ * Binds this program to the GL context.
+ */
+ virtual void use();
+
+ /**
+ * Marks this program as unused. This will not unbind
+ * the program from the GL context.
+ */
+ virtual void remove();
+
+ /**
+ * Name of the texture sampler uniform.
+ */
int sampler;
+
+ /**
+ * Name of the texture coordinates attribute.
+ */
int texCoords;
};
diff --git a/libs/hwui/Snapshot.h b/libs/hwui/Snapshot.h
index 17ca440..7265d91 100644
--- a/libs/hwui/Snapshot.h
+++ b/libs/hwui/Snapshot.h
@@ -56,6 +56,8 @@
previous(s),
layer(NULL),
fbo(s->fbo) {
+ mappedClip.set(s->clipRect);
+ transform.mapRect(mappedClip);
}
/**
@@ -87,12 +89,20 @@
* Returns the current clip region mapped by the current transform.
*/
const Rect& getMappedClip() {
+ return mappedClip;
+ }
+
+ /**
+ * Intersects the current clip with the new clip rectangle.
+ */
+ bool clip(float left, float top, float right, float bottom) {
+ bool clipped = clipRect.intersect(left, top, right, bottom);
if (flags & kFlagDirtyTransform) {
flags &= ~kFlagDirtyTransform;
mappedClip.set(clipRect);
transform.mapRect(mappedClip);
}
- return mappedClip;
+ return clipped;
}
/**
@@ -130,7 +140,7 @@
/**
* Contains the previous ortho matrix.
*/
- float orthoMatrix[16];
+ mat4 orthoMatrix;
private:
// Clipping rectangle mapped with the transform
diff --git a/libs/hwui/TextureCache.cpp b/libs/hwui/TextureCache.cpp
index 612f04e..4977f46 100644
--- a/libs/hwui/TextureCache.cpp
+++ b/libs/hwui/TextureCache.cpp
@@ -133,7 +133,7 @@
GL_RGB, GL_UNSIGNED_SHORT_5_6_5, bitmap->getPixels());
break;
case SkBitmap::kARGB_8888_Config:
- texture->blend = true;
+ texture->blend = !bitmap->isOpaque();
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bitmap->rowBytesAsPixels(), texture->height, 0,
GL_RGBA, GL_UNSIGNED_BYTE, bitmap->getPixels());
break;
diff --git a/libs/hwui/shaders/drawColor.vert b/libs/hwui/shaders/drawColor.vert
index 742ed98..20e2636 100644
--- a/libs/hwui/shaders/drawColor.vert
+++ b/libs/hwui/shaders/drawColor.vert
@@ -2,12 +2,10 @@
attribute vec4 position;
-uniform mat4 projection;
-uniform mat4 modelView;
uniform mat4 transform;
void main(void) {
- gl_Position = projection * transform * modelView * position;
+ gl_Position = transform * position;
}
);
diff --git a/libs/hwui/shaders/drawTexture.vert b/libs/hwui/shaders/drawTexture.vert
index 8abddb8..240aebf 100644
--- a/libs/hwui/shaders/drawTexture.vert
+++ b/libs/hwui/shaders/drawTexture.vert
@@ -3,15 +3,13 @@
attribute vec4 position;
attribute vec2 texCoords;
-uniform mat4 projection;
-uniform mat4 modelView;
uniform mat4 transform;
varying vec2 outTexCoords;
void main(void) {
outTexCoords = texCoords;
- gl_Position = projection * transform * modelView * position;
+ gl_Position = transform * position;
}
);
diff --git a/libs/rs/RenderScript.h b/libs/rs/RenderScript.h
index 8e6b5c6..745df95 100644
--- a/libs/rs/RenderScript.h
+++ b/libs/rs/RenderScript.h
@@ -244,6 +244,12 @@
RS_A3D_CLASS_ID_SCRIPT_C
};
+enum RsCullMode {
+ RS_CULL_BACK,
+ RS_CULL_FRONT,
+ RS_CULL_NONE
+};
+
typedef struct {
RsA3DClassID classID;
const char* objectName;
diff --git a/libs/rs/java/ModelViewer/res/raw/robot.a3d b/libs/rs/java/ModelViewer/res/raw/robot.a3d
index d220c81..2d7d32b 100644
--- a/libs/rs/java/ModelViewer/res/raw/robot.a3d
+++ b/libs/rs/java/ModelViewer/res/raw/robot.a3d
Binary files differ
diff --git a/libs/rs/rs.spec b/libs/rs/rs.spec
index 80047c1..1719029 100644
--- a/libs/rs/rs.spec
+++ b/libs/rs/rs.spec
@@ -347,8 +347,6 @@
}
ProgramRasterCreate {
- param RsElement in
- param RsElement out
param bool pointSmooth
param bool lineSmooth
param bool pointSprite
@@ -360,6 +358,10 @@
param float lw
}
+ProgramRasterSetCullMode {
+ param RsProgramRaster pr
+ param RsCullMode mode
+}
ProgramBindConstants {
param RsProgram vp
@@ -492,6 +494,29 @@
param uint32_t slot
}
+MeshGetVertexBufferCount {
+ param RsMesh mesh
+ param int32_t *numVtx
+ }
+
+MeshGetIndexCount {
+ param RsMesh mesh
+ param int32_t *numIdx
+ }
+
+MeshGetVertices {
+ param RsMesh mv
+ param RsAllocation *vtxData
+ param uint32_t vtxDataCount
+ }
+
+MeshGetIndices {
+ param RsMesh mv
+ param RsAllocation *va
+ param uint32_t *primType
+ param uint32_t idxDataCount
+ }
+
AnimationCreate {
param const float *inValues
param const float *outValues
diff --git a/libs/rs/rsAllocation.cpp b/libs/rs/rsAllocation.cpp
index d9d0bc5..6560101 100644
--- a/libs/rs/rsAllocation.cpp
+++ b/libs/rs/rsAllocation.cpp
@@ -683,6 +683,14 @@
a->read(data);
}
+const void* rsi_AllocationGetType(Context *rsc, RsAllocation va)
+{
+ Allocation *a = static_cast<Allocation *>(va);
+ a->getType()->incUserRef();
+
+ return a->getType();
+}
+
#endif //ANDROID_RS_BUILD_FOR_HOST
}
diff --git a/libs/rs/rsFont.cpp b/libs/rs/rsFont.cpp
index 2a47ca4..d1346fc 100644
--- a/libs/rs/rsFont.cpp
+++ b/libs/rs/rsFont.cpp
@@ -511,6 +511,9 @@
ObjectBaseRef<const ProgramVertex> tmpV(mRSC->getVertex());
mRSC->setVertex(mRSC->getDefaultProgramVertex());
+ ObjectBaseRef<const ProgramRaster> tmpR(mRSC->getRaster());
+ mRSC->setRaster(mRSC->getDefaultProgramRaster());
+
ObjectBaseRef<const ProgramFragment> tmpF(mRSC->getFragment());
mRSC->setFragment(mFontShaderF.get());
@@ -519,6 +522,7 @@
if (!mRSC->setupCheck()) {
mRSC->setVertex((ProgramVertex *)tmpV.get());
+ mRSC->setRaster((ProgramRaster *)tmpR.get());
mRSC->setFragment((ProgramFragment *)tmpF.get());
mRSC->setFragmentStore((ProgramStore *)tmpPS.get());
return;
@@ -538,6 +542,7 @@
// Reset the state
mRSC->setVertex((ProgramVertex *)tmpV.get());
+ mRSC->setRaster((ProgramRaster *)tmpR.get());
mRSC->setFragment((ProgramFragment *)tmpF.get());
mRSC->setFragmentStore((ProgramStore *)tmpPS.get());
}
diff --git a/libs/rs/rsMesh.cpp b/libs/rs/rsMesh.cpp
index d1b5581..9026578 100644
--- a/libs/rs/rsMesh.cpp
+++ b/libs/rs/rsMesh.cpp
@@ -271,4 +271,45 @@
sm->updateGLPrimitives();
}
+void rsi_MeshGetVertexBufferCount(Context *rsc, RsMesh mv, int32_t *numVtx)
+{
+ Mesh *sm = static_cast<Mesh *>(mv);
+ *numVtx = sm->mVertexBufferCount;
+}
+
+void rsi_MeshGetIndexCount(Context *rsc, RsMesh mv, int32_t *numIdx)
+{
+ Mesh *sm = static_cast<Mesh *>(mv);
+ *numIdx = sm->mPrimitivesCount;
+}
+
+void rsi_MeshGetVertices(Context *rsc, RsMesh mv, RsAllocation *vtxData, uint32_t vtxDataCount)
+{
+ Mesh *sm = static_cast<Mesh *>(mv);
+ rsAssert(vtxDataCount == sm->mVertexBufferCount);
+
+ for(uint32_t ct = 0; ct < vtxDataCount; ct ++) {
+ vtxData[ct] = sm->mVertexBuffers[ct].get();
+ sm->mVertexBuffers[ct]->incUserRef();
+ }
+}
+
+void rsi_MeshGetIndices(Context *rsc, RsMesh mv, RsAllocation *va, uint32_t *primType, uint32_t idxDataCount)
+{
+ Mesh *sm = static_cast<Mesh *>(mv);
+ rsAssert(idxDataCount == sm->mPrimitivesCount);
+
+ for(uint32_t ct = 0; ct < idxDataCount; ct ++) {
+ va[ct] = sm->mPrimitives[ct]->mIndexBuffer.get();
+ primType[ct] = sm->mPrimitives[ct]->mPrimitive;
+ if(sm->mPrimitives[ct]->mIndexBuffer.get()) {
+ sm->mPrimitives[ct]->mIndexBuffer->incUserRef();
+ }
+ }
+
+}
+
+
+
+
}}
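
The new MeshGet* entry points follow a two-step query pattern: ask for the counts, size the output
arrays, then fetch the allocations (each returned allocation has its user reference count bumped, so
the caller owns a reference). A hedged sketch against the internal rsi_ entry points defined above,
assuming the android::renderscript internals (rsContext.h, rsMesh.h) are available:

    static void queryMesh(Context *rsc, RsMesh mesh) {
        int32_t vtxCount = 0;
        int32_t idxCount = 0;
        rsi_MeshGetVertexBufferCount(rsc, mesh, &vtxCount);
        rsi_MeshGetIndexCount(rsc, mesh, &idxCount);

        RsAllocation *vtx = new RsAllocation[vtxCount];
        RsAllocation *idx = new RsAllocation[idxCount];
        uint32_t *prim = new uint32_t[idxCount];

        rsi_MeshGetVertices(rsc, mesh, vtx, vtxCount);
        rsi_MeshGetIndices(rsc, mesh, idx, prim, idxCount);

        // ... use the vertex and index allocations, then drop the user refs ...

        delete[] vtx;
        delete[] idx;
        delete[] prim;
    }
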
diff --git a/libs/rs/rsProgramRaster.cpp b/libs/rs/rsProgramRaster.cpp
index 7663840..5b69370 100644
--- a/libs/rs/rsProgramRaster.cpp
+++ b/libs/rs/rsProgramRaster.cpp
@@ -42,6 +42,7 @@
mLineSmooth = lineSmooth;
mPointSprite = pointSprite;
mLineWidth = 1.0f;
+ mCull = RS_CULL_BACK;
}
ProgramRaster::~ProgramRaster()
@@ -51,14 +52,22 @@
void ProgramRaster::setLineWidth(float s)
{
mLineWidth = s;
+ mDirty = true;
+}
+
+void ProgramRaster::setCullMode(RsCullMode mode)
+{
+ mCull = mode;
+ mDirty = true;
}
void ProgramRaster::setupGL(const Context *rsc, ProgramRasterState *state)
{
- if (state->mLast.get() == this) {
+ if (state->mLast.get() == this && !mDirty) {
return;
}
state->mLast.set(this);
+ mDirty = false;
if (mPointSmooth) {
glEnable(GL_POINT_SMOOTH);
@@ -82,14 +91,43 @@
}
#endif //ANDROID_RS_BUILD_FOR_HOST
}
+
+ switch(mCull) {
+ case RS_CULL_BACK:
+ glEnable(GL_CULL_FACE);
+ glCullFace(GL_BACK);
+ break;
+ case RS_CULL_FRONT:
+ glEnable(GL_CULL_FACE);
+ glCullFace(GL_FRONT);
+ break;
+ case RS_CULL_NONE:
+ glDisable(GL_CULL_FACE);
+ break;
+ }
}
void ProgramRaster::setupGL2(const Context *rsc, ProgramRasterState *state)
{
- if (state->mLast.get() == this) {
+ if (state->mLast.get() == this && !mDirty) {
return;
}
state->mLast.set(this);
+ mDirty = false;
+
+ switch(mCull) {
+ case RS_CULL_BACK:
+ glEnable(GL_CULL_FACE);
+ glCullFace(GL_BACK);
+ break;
+ case RS_CULL_FRONT:
+ glEnable(GL_CULL_FACE);
+ glCullFace(GL_FRONT);
+ break;
+ case RS_CULL_NONE:
+ glDisable(GL_CULL_FACE);
+ break;
+ }
}
void ProgramRaster::serialize(OStream *stream) const
@@ -126,7 +164,7 @@
namespace android {
namespace renderscript {
-RsProgramRaster rsi_ProgramRasterCreate(Context * rsc, RsElement in, RsElement out,
+RsProgramRaster rsi_ProgramRasterCreate(Context * rsc,
bool pointSmooth,
bool lineSmooth,
bool pointSprite)
@@ -145,6 +183,12 @@
pr->setLineWidth(s);
}
+void rsi_ProgramRasterSetCullMode(Context * rsc, RsProgramRaster vpr, RsCullMode mode)
+{
+ ProgramRaster *pr = static_cast<ProgramRaster *>(vpr);
+ pr->setCullMode(mode);
+}
+
}
}
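
The mDirty flag added here exists because raster state can now change on an already-bound program:
without it, the state->mLast == this early-out in setupGL()/setupGL2() would swallow a cull-mode
change. A small hedged illustration:

    // Illustration only: toggling the cull mode on the currently bound raster
    // program still reaches GL on the next setup call, because setCullMode()
    // marks the program dirty.
    static void disableCulling(Context *rsc, ProgramRaster *pr, ProgramRasterState *state) {
        pr->setCullMode(RS_CULL_NONE);   // mDirty = true
        pr->setupGL2(rsc, state);        // re-applies state even though state->mLast == pr
    }
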
diff --git a/libs/rs/rsProgramRaster.h b/libs/rs/rsProgramRaster.h
index ea78e766..801ab2a 100644
--- a/libs/rs/rsProgramRaster.h
+++ b/libs/rs/rsProgramRaster.h
@@ -41,12 +41,14 @@
static ProgramRaster *createFromStream(Context *rsc, IStream *stream);
void setLineWidth(float w);
+ void setCullMode(RsCullMode mode);
protected:
bool mPointSmooth;
bool mLineSmooth;
bool mPointSprite;
float mLineWidth;
+ RsCullMode mCull;
};
class ProgramRasterState
diff --git a/libs/ui/Android.mk b/libs/ui/Android.mk
index 24cdc78..4243bbf 100644
--- a/libs/ui/Android.mk
+++ b/libs/ui/Android.mk
@@ -12,6 +12,7 @@
KeyLayoutMap.cpp \
KeyCharacterMap.cpp \
Input.cpp \
+ InputDevice.cpp \
InputDispatcher.cpp \
InputManager.cpp \
InputReader.cpp \
@@ -38,3 +39,13 @@
endif
include $(BUILD_SHARED_LIBRARY)
+
+
+# Include subdirectory makefiles
+# ============================================================
+
+# If we're building with ONE_SHOT_MAKEFILE (mm, mmm), then what the framework
+# team really wants is to build the stuff defined by this makefile.
+ifeq (,$(ONE_SHOT_MAKEFILE))
+include $(call first-makefiles-under,$(LOCAL_PATH))
+endif
diff --git a/libs/ui/EventHub.cpp b/libs/ui/EventHub.cpp
index 27895f2..768b04e 100644
--- a/libs/ui/EventHub.cpp
+++ b/libs/ui/EventHub.cpp
@@ -54,6 +54,9 @@
*/
#define test_bit(bit, array) (array[bit/8] & (1<<(bit%8)))
+/* this macro computes the number of bytes needed to represent a bit array of the specified size */
+#define sizeof_bit_array(bits) ((bits + 7) / 8)
+
#define ID_MASK 0x0000ffff
#define SEQ_MASK 0x7fff0000
#define SEQ_SHIFT 16
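
sizeof_bit_array() pairs with the existing test_bit() macro: EVIOCGBIT/EVIOCGKEY fill a packed bit array with one bit per event code, so a mask covering codes 0..N needs sizeof_bit_array(N + 1) bytes. A standalone illustration of the two macros (copied here so the snippet compiles on its own):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    // Both macros are copied from EventHub.cpp.
    #define test_bit(bit, array) (array[bit/8] & (1<<(bit%8)))
    #define sizeof_bit_array(bits) ((bits + 7) / 8)

    int main() {
        // A mask that covers event codes 0..299 needs sizeof_bit_array(300) == 38 bytes.
        uint8_t mask[sizeof_bit_array(300)];
        memset(mask, 0, sizeof(mask));

        mask[299 / 8] |= 1 << (299 % 8);   // set bit 299 by hand, as the kernel would
        assert(test_bit(299, mask));       // the macro sees it
        assert(!test_bit(0, mask));        // untouched bits stay clear
        return 0;
    }
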
@@ -182,7 +185,7 @@
}
int32_t EventHub::getScanCodeStateLocked(device_t* device, int32_t scanCode) const {
- uint8_t key_bitmask[(KEY_MAX + 7) / 8];
+ uint8_t key_bitmask[sizeof_bit_array(KEY_MAX + 1)];
memset(key_bitmask, 0, sizeof(key_bitmask));
if (ioctl(mFDs[id_to_index(device->id)].fd,
EVIOCGKEY(sizeof(key_bitmask)), key_bitmask) >= 0) {
@@ -218,7 +221,7 @@
Vector<int32_t> scanCodes;
device->layoutMap->findScancodes(keyCode, &scanCodes);
- uint8_t key_bitmask[(KEY_MAX + 7) / 8];
+ uint8_t key_bitmask[sizeof_bit_array(KEY_MAX + 1)];
memset(key_bitmask, 0, sizeof(key_bitmask));
if (ioctl(mFDs[id_to_index(device->id)].fd,
EVIOCGKEY(sizeof(key_bitmask)), key_bitmask) >= 0) {
@@ -264,7 +267,7 @@
}
int32_t EventHub::getSwitchStateLocked(device_t* device, int32_t sw) const {
- uint8_t sw_bitmask[(SW_MAX + 7) / 8];
+ uint8_t sw_bitmask[sizeof_bit_array(SW_MAX + 1)];
memset(sw_bitmask, 0, sizeof(sw_bitmask));
if (ioctl(mFDs[id_to_index(device->id)].fd,
EVIOCGSW(sizeof(sw_bitmask)), sw_bitmask) >= 0) {
@@ -409,7 +412,7 @@
LOGV("iev.code=%d outKeycode=%d outFlags=0x%08x err=%d\n",
iev.code, *outKeycode, *outFlags, err);
if (err != 0) {
- *outKeycode = 0;
+ *outKeycode = AKEYCODE_UNKNOWN;
*outFlags = 0;
}
} else {
@@ -509,6 +512,26 @@
// ----------------------------------------------------------------------------
+static bool containsNonZeroByte(const uint8_t* array, uint32_t startIndex, uint32_t endIndex) {
+ const uint8_t* end = array + endIndex;
+ array += startIndex;
+ while (array != end) {
+ if (*(array++) != 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static const int32_t GAMEPAD_KEYCODES[] = {
+ AKEYCODE_BUTTON_A, AKEYCODE_BUTTON_B, AKEYCODE_BUTTON_C,
+ AKEYCODE_BUTTON_X, AKEYCODE_BUTTON_Y, AKEYCODE_BUTTON_Z,
+ AKEYCODE_BUTTON_L1, AKEYCODE_BUTTON_R1,
+ AKEYCODE_BUTTON_L2, AKEYCODE_BUTTON_R2,
+ AKEYCODE_BUTTON_THUMBL, AKEYCODE_BUTTON_THUMBR,
+ AKEYCODE_BUTTON_START, AKEYCODE_BUTTON_SELECT, AKEYCODE_BUTTON_MODE
+};
+
int EventHub::open_device(const char *deviceName)
{
int version;
@@ -626,27 +649,27 @@
mFDs[mFDCount].fd = fd;
mFDs[mFDCount].events = POLLIN;
- // figure out the kinds of events the device reports
+ // Figure out the kinds of events the device reports.
- // See if this is a keyboard, and classify it. Note that we only
- // consider up through the function keys; we don't want to include
- // ones after that (play cd etc) so we don't mistakenly consider a
- // controller to be a keyboard.
- uint8_t key_bitmask[(KEY_MAX+7)/8];
+ uint8_t key_bitmask[sizeof_bit_array(KEY_MAX + 1)];
memset(key_bitmask, 0, sizeof(key_bitmask));
+
LOGV("Getting keys...");
if (ioctl(fd, EVIOCGBIT(EV_KEY, sizeof(key_bitmask)), key_bitmask) >= 0) {
//LOGI("MAP\n");
- //for (int i=0; i<((KEY_MAX+7)/8); i++) {
+ //for (int i = 0; i < sizeof(key_bitmask); i++) {
// LOGI("%d: 0x%02x\n", i, key_bitmask[i]);
//}
- for (int i=0; i<((BTN_MISC+7)/8); i++) {
- if (key_bitmask[i] != 0) {
- device->classes |= INPUT_DEVICE_CLASS_KEYBOARD;
- break;
- }
- }
- if ((device->classes & INPUT_DEVICE_CLASS_KEYBOARD) != 0) {
+
+ // See if this is a keyboard. Ignore everything in the button range except for
+ // gamepads which are also considered keyboards.
+ if (containsNonZeroByte(key_bitmask, 0, sizeof_bit_array(BTN_MISC))
+ || containsNonZeroByte(key_bitmask, sizeof_bit_array(BTN_GAMEPAD),
+ sizeof_bit_array(BTN_DIGI))
+ || containsNonZeroByte(key_bitmask, sizeof_bit_array(KEY_OK),
+ sizeof_bit_array(KEY_MAX + 1))) {
+ device->classes |= INPUT_DEVICE_CLASS_KEYBOARD;
+
device->keyBitmask = new uint8_t[sizeof(key_bitmask)];
if (device->keyBitmask != NULL) {
memcpy(device->keyBitmask, key_bitmask, sizeof(key_bitmask));
@@ -658,39 +681,39 @@
}
}
- // See if this is a trackball.
+ // See if this is a trackball (or mouse).
if (test_bit(BTN_MOUSE, key_bitmask)) {
- uint8_t rel_bitmask[(REL_MAX+7)/8];
+ uint8_t rel_bitmask[sizeof_bit_array(REL_MAX + 1)];
memset(rel_bitmask, 0, sizeof(rel_bitmask));
LOGV("Getting relative controllers...");
- if (ioctl(fd, EVIOCGBIT(EV_REL, sizeof(rel_bitmask)), rel_bitmask) >= 0)
- {
+ if (ioctl(fd, EVIOCGBIT(EV_REL, sizeof(rel_bitmask)), rel_bitmask) >= 0) {
if (test_bit(REL_X, rel_bitmask) && test_bit(REL_Y, rel_bitmask)) {
device->classes |= INPUT_DEVICE_CLASS_TRACKBALL;
}
}
}
-
- uint8_t abs_bitmask[(ABS_MAX+7)/8];
+
+ // See if this is a touch pad.
+ uint8_t abs_bitmask[sizeof_bit_array(ABS_MAX + 1)];
memset(abs_bitmask, 0, sizeof(abs_bitmask));
LOGV("Getting absolute controllers...");
- ioctl(fd, EVIOCGBIT(EV_ABS, sizeof(abs_bitmask)), abs_bitmask);
-
- // Is this a new modern multi-touch driver?
- if (test_bit(ABS_MT_TOUCH_MAJOR, abs_bitmask)
- && test_bit(ABS_MT_POSITION_X, abs_bitmask)
- && test_bit(ABS_MT_POSITION_Y, abs_bitmask)) {
- device->classes |= INPUT_DEVICE_CLASS_TOUCHSCREEN | INPUT_DEVICE_CLASS_TOUCHSCREEN_MT;
-
- // Is this an old style single-touch driver?
- } else if (test_bit(BTN_TOUCH, key_bitmask)
- && test_bit(ABS_X, abs_bitmask) && test_bit(ABS_Y, abs_bitmask)) {
- device->classes |= INPUT_DEVICE_CLASS_TOUCHSCREEN;
+ if (ioctl(fd, EVIOCGBIT(EV_ABS, sizeof(abs_bitmask)), abs_bitmask) >= 0) {
+ // Is this a new modern multi-touch driver?
+ if (test_bit(ABS_MT_TOUCH_MAJOR, abs_bitmask)
+ && test_bit(ABS_MT_POSITION_X, abs_bitmask)
+ && test_bit(ABS_MT_POSITION_Y, abs_bitmask)) {
+ device->classes |= INPUT_DEVICE_CLASS_TOUCHSCREEN | INPUT_DEVICE_CLASS_TOUCHSCREEN_MT;
+
+ // Is this an old style single-touch driver?
+ } else if (test_bit(BTN_TOUCH, key_bitmask)
+ && test_bit(ABS_X, abs_bitmask) && test_bit(ABS_Y, abs_bitmask)) {
+ device->classes |= INPUT_DEVICE_CLASS_TOUCHSCREEN;
+ }
}
#ifdef EV_SW
// figure out the switches this device reports
- uint8_t sw_bitmask[(SW_MAX+7)/8];
+ uint8_t sw_bitmask[sizeof_bit_array(SW_MAX + 1)];
memset(sw_bitmask, 0, sizeof(sw_bitmask));
if (ioctl(fd, EVIOCGBIT(EV_SW, sizeof(sw_bitmask)), sw_bitmask) >= 0) {
for (int i=0; i<EV_SW; i++) {
@@ -726,7 +749,10 @@
"%s/usr/keylayout/%s", root, "qwerty.kl");
defaultKeymap = true;
}
- device->layoutMap->load(keylayoutFilename);
+ status_t status = device->layoutMap->load(keylayoutFilename);
+ if (status) {
+ LOGE("Error %d loading key layout.", status);
+ }
// tell the world about the devname (the descriptive name)
if (!mHaveFirstKeyboard && !defaultKeymap && strstr(name, "-keypad")) {
@@ -746,19 +772,27 @@
property_set(propName, name);
// 'Q' key support = cheap test of whether this is an alpha-capable kbd
- if (hasKeycode(device, kKeyCodeQ)) {
+ if (hasKeycode(device, AKEYCODE_Q)) {
device->classes |= INPUT_DEVICE_CLASS_ALPHAKEY;
}
- // See if this has a DPAD.
- if (hasKeycode(device, kKeyCodeDpadUp) &&
- hasKeycode(device, kKeyCodeDpadDown) &&
- hasKeycode(device, kKeyCodeDpadLeft) &&
- hasKeycode(device, kKeyCodeDpadRight) &&
- hasKeycode(device, kKeyCodeDpadCenter)) {
+ // See if this device has a DPAD.
+ if (hasKeycode(device, AKEYCODE_DPAD_UP) &&
+ hasKeycode(device, AKEYCODE_DPAD_DOWN) &&
+ hasKeycode(device, AKEYCODE_DPAD_LEFT) &&
+ hasKeycode(device, AKEYCODE_DPAD_RIGHT) &&
+ hasKeycode(device, AKEYCODE_DPAD_CENTER)) {
device->classes |= INPUT_DEVICE_CLASS_DPAD;
}
+ // See if this device has a gamepad.
+ for (size_t i = 0; i < sizeof(GAMEPAD_KEYCODES) / sizeof(GAMEPAD_KEYCODES[0]); i++) {
+ if (hasKeycode(device, GAMEPAD_KEYCODES[i])) {
+ device->classes |= INPUT_DEVICE_CLASS_GAMEPAD;
+ break;
+ }
+ }
+
LOGI("New keyboard: device->id=0x%x devname='%s' propName='%s' keylayout='%s'\n",
device->id, name, propName, keylayoutFilename);
}
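
The keyboard test above no longer stops at the function keys; a device counts as a keyboard if it reports any key outside the generic button block: ordinary keys below BTN_MISC, the gamepad sub-range [BTN_GAMEPAD, BTN_DIGI), or anything at or above KEY_OK. The self-contained sketch below reproduces that range check; containsNonZeroByte() is copied from the hunk above, and the input-event constants are given placeholder values here only so the snippet builds on its own (their real definitions come from <linux/input.h>):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Placeholder values standing in for the <linux/input.h> constants.
    #define BTN_MISC 0x100
    #define BTN_GAMEPAD 0x130
    #define BTN_DIGI 0x140
    #define KEY_OK 0x160
    #define KEY_MAX 0x2ff
    #define sizeof_bit_array(bits) ((bits + 7) / 8)

    // Copied from the EventHub.cpp hunk above.
    static bool containsNonZeroByte(const uint8_t* array, uint32_t startIndex, uint32_t endIndex) {
        const uint8_t* end = array + endIndex;
        array += startIndex;
        while (array != end) {
            if (*(array++) != 0) {
                return true;
            }
        }
        return false;
    }

    int main() {
        uint8_t key_bitmask[sizeof_bit_array(KEY_MAX + 1)];
        memset(key_bitmask, 0, sizeof(key_bitmask));
        key_bitmask[BTN_GAMEPAD / 8] |= 1;   // pretend the device reports a gamepad button

        // Same three-range test as EventHub::open_device().
        bool isKeyboard =
                containsNonZeroByte(key_bitmask, 0, sizeof_bit_array(BTN_MISC))
                || containsNonZeroByte(key_bitmask, sizeof_bit_array(BTN_GAMEPAD),
                        sizeof_bit_array(BTN_DIGI))
                || containsNonZeroByte(key_bitmask, sizeof_bit_array(KEY_OK),
                        sizeof_bit_array(KEY_MAX + 1));
        printf("isKeyboard=%d\n", isKeyboard);
        return 0;
    }
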
diff --git a/libs/ui/Input.cpp b/libs/ui/Input.cpp
index a64251f..1f19c2c 100644
--- a/libs/ui/Input.cpp
+++ b/libs/ui/Input.cpp
@@ -22,26 +22,26 @@
bool KeyEvent::hasDefaultAction(int32_t keyCode) {
switch (keyCode) {
- case KEYCODE_HOME:
- case KEYCODE_BACK:
- case KEYCODE_CALL:
- case KEYCODE_ENDCALL:
- case KEYCODE_VOLUME_UP:
- case KEYCODE_VOLUME_DOWN:
- case KEYCODE_POWER:
- case KEYCODE_CAMERA:
- case KEYCODE_HEADSETHOOK:
- case KEYCODE_MENU:
- case KEYCODE_NOTIFICATION:
- case KEYCODE_FOCUS:
- case KEYCODE_SEARCH:
- case KEYCODE_MEDIA_PLAY_PAUSE:
- case KEYCODE_MEDIA_STOP:
- case KEYCODE_MEDIA_NEXT:
- case KEYCODE_MEDIA_PREVIOUS:
- case KEYCODE_MEDIA_REWIND:
- case KEYCODE_MEDIA_FAST_FORWARD:
- case KEYCODE_MUTE:
+ case AKEYCODE_HOME:
+ case AKEYCODE_BACK:
+ case AKEYCODE_CALL:
+ case AKEYCODE_ENDCALL:
+ case AKEYCODE_VOLUME_UP:
+ case AKEYCODE_VOLUME_DOWN:
+ case AKEYCODE_POWER:
+ case AKEYCODE_CAMERA:
+ case AKEYCODE_HEADSETHOOK:
+ case AKEYCODE_MENU:
+ case AKEYCODE_NOTIFICATION:
+ case AKEYCODE_FOCUS:
+ case AKEYCODE_SEARCH:
+ case AKEYCODE_MEDIA_PLAY_PAUSE:
+ case AKEYCODE_MEDIA_STOP:
+ case AKEYCODE_MEDIA_NEXT:
+ case AKEYCODE_MEDIA_PREVIOUS:
+ case AKEYCODE_MEDIA_REWIND:
+ case AKEYCODE_MEDIA_FAST_FORWARD:
+ case AKEYCODE_MUTE:
return true;
}
@@ -54,26 +54,26 @@
bool KeyEvent::isSystemKey(int32_t keyCode) {
switch (keyCode) {
- case KEYCODE_MENU:
- case KEYCODE_SOFT_RIGHT:
- case KEYCODE_HOME:
- case KEYCODE_BACK:
- case KEYCODE_CALL:
- case KEYCODE_ENDCALL:
- case KEYCODE_VOLUME_UP:
- case KEYCODE_VOLUME_DOWN:
- case KEYCODE_MUTE:
- case KEYCODE_POWER:
- case KEYCODE_HEADSETHOOK:
- case KEYCODE_MEDIA_PLAY_PAUSE:
- case KEYCODE_MEDIA_STOP:
- case KEYCODE_MEDIA_NEXT:
- case KEYCODE_MEDIA_PREVIOUS:
- case KEYCODE_MEDIA_REWIND:
- case KEYCODE_MEDIA_FAST_FORWARD:
- case KEYCODE_CAMERA:
- case KEYCODE_FOCUS:
- case KEYCODE_SEARCH:
+ case AKEYCODE_MENU:
+ case AKEYCODE_SOFT_RIGHT:
+ case AKEYCODE_HOME:
+ case AKEYCODE_BACK:
+ case AKEYCODE_CALL:
+ case AKEYCODE_ENDCALL:
+ case AKEYCODE_VOLUME_UP:
+ case AKEYCODE_VOLUME_DOWN:
+ case AKEYCODE_MUTE:
+ case AKEYCODE_POWER:
+ case AKEYCODE_HEADSETHOOK:
+ case AKEYCODE_MEDIA_PLAY_PAUSE:
+ case AKEYCODE_MEDIA_STOP:
+ case AKEYCODE_MEDIA_NEXT:
+ case AKEYCODE_MEDIA_PREVIOUS:
+ case AKEYCODE_MEDIA_REWIND:
+ case AKEYCODE_MEDIA_FAST_FORWARD:
+ case AKEYCODE_CAMERA:
+ case AKEYCODE_FOCUS:
+ case AKEYCODE_SEARCH:
return true;
}
diff --git a/libs/ui/InputDevice.cpp b/libs/ui/InputDevice.cpp
new file mode 100644
index 0000000..6014017
--- /dev/null
+++ b/libs/ui/InputDevice.cpp
@@ -0,0 +1,729 @@
+//
+// Copyright 2010 The Android Open Source Project
+//
+// The per-device input state, split out of the input reader.
+//
+#define LOG_TAG "InputDevice"
+
+//#define LOG_NDEBUG 0
+
+// Log debug messages for each raw event received from the EventHub.
+#define DEBUG_RAW_EVENTS 0
+
+// Log debug messages about touch screen filtering hacks.
+#define DEBUG_HACKS 0
+
+// Log debug messages about virtual key processing.
+#define DEBUG_VIRTUAL_KEYS 0
+
+// Log debug messages about pointers.
+#define DEBUG_POINTERS 0
+
+// Log debug messages about pointer assignment calculations.
+#define DEBUG_POINTER_ASSIGNMENT 0
+
+#include <cutils/log.h>
+#include <ui/InputDevice.h>
+
+#include <stddef.h>
+#include <unistd.h>
+#include <errno.h>
+#include <limits.h>
+
+/* Slop distance for jumpy pointer detection.
+ * The vertical range of the screen divided by this is our epsilon value. */
+#define JUMPY_EPSILON_DIVISOR 212
+
+/* Number of jumpy points to drop for touchscreens that need it. */
+#define JUMPY_TRANSITION_DROPS 3
+#define JUMPY_DROP_LIMIT 3
+
+/* Maximum squared distance for averaging.
+ * If moving farther than this, turn off averaging to avoid lag in response. */
+#define AVERAGING_DISTANCE_LIMIT (75 * 75)
+
+
+namespace android {
+
+// --- Static Functions ---
+
+template<typename T>
+inline static T abs(const T& value) {
+ return value < 0 ? - value : value;
+}
+
+template<typename T>
+inline static T min(const T& a, const T& b) {
+ return a < b ? a : b;
+}
+
+template<typename T>
+inline static void swap(T& a, T& b) {
+ T temp = a;
+ a = b;
+ b = temp;
+}
+
+
+// --- InputDevice ---
+
+InputDevice::InputDevice(int32_t id, uint32_t classes, String8 name) :
+ id(id), classes(classes), name(name), ignored(false) {
+}
+
+void InputDevice::reset() {
+ if (isKeyboard()) {
+ keyboard.reset();
+ }
+
+ if (isTrackball()) {
+ trackball.reset();
+ }
+
+ if (isMultiTouchScreen()) {
+ multiTouchScreen.reset();
+ } else if (isSingleTouchScreen()) {
+ singleTouchScreen.reset();
+ }
+
+ if (isTouchScreen()) {
+ touchScreen.reset();
+ }
+}
+
+
+// --- InputDevice::TouchData ---
+
+void InputDevice::TouchData::copyFrom(const TouchData& other) {
+ pointerCount = other.pointerCount;
+ idBits = other.idBits;
+
+ for (uint32_t i = 0; i < pointerCount; i++) {
+ pointers[i] = other.pointers[i];
+ idToIndex[i] = other.idToIndex[i];
+ }
+}
+
+
+// --- InputDevice::KeyboardState ---
+
+void InputDevice::KeyboardState::reset() {
+ current.metaState = META_NONE;
+ current.downTime = 0;
+}
+
+
+// --- InputDevice::TrackballState ---
+
+void InputDevice::TrackballState::reset() {
+ accumulator.clear();
+ current.down = false;
+ current.downTime = 0;
+}
+
+
+// --- InputDevice::TouchScreenState ---
+
+void InputDevice::TouchScreenState::reset() {
+ lastTouch.clear();
+ downTime = 0;
+ currentVirtualKey.status = CurrentVirtualKeyState::STATUS_UP;
+
+ for (uint32_t i = 0; i < MAX_POINTERS; i++) {
+ averagingTouchFilter.historyStart[i] = 0;
+ averagingTouchFilter.historyEnd[i] = 0;
+ }
+
+ jumpyTouchFilter.jumpyPointsDropped = 0;
+}
+
+struct PointerDistanceHeapElement {
+ uint32_t currentPointerIndex : 8;
+ uint32_t lastPointerIndex : 8;
+ uint64_t distance : 48; // squared distance
+};
+
+void InputDevice::TouchScreenState::calculatePointerIds() {
+ uint32_t currentPointerCount = currentTouch.pointerCount;
+ uint32_t lastPointerCount = lastTouch.pointerCount;
+
+ if (currentPointerCount == 0) {
+ // No pointers to assign.
+ currentTouch.idBits.clear();
+ } else if (lastPointerCount == 0) {
+ // All pointers are new.
+ currentTouch.idBits.clear();
+ for (uint32_t i = 0; i < currentPointerCount; i++) {
+ currentTouch.pointers[i].id = i;
+ currentTouch.idToIndex[i] = i;
+ currentTouch.idBits.markBit(i);
+ }
+ } else if (currentPointerCount == 1 && lastPointerCount == 1) {
+ // Only one pointer and no change in count so it must have the same id as before.
+ uint32_t id = lastTouch.pointers[0].id;
+ currentTouch.pointers[0].id = id;
+ currentTouch.idToIndex[id] = 0;
+ currentTouch.idBits.value = BitSet32::valueForBit(id);
+ } else {
+ // General case.
+ // We build a heap of squared euclidean distances between current and last pointers
+ // associated with the current and last pointer indices. Then, we find the best
+ // match (by distance) for each current pointer.
+ PointerDistanceHeapElement heap[MAX_POINTERS * MAX_POINTERS];
+
+ uint32_t heapSize = 0;
+ for (uint32_t currentPointerIndex = 0; currentPointerIndex < currentPointerCount;
+ currentPointerIndex++) {
+ for (uint32_t lastPointerIndex = 0; lastPointerIndex < lastPointerCount;
+ lastPointerIndex++) {
+ int64_t deltaX = currentTouch.pointers[currentPointerIndex].x
+ - lastTouch.pointers[lastPointerIndex].x;
+ int64_t deltaY = currentTouch.pointers[currentPointerIndex].y
+ - lastTouch.pointers[lastPointerIndex].y;
+
+ uint64_t distance = uint64_t(deltaX * deltaX + deltaY * deltaY);
+
+ // Insert new element into the heap (sift up).
+ heap[heapSize].currentPointerIndex = currentPointerIndex;
+ heap[heapSize].lastPointerIndex = lastPointerIndex;
+ heap[heapSize].distance = distance;
+ heapSize += 1;
+ }
+ }
+
+ // Heapify
+ for (uint32_t startIndex = heapSize / 2; startIndex != 0; ) {
+ startIndex -= 1;
+ for (uint32_t parentIndex = startIndex; ;) {
+ uint32_t childIndex = parentIndex * 2 + 1;
+ if (childIndex >= heapSize) {
+ break;
+ }
+
+ if (childIndex + 1 < heapSize
+ && heap[childIndex + 1].distance < heap[childIndex].distance) {
+ childIndex += 1;
+ }
+
+ if (heap[parentIndex].distance <= heap[childIndex].distance) {
+ break;
+ }
+
+ swap(heap[parentIndex], heap[childIndex]);
+ parentIndex = childIndex;
+ }
+ }
+
+#if DEBUG_POINTER_ASSIGNMENT
+ LOGD("calculatePointerIds - initial distance min-heap: size=%d", heapSize);
+ for (size_t i = 0; i < heapSize; i++) {
+ LOGD(" heap[%d]: cur=%d, last=%d, distance=%lld",
+ i, heap[i].currentPointerIndex, heap[i].lastPointerIndex,
+ heap[i].distance);
+ }
+#endif
+
+ // Pull matches out by increasing order of distance.
+ // To avoid reassigning pointers that have already been matched, the loop keeps track
+ // of which last and current pointers have been matched using the matchedXXXBits variables.
+ // It also tracks the used pointer id bits.
+ BitSet32 matchedLastBits(0);
+ BitSet32 matchedCurrentBits(0);
+ BitSet32 usedIdBits(0);
+ bool first = true;
+ for (uint32_t i = min(currentPointerCount, lastPointerCount); i > 0; i--) {
+ for (;;) {
+ if (first) {
+ // The first time through the loop, we just consume the root element of
+ // the heap (the one with smallest distance).
+ first = false;
+ } else {
+ // Previous iterations consumed the root element of the heap.
+ // Pop root element off of the heap (sift down).
+ heapSize -= 1;
+ assert(heapSize > 0);
+
+ // Sift down.
+ heap[0] = heap[heapSize];
+ for (uint32_t parentIndex = 0; ;) {
+ uint32_t childIndex = parentIndex * 2 + 1;
+ if (childIndex >= heapSize) {
+ break;
+ }
+
+ if (childIndex + 1 < heapSize
+ && heap[childIndex + 1].distance < heap[childIndex].distance) {
+ childIndex += 1;
+ }
+
+ if (heap[parentIndex].distance <= heap[childIndex].distance) {
+ break;
+ }
+
+ swap(heap[parentIndex], heap[childIndex]);
+ parentIndex = childIndex;
+ }
+
+#if DEBUG_POINTER_ASSIGNMENT
+ LOGD("calculatePointerIds - reduced distance min-heap: size=%d", heapSize);
+ for (size_t i = 0; i < heapSize; i++) {
+ LOGD(" heap[%d]: cur=%d, last=%d, distance=%lld",
+ i, heap[i].currentPointerIndex, heap[i].lastPointerIndex,
+ heap[i].distance);
+ }
+#endif
+ }
+
+ uint32_t currentPointerIndex = heap[0].currentPointerIndex;
+ if (matchedCurrentBits.hasBit(currentPointerIndex)) continue; // already matched
+
+ uint32_t lastPointerIndex = heap[0].lastPointerIndex;
+ if (matchedLastBits.hasBit(lastPointerIndex)) continue; // already matched
+
+ matchedCurrentBits.markBit(currentPointerIndex);
+ matchedLastBits.markBit(lastPointerIndex);
+
+ uint32_t id = lastTouch.pointers[lastPointerIndex].id;
+ currentTouch.pointers[currentPointerIndex].id = id;
+ currentTouch.idToIndex[id] = currentPointerIndex;
+ usedIdBits.markBit(id);
+
+#if DEBUG_POINTER_ASSIGNMENT
+ LOGD("calculatePointerIds - matched: cur=%d, last=%d, id=%d, distance=%lld",
+ lastPointerIndex, currentPointerIndex, id, heap[0].distance);
+#endif
+ break;
+ }
+ }
+
+ // Assign fresh ids to new pointers.
+ if (currentPointerCount > lastPointerCount) {
+ for (uint32_t i = currentPointerCount - lastPointerCount; ;) {
+ uint32_t currentPointerIndex = matchedCurrentBits.firstUnmarkedBit();
+ uint32_t id = usedIdBits.firstUnmarkedBit();
+
+ currentTouch.pointers[currentPointerIndex].id = id;
+ currentTouch.idToIndex[id] = currentPointerIndex;
+ usedIdBits.markBit(id);
+
+#if DEBUG_POINTER_ASSIGNMENT
+ LOGD("calculatePointerIds - assigned: cur=%d, id=%d",
+ currentPointerIndex, id);
+#endif
+
+ if (--i == 0) break; // done
+ matchedCurrentBits.markBit(currentPointerIndex);
+ }
+ }
+
+ // Fix id bits.
+ currentTouch.idBits = usedIdBits;
+ }
+}
+
+/* Special hack for devices that have bad screen data: if one of the
+ * points has moved more than a screen height from the last position,
+ * then drop it. */
+bool InputDevice::TouchScreenState::applyBadTouchFilter() {
+ // This hack requires valid axis parameters.
+ if (! parameters.yAxis.valid) {
+ return false;
+ }
+
+ uint32_t pointerCount = currentTouch.pointerCount;
+
+ // Nothing to do if there are no points.
+ if (pointerCount == 0) {
+ return false;
+ }
+
+ // Don't do anything if a finger is going down or up. We run
+ // here before assigning pointer IDs, so there isn't a good
+ // way to do per-finger matching.
+ if (pointerCount != lastTouch.pointerCount) {
+ return false;
+ }
+
+ // We consider a single movement across more than 7/16 of
+ // the long side of the screen to be bad. This was a magic value
+ // determined by looking at the maximum distance it is feasible
+ // to actually move in one sample.
+ int32_t maxDeltaY = parameters.yAxis.range * 7 / 16;
+
+ // XXX The original code in InputDevice.java included commented out
+ // code for testing the X axis. Note that when we drop a point
+ // we don't actually restore the old X either. Strange.
+ // The old code also tries to track when bad points were previously
+ // detected but it turns out that due to the placement of a "break"
+ // at the end of the loop, we never set mDroppedBadPoint to true
+ // so it is effectively dead code.
+ // Need to figure out if the old code is busted or just overcomplicated
+ // but working as intended.
+
+ // Look through all new points and see if any are farther than
+ // acceptable from all previous points.
+ for (uint32_t i = pointerCount; i-- > 0; ) {
+ int32_t y = currentTouch.pointers[i].y;
+ int32_t closestY = INT_MAX;
+ int32_t closestDeltaY = 0;
+
+#if DEBUG_HACKS
+ LOGD("BadTouchFilter: Looking at next point #%d: y=%d", i, y);
+#endif
+
+ for (uint32_t j = pointerCount; j-- > 0; ) {
+ int32_t lastY = lastTouch.pointers[j].y;
+ int32_t deltaY = abs(y - lastY);
+
+#if DEBUG_HACKS
+ LOGD("BadTouchFilter: Comparing with last point #%d: y=%d deltaY=%d",
+ j, lastY, deltaY);
+#endif
+
+ if (deltaY < maxDeltaY) {
+ goto SkipSufficientlyClosePoint;
+ }
+ if (deltaY < closestDeltaY) {
+ closestDeltaY = deltaY;
+ closestY = lastY;
+ }
+ }
+
+ // Must not have found a close enough match.
+#if DEBUG_HACKS
+ LOGD("BadTouchFilter: Dropping bad point #%d: newY=%d oldY=%d deltaY=%d maxDeltaY=%d",
+ i, y, closestY, closestDeltaY, maxDeltaY);
+#endif
+
+ currentTouch.pointers[i].y = closestY;
+ return true; // XXX original code only corrects one point
+
+ SkipSufficientlyClosePoint: ;
+ }
+
+ // No change.
+ return false;
+}
+
+/* Special hack for devices that have bad screen data: drop points where
+ * the coordinate value for one axis has jumped to the other pointer's location.
+ */
+bool InputDevice::TouchScreenState::applyJumpyTouchFilter() {
+ // This hack requires valid axis parameters.
+ if (! parameters.yAxis.valid) {
+ return false;
+ }
+
+ uint32_t pointerCount = currentTouch.pointerCount;
+ if (lastTouch.pointerCount != pointerCount) {
+#if DEBUG_HACKS
+ LOGD("JumpyTouchFilter: Different pointer count %d -> %d",
+ lastTouch.pointerCount, pointerCount);
+ for (uint32_t i = 0; i < pointerCount; i++) {
+ LOGD(" Pointer %d (%d, %d)", i,
+ currentTouch.pointers[i].x, currentTouch.pointers[i].y);
+ }
+#endif
+
+ if (jumpyTouchFilter.jumpyPointsDropped < JUMPY_TRANSITION_DROPS) {
+ if (lastTouch.pointerCount == 1 && pointerCount == 2) {
+ // Just drop the first few events going from 1 to 2 pointers.
+ // They're bad often enough that they're not worth considering.
+ currentTouch.pointerCount = 1;
+ jumpyTouchFilter.jumpyPointsDropped += 1;
+
+#if DEBUG_HACKS
+ LOGD("JumpyTouchFilter: Pointer 2 dropped");
+#endif
+ return true;
+ } else if (lastTouch.pointerCount == 2 && pointerCount == 1) {
+ // The event when we go from 2 -> 1 tends to be messed up too
+ currentTouch.pointerCount = 2;
+ currentTouch.pointers[0] = lastTouch.pointers[0];
+ currentTouch.pointers[1] = lastTouch.pointers[1];
+ jumpyTouchFilter.jumpyPointsDropped += 1;
+
+#if DEBUG_HACKS
+ for (int32_t i = 0; i < 2; i++) {
+ LOGD("JumpyTouchFilter: Pointer %d replaced (%d, %d)", i,
+ currentTouch.pointers[i].x, currentTouch.pointers[i].y);
+ }
+#endif
+ return true;
+ }
+ }
+ // Reset jumpy points dropped on other transitions or if limit exceeded.
+ jumpyTouchFilter.jumpyPointsDropped = 0;
+
+#if DEBUG_HACKS
+ LOGD("JumpyTouchFilter: Transition - drop limit reset");
+#endif
+ return false;
+ }
+
+ // We have the same number of pointers as last time.
+ // A 'jumpy' point is one where the coordinate value for one axis
+ // has jumped to the other pointer's location. No need to do anything
+ // else if we only have one pointer.
+ if (pointerCount < 2) {
+ return false;
+ }
+
+ if (jumpyTouchFilter.jumpyPointsDropped < JUMPY_DROP_LIMIT) {
+ int jumpyEpsilon = parameters.yAxis.range / JUMPY_EPSILON_DIVISOR;
+
+ // We only replace the single worst jumpy point as characterized by pointer distance
+ // in a single axis.
+ int32_t badPointerIndex = -1;
+ int32_t badPointerReplacementIndex = -1;
+ int32_t badPointerDistance = INT_MIN; // distance to be corrected
+
+ for (uint32_t i = pointerCount; i-- > 0; ) {
+ int32_t x = currentTouch.pointers[i].x;
+ int32_t y = currentTouch.pointers[i].y;
+
+#if DEBUG_HACKS
+ LOGD("JumpyTouchFilter: Point %d (%d, %d)", i, x, y);
+#endif
+
+ // Check if a touch point is too close to another's coordinates
+ bool dropX = false, dropY = false;
+ for (uint32_t j = 0; j < pointerCount; j++) {
+ if (i == j) {
+ continue;
+ }
+
+ if (abs(x - currentTouch.pointers[j].x) <= jumpyEpsilon) {
+ dropX = true;
+ break;
+ }
+
+ if (abs(y - currentTouch.pointers[j].y) <= jumpyEpsilon) {
+ dropY = true;
+ break;
+ }
+ }
+ if (! dropX && ! dropY) {
+ continue; // not jumpy
+ }
+
+ // Find a replacement candidate by comparing with older points on the
+ // complementary (non-jumpy) axis.
+ int32_t distance = INT_MIN; // distance to be corrected
+ int32_t replacementIndex = -1;
+
+ if (dropX) {
+ // X looks too close. Find an older replacement point with a close Y.
+ int32_t smallestDeltaY = INT_MAX;
+ for (uint32_t j = 0; j < pointerCount; j++) {
+ int32_t deltaY = abs(y - lastTouch.pointers[j].y);
+ if (deltaY < smallestDeltaY) {
+ smallestDeltaY = deltaY;
+ replacementIndex = j;
+ }
+ }
+ distance = abs(x - lastTouch.pointers[replacementIndex].x);
+ } else {
+ // Y looks too close. Find an older replacement point with a close X.
+ int32_t smallestDeltaX = INT_MAX;
+ for (uint32_t j = 0; j < pointerCount; j++) {
+ int32_t deltaX = abs(x - lastTouch.pointers[j].x);
+ if (deltaX < smallestDeltaX) {
+ smallestDeltaX = deltaX;
+ replacementIndex = j;
+ }
+ }
+ distance = abs(y - lastTouch.pointers[replacementIndex].y);
+ }
+
+ // If replacing this pointer would correct a worse error than the previous ones
+ // considered, then use this replacement instead.
+ if (distance > badPointerDistance) {
+ badPointerIndex = i;
+ badPointerReplacementIndex = replacementIndex;
+ badPointerDistance = distance;
+ }
+ }
+
+ // Correct the jumpy pointer if one was found.
+ if (badPointerIndex >= 0) {
+#if DEBUG_HACKS
+ LOGD("JumpyTouchFilter: Replacing bad pointer %d with (%d, %d)",
+ badPointerIndex,
+ lastTouch.pointers[badPointerReplacementIndex].x,
+ lastTouch.pointers[badPointerReplacementIndex].y);
+#endif
+
+ currentTouch.pointers[badPointerIndex].x =
+ lastTouch.pointers[badPointerReplacementIndex].x;
+ currentTouch.pointers[badPointerIndex].y =
+ lastTouch.pointers[badPointerReplacementIndex].y;
+ jumpyTouchFilter.jumpyPointsDropped += 1;
+ return true;
+ }
+ }
+
+ jumpyTouchFilter.jumpyPointsDropped = 0;
+ return false;
+}
+
+/* Special hack for devices that have bad screen data: aggregate and
+ * compute averages of the coordinate data, to reduce the amount of
+ * jitter seen by applications. */
+void InputDevice::TouchScreenState::applyAveragingTouchFilter() {
+ for (uint32_t currentIndex = 0; currentIndex < currentTouch.pointerCount; currentIndex++) {
+ uint32_t id = currentTouch.pointers[currentIndex].id;
+ int32_t x = currentTouch.pointers[currentIndex].x;
+ int32_t y = currentTouch.pointers[currentIndex].y;
+ int32_t pressure = currentTouch.pointers[currentIndex].pressure;
+
+ if (lastTouch.idBits.hasBit(id)) {
+ // Pointer was down before and is still down now.
+ // Compute average over history trace.
+ uint32_t start = averagingTouchFilter.historyStart[id];
+ uint32_t end = averagingTouchFilter.historyEnd[id];
+
+ int64_t deltaX = x - averagingTouchFilter.historyData[end].pointers[id].x;
+ int64_t deltaY = y - averagingTouchFilter.historyData[end].pointers[id].y;
+ uint64_t distance = uint64_t(deltaX * deltaX + deltaY * deltaY);
+
+#if DEBUG_HACKS
+ LOGD("AveragingTouchFilter: Pointer id %d - Distance from last sample: %lld",
+ id, distance);
+#endif
+
+ if (distance < AVERAGING_DISTANCE_LIMIT) {
+ // Increment end index in preparation for recording new historical data.
+ end += 1;
+ if (end > AVERAGING_HISTORY_SIZE) {
+ end = 0;
+ }
+
+ // If the end index has looped back to the start index then we have filled
+ // the historical trace up to the desired size so we drop the historical
+ // data at the start of the trace.
+ if (end == start) {
+ start += 1;
+ if (start > AVERAGING_HISTORY_SIZE) {
+ start = 0;
+ }
+ }
+
+ // Add the raw data to the historical trace.
+ averagingTouchFilter.historyStart[id] = start;
+ averagingTouchFilter.historyEnd[id] = end;
+ averagingTouchFilter.historyData[end].pointers[id].x = x;
+ averagingTouchFilter.historyData[end].pointers[id].y = y;
+ averagingTouchFilter.historyData[end].pointers[id].pressure = pressure;
+
+ // Average over all historical positions in the trace by total pressure.
+ int32_t averagedX = 0;
+ int32_t averagedY = 0;
+ int32_t totalPressure = 0;
+ for (;;) {
+ int32_t historicalX = averagingTouchFilter.historyData[start].pointers[id].x;
+ int32_t historicalY = averagingTouchFilter.historyData[start].pointers[id].y;
+ int32_t historicalPressure = averagingTouchFilter.historyData[start]
+ .pointers[id].pressure;
+
+ averagedX += historicalX * historicalPressure;
+ averagedY += historicalY * historicalPressure;
+ totalPressure += historicalPressure;
+
+ if (start == end) {
+ break;
+ }
+
+ start += 1;
+ if (start > AVERAGING_HISTORY_SIZE) {
+ start = 0;
+ }
+ }
+
+ averagedX /= totalPressure;
+ averagedY /= totalPressure;
+
+#if DEBUG_HACKS
+ LOGD("AveragingTouchFilter: Pointer id %d - "
+ "totalPressure=%d, averagedX=%d, averagedY=%d", id, totalPressure,
+ averagedX, averagedY);
+#endif
+
+ currentTouch.pointers[currentIndex].x = averagedX;
+ currentTouch.pointers[currentIndex].y = averagedY;
+ } else {
+#if DEBUG_HACKS
+ LOGD("AveragingTouchFilter: Pointer id %d - Exceeded max distance", id);
+#endif
+ }
+ } else {
+#if DEBUG_HACKS
+ LOGD("AveragingTouchFilter: Pointer id %d - Pointer went up", id);
+#endif
+ }
+
+ // Reset pointer history.
+ averagingTouchFilter.historyStart[id] = 0;
+ averagingTouchFilter.historyEnd[id] = 0;
+ averagingTouchFilter.historyData[0].pointers[id].x = x;
+ averagingTouchFilter.historyData[0].pointers[id].y = y;
+ averagingTouchFilter.historyData[0].pointers[id].pressure = pressure;
+ }
+}
+
+bool InputDevice::TouchScreenState::isPointInsideDisplay(int32_t x, int32_t y) const {
+ if (! parameters.xAxis.valid || ! parameters.yAxis.valid) {
+ // Assume all points on a touch screen without valid axis parameters are
+ // inside the display.
+ return true;
+ }
+
+ return x >= parameters.xAxis.minValue
+ && x <= parameters.xAxis.maxValue
+ && y >= parameters.yAxis.minValue
+ && y <= parameters.yAxis.maxValue;
+}
+
+const InputDevice::VirtualKey* InputDevice::TouchScreenState::findVirtualKeyHit() const {
+ int32_t x = currentTouch.pointers[0].x;
+ int32_t y = currentTouch.pointers[0].y;
+ for (size_t i = 0; i < virtualKeys.size(); i++) {
+ const InputDevice::VirtualKey& virtualKey = virtualKeys[i];
+
+#if DEBUG_VIRTUAL_KEYS
+ LOGD("VirtualKeys: Hit test (%d, %d): keyCode=%d, scanCode=%d, "
+ "left=%d, top=%d, right=%d, bottom=%d",
+ x, y,
+ virtualKey.keyCode, virtualKey.scanCode,
+ virtualKey.hitLeft, virtualKey.hitTop,
+ virtualKey.hitRight, virtualKey.hitBottom);
+#endif
+
+ if (virtualKey.isHit(x, y)) {
+ return & virtualKey;
+ }
+ }
+
+ return NULL;
+}
+
+
+// --- InputDevice::SingleTouchScreenState ---
+
+void InputDevice::SingleTouchScreenState::reset() {
+ accumulator.clear();
+ current.down = false;
+ current.x = 0;
+ current.y = 0;
+ current.pressure = 0;
+ current.size = 0;
+}
+
+
+// --- InputDevice::MultiTouchScreenState ---
+
+void InputDevice::MultiTouchScreenState::reset() {
+ accumulator.clear();
+}
+
+} // namespace android
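
Most of this new file is moved verbatim out of InputReader.cpp (the matching removals appear further down in this patch). The averaging filter is the least obvious piece: it keeps a short circular history per pointer id and replaces the reported position with a pressure-weighted mean of that history. The reduced sketch below shows only the weighted-average step, with the circular start/end bookkeeping flattened into a plain array; it is an illustration, not the AOSP code itself:

    #include <stdint.h>
    #include <stdio.h>

    struct Sample { int32_t x, y, pressure; };

    // Pressure-weighted average over a pointer's recent samples, as in
    // applyAveragingTouchFilter() above.
    static void averageByPressure(const Sample* history, int count,
            int32_t* outX, int32_t* outY) {
        int64_t sumX = 0, sumY = 0, totalPressure = 0;
        for (int i = 0; i < count; i++) {
            sumX += int64_t(history[i].x) * history[i].pressure;
            sumY += int64_t(history[i].y) * history[i].pressure;
            totalPressure += history[i].pressure;
        }
        *outX = int32_t(sumX / totalPressure);
        *outY = int32_t(sumY / totalPressure);
    }

    int main() {
        Sample trace[] = { {100, 200, 40}, {104, 198, 60}, {96, 205, 20} };
        int32_t x, y;
        averageByPressure(trace, 3, &x, &y);
        printf("averaged: (%d, %d)\n", x, y);   // firm samples pull harder than light ones
        return 0;
    }

Weighting by pressure lets light, noisy contacts contribute less than firm ones, which is why the filter accumulates x*pressure and y*pressure and divides by the pressure total rather than taking a plain mean.
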
diff --git a/libs/ui/InputDispatcher.cpp b/libs/ui/InputDispatcher.cpp
index f809cba..c4ffce1 100644
--- a/libs/ui/InputDispatcher.cpp
+++ b/libs/ui/InputDispatcher.cpp
@@ -40,10 +40,10 @@
// TODO, this needs to be somewhere else, perhaps in the policy
static inline bool isMovementKey(int32_t keyCode) {
- return keyCode == KEYCODE_DPAD_UP
- || keyCode == KEYCODE_DPAD_DOWN
- || keyCode == KEYCODE_DPAD_LEFT
- || keyCode == KEYCODE_DPAD_RIGHT;
+ return keyCode == AKEYCODE_DPAD_UP
+ || keyCode == AKEYCODE_DPAD_DOWN
+ || keyCode == AKEYCODE_DPAD_LEFT
+ || keyCode == AKEYCODE_DPAD_RIGHT;
}
static inline nsecs_t now() {
diff --git a/libs/ui/InputReader.cpp b/libs/ui/InputReader.cpp
index fced15c..0a21db7 100644
--- a/libs/ui/InputReader.cpp
+++ b/libs/ui/InputReader.cpp
@@ -33,18 +33,6 @@
/** Amount that trackball needs to move in order to generate a key event. */
#define TRACKBALL_MOVEMENT_THRESHOLD 6
-/* Slop distance for jumpy pointer detection.
- * The vertical range of the screen divided by this is our epsilon value. */
-#define JUMPY_EPSILON_DIVISOR 212
-
-/* Number of jumpy points to drop for touchscreens that need it. */
-#define JUMPY_TRANSITION_DROPS 3
-#define JUMPY_DROP_LIMIT 3
-
-/* Maximum squared distance for averaging.
- * If moving farther than this, turn of averaging to avoid lag in response. */
-#define AVERAGING_DISTANCE_LIMIT (75 * 75)
-
namespace android {
@@ -71,19 +59,19 @@
int32_t updateMetaState(int32_t keyCode, bool down, int32_t oldMetaState) {
int32_t mask;
switch (keyCode) {
- case KEYCODE_ALT_LEFT:
+ case AKEYCODE_ALT_LEFT:
mask = META_ALT_LEFT_ON;
break;
- case KEYCODE_ALT_RIGHT:
+ case AKEYCODE_ALT_RIGHT:
mask = META_ALT_RIGHT_ON;
break;
- case KEYCODE_SHIFT_LEFT:
+ case AKEYCODE_SHIFT_LEFT:
mask = META_SHIFT_LEFT_ON;
break;
- case KEYCODE_SHIFT_RIGHT:
+ case AKEYCODE_SHIFT_RIGHT:
mask = META_SHIFT_RIGHT_ON;
break;
- case KEYCODE_SYM:
+ case AKEYCODE_SYM:
mask = META_SYM_ON;
break;
default:
@@ -107,10 +95,10 @@
static const int32_t keyCodeRotationMap[][4] = {
// key codes enumerated counter-clockwise with the original (unrotated) key first
// no rotation, 90 degree rotation, 180 degree rotation, 270 degree rotation
- { KEYCODE_DPAD_DOWN, KEYCODE_DPAD_RIGHT, KEYCODE_DPAD_UP, KEYCODE_DPAD_LEFT },
- { KEYCODE_DPAD_RIGHT, KEYCODE_DPAD_UP, KEYCODE_DPAD_LEFT, KEYCODE_DPAD_DOWN },
- { KEYCODE_DPAD_UP, KEYCODE_DPAD_LEFT, KEYCODE_DPAD_DOWN, KEYCODE_DPAD_RIGHT },
- { KEYCODE_DPAD_LEFT, KEYCODE_DPAD_DOWN, KEYCODE_DPAD_RIGHT, KEYCODE_DPAD_UP },
+ { AKEYCODE_DPAD_DOWN, AKEYCODE_DPAD_RIGHT, AKEYCODE_DPAD_UP, AKEYCODE_DPAD_LEFT },
+ { AKEYCODE_DPAD_RIGHT, AKEYCODE_DPAD_UP, AKEYCODE_DPAD_LEFT, AKEYCODE_DPAD_DOWN },
+ { AKEYCODE_DPAD_UP, AKEYCODE_DPAD_LEFT, AKEYCODE_DPAD_DOWN, AKEYCODE_DPAD_RIGHT },
+ { AKEYCODE_DPAD_LEFT, AKEYCODE_DPAD_DOWN, AKEYCODE_DPAD_RIGHT, AKEYCODE_DPAD_UP },
};
static const int keyCodeRotationMapSize =
sizeof(keyCodeRotationMap) / sizeof(keyCodeRotationMap[0]);
@@ -127,668 +115,6 @@
}
-// --- InputDevice ---
-
-InputDevice::InputDevice(int32_t id, uint32_t classes, String8 name) :
- id(id), classes(classes), name(name), ignored(false) {
-}
-
-void InputDevice::reset() {
- if (isKeyboard()) {
- keyboard.reset();
- }
-
- if (isTrackball()) {
- trackball.reset();
- }
-
- if (isMultiTouchScreen()) {
- multiTouchScreen.reset();
- } else if (isSingleTouchScreen()) {
- singleTouchScreen.reset();
- }
-
- if (isTouchScreen()) {
- touchScreen.reset();
- }
-}
-
-
-// --- InputDevice::TouchData ---
-
-void InputDevice::TouchData::copyFrom(const TouchData& other) {
- pointerCount = other.pointerCount;
- idBits = other.idBits;
-
- for (uint32_t i = 0; i < pointerCount; i++) {
- pointers[i] = other.pointers[i];
- idToIndex[i] = other.idToIndex[i];
- }
-}
-
-
-// --- InputDevice::KeyboardState ---
-
-void InputDevice::KeyboardState::reset() {
- current.metaState = META_NONE;
- current.downTime = 0;
-}
-
-
-// --- InputDevice::TrackballState ---
-
-void InputDevice::TrackballState::reset() {
- accumulator.clear();
- current.down = false;
- current.downTime = 0;
-}
-
-
-// --- InputDevice::TouchScreenState ---
-
-void InputDevice::TouchScreenState::reset() {
- lastTouch.clear();
- downTime = 0;
- currentVirtualKey.status = CurrentVirtualKeyState::STATUS_UP;
-
- for (uint32_t i = 0; i < MAX_POINTERS; i++) {
- averagingTouchFilter.historyStart[i] = 0;
- averagingTouchFilter.historyEnd[i] = 0;
- }
-
- jumpyTouchFilter.jumpyPointsDropped = 0;
-}
-
-struct PointerDistanceHeapElement {
- uint32_t currentPointerIndex : 8;
- uint32_t lastPointerIndex : 8;
- uint64_t distance : 48; // squared distance
-};
-
-void InputDevice::TouchScreenState::calculatePointerIds() {
- uint32_t currentPointerCount = currentTouch.pointerCount;
- uint32_t lastPointerCount = lastTouch.pointerCount;
-
- if (currentPointerCount == 0) {
- // No pointers to assign.
- currentTouch.idBits.clear();
- } else if (lastPointerCount == 0) {
- // All pointers are new.
- currentTouch.idBits.clear();
- for (uint32_t i = 0; i < currentPointerCount; i++) {
- currentTouch.pointers[i].id = i;
- currentTouch.idToIndex[i] = i;
- currentTouch.idBits.markBit(i);
- }
- } else if (currentPointerCount == 1 && lastPointerCount == 1) {
- // Only one pointer and no change in count so it must have the same id as before.
- uint32_t id = lastTouch.pointers[0].id;
- currentTouch.pointers[0].id = id;
- currentTouch.idToIndex[id] = 0;
- currentTouch.idBits.value = BitSet32::valueForBit(id);
- } else {
- // General case.
- // We build a heap of squared euclidean distances between current and last pointers
- // associated with the current and last pointer indices. Then, we find the best
- // match (by distance) for each current pointer.
- PointerDistanceHeapElement heap[MAX_POINTERS * MAX_POINTERS];
-
- uint32_t heapSize = 0;
- for (uint32_t currentPointerIndex = 0; currentPointerIndex < currentPointerCount;
- currentPointerIndex++) {
- for (uint32_t lastPointerIndex = 0; lastPointerIndex < lastPointerCount;
- lastPointerIndex++) {
- int64_t deltaX = currentTouch.pointers[currentPointerIndex].x
- - lastTouch.pointers[lastPointerIndex].x;
- int64_t deltaY = currentTouch.pointers[currentPointerIndex].y
- - lastTouch.pointers[lastPointerIndex].y;
-
- uint64_t distance = uint64_t(deltaX * deltaX + deltaY * deltaY);
-
- // Insert new element into the heap (sift up).
- heap[heapSize].currentPointerIndex = currentPointerIndex;
- heap[heapSize].lastPointerIndex = lastPointerIndex;
- heap[heapSize].distance = distance;
- heapSize += 1;
- }
- }
-
- // Heapify
- for (uint32_t startIndex = heapSize / 2; startIndex != 0; ) {
- startIndex -= 1;
- for (uint32_t parentIndex = startIndex; ;) {
- uint32_t childIndex = parentIndex * 2 + 1;
- if (childIndex >= heapSize) {
- break;
- }
-
- if (childIndex + 1 < heapSize
- && heap[childIndex + 1].distance < heap[childIndex].distance) {
- childIndex += 1;
- }
-
- if (heap[parentIndex].distance <= heap[childIndex].distance) {
- break;
- }
-
- swap(heap[parentIndex], heap[childIndex]);
- parentIndex = childIndex;
- }
- }
-
-#if DEBUG_POINTER_ASSIGNMENT
- LOGD("calculatePointerIds - initial distance min-heap: size=%d", heapSize);
- for (size_t i = 0; i < heapSize; i++) {
- LOGD(" heap[%d]: cur=%d, last=%d, distance=%lld",
- i, heap[i].currentPointerIndex, heap[i].lastPointerIndex,
- heap[i].distance);
- }
-#endif
-
- // Pull matches out by increasing order of distance.
- // To avoid reassigning pointers that have already been matched, the loop keeps track
- // of which last and current pointers have been matched using the matchedXXXBits variables.
- // It also tracks the used pointer id bits.
- BitSet32 matchedLastBits(0);
- BitSet32 matchedCurrentBits(0);
- BitSet32 usedIdBits(0);
- bool first = true;
- for (uint32_t i = min(currentPointerCount, lastPointerCount); i > 0; i--) {
- for (;;) {
- if (first) {
- // The first time through the loop, we just consume the root element of
- // the heap (the one with smallest distance).
- first = false;
- } else {
- // Previous iterations consumed the root element of the heap.
- // Pop root element off of the heap (sift down).
- heapSize -= 1;
- assert(heapSize > 0);
-
- // Sift down.
- heap[0] = heap[heapSize];
- for (uint32_t parentIndex = 0; ;) {
- uint32_t childIndex = parentIndex * 2 + 1;
- if (childIndex >= heapSize) {
- break;
- }
-
- if (childIndex + 1 < heapSize
- && heap[childIndex + 1].distance < heap[childIndex].distance) {
- childIndex += 1;
- }
-
- if (heap[parentIndex].distance <= heap[childIndex].distance) {
- break;
- }
-
- swap(heap[parentIndex], heap[childIndex]);
- parentIndex = childIndex;
- }
-
-#if DEBUG_POINTER_ASSIGNMENT
- LOGD("calculatePointerIds - reduced distance min-heap: size=%d", heapSize);
- for (size_t i = 0; i < heapSize; i++) {
- LOGD(" heap[%d]: cur=%d, last=%d, distance=%lld",
- i, heap[i].currentPointerIndex, heap[i].lastPointerIndex,
- heap[i].distance);
- }
-#endif
- }
-
- uint32_t currentPointerIndex = heap[0].currentPointerIndex;
- if (matchedCurrentBits.hasBit(currentPointerIndex)) continue; // already matched
-
- uint32_t lastPointerIndex = heap[0].lastPointerIndex;
- if (matchedLastBits.hasBit(lastPointerIndex)) continue; // already matched
-
- matchedCurrentBits.markBit(currentPointerIndex);
- matchedLastBits.markBit(lastPointerIndex);
-
- uint32_t id = lastTouch.pointers[lastPointerIndex].id;
- currentTouch.pointers[currentPointerIndex].id = id;
- currentTouch.idToIndex[id] = currentPointerIndex;
- usedIdBits.markBit(id);
-
-#if DEBUG_POINTER_ASSIGNMENT
- LOGD("calculatePointerIds - matched: cur=%d, last=%d, id=%d, distance=%lld",
- lastPointerIndex, currentPointerIndex, id, heap[0].distance);
-#endif
- break;
- }
- }
-
- // Assign fresh ids to new pointers.
- if (currentPointerCount > lastPointerCount) {
- for (uint32_t i = currentPointerCount - lastPointerCount; ;) {
- uint32_t currentPointerIndex = matchedCurrentBits.firstUnmarkedBit();
- uint32_t id = usedIdBits.firstUnmarkedBit();
-
- currentTouch.pointers[currentPointerIndex].id = id;
- currentTouch.idToIndex[id] = currentPointerIndex;
- usedIdBits.markBit(id);
-
-#if DEBUG_POINTER_ASSIGNMENT
- LOGD("calculatePointerIds - assigned: cur=%d, id=%d",
- currentPointerIndex, id);
-#endif
-
- if (--i == 0) break; // done
- matchedCurrentBits.markBit(currentPointerIndex);
- }
- }
-
- // Fix id bits.
- currentTouch.idBits = usedIdBits;
- }
-}
-
-/* Special hack for devices that have bad screen data: if one of the
- * points has moved more than a screen height from the last position,
- * then drop it. */
-bool InputDevice::TouchScreenState::applyBadTouchFilter() {
- // This hack requires valid axis parameters.
- if (! parameters.yAxis.valid) {
- return false;
- }
-
- uint32_t pointerCount = currentTouch.pointerCount;
-
- // Nothing to do if there are no points.
- if (pointerCount == 0) {
- return false;
- }
-
- // Don't do anything if a finger is going down or up. We run
- // here before assigning pointer IDs, so there isn't a good
- // way to do per-finger matching.
- if (pointerCount != lastTouch.pointerCount) {
- return false;
- }
-
- // We consider a single movement across more than a 7/16 of
- // the long size of the screen to be bad. This was a magic value
- // determined by looking at the maximum distance it is feasible
- // to actually move in one sample.
- int32_t maxDeltaY = parameters.yAxis.range * 7 / 16;
-
- // XXX The original code in InputDevice.java included commented out
- // code for testing the X axis. Note that when we drop a point
- // we don't actually restore the old X either. Strange.
- // The old code also tries to track when bad points were previously
- // detected but it turns out that due to the placement of a "break"
- // at the end of the loop, we never set mDroppedBadPoint to true
- // so it is effectively dead code.
- // Need to figure out if the old code is busted or just overcomplicated
- // but working as intended.
-
- // Look through all new points and see if any are farther than
- // acceptable from all previous points.
- for (uint32_t i = pointerCount; i-- > 0; ) {
- int32_t y = currentTouch.pointers[i].y;
- int32_t closestY = INT_MAX;
- int32_t closestDeltaY = 0;
-
-#if DEBUG_HACKS
- LOGD("BadTouchFilter: Looking at next point #%d: y=%d", i, y);
-#endif
-
- for (uint32_t j = pointerCount; j-- > 0; ) {
- int32_t lastY = lastTouch.pointers[j].y;
- int32_t deltaY = abs(y - lastY);
-
-#if DEBUG_HACKS
- LOGD("BadTouchFilter: Comparing with last point #%d: y=%d deltaY=%d",
- j, lastY, deltaY);
-#endif
-
- if (deltaY < maxDeltaY) {
- goto SkipSufficientlyClosePoint;
- }
- if (deltaY < closestDeltaY) {
- closestDeltaY = deltaY;
- closestY = lastY;
- }
- }
-
- // Must not have found a close enough match.
-#if DEBUG_HACKS
- LOGD("BadTouchFilter: Dropping bad point #%d: newY=%d oldY=%d deltaY=%d maxDeltaY=%d",
- i, y, closestY, closestDeltaY, maxDeltaY);
-#endif
-
- currentTouch.pointers[i].y = closestY;
- return true; // XXX original code only corrects one point
-
- SkipSufficientlyClosePoint: ;
- }
-
- // No change.
- return false;
-}
-
-/* Special hack for devices that have bad screen data: drop points where
- * the coordinate value for one axis has jumped to the other pointer's location.
- */
-bool InputDevice::TouchScreenState::applyJumpyTouchFilter() {
- // This hack requires valid axis parameters.
- if (! parameters.yAxis.valid) {
- return false;
- }
-
- uint32_t pointerCount = currentTouch.pointerCount;
- if (lastTouch.pointerCount != pointerCount) {
-#if DEBUG_HACKS
- LOGD("JumpyTouchFilter: Different pointer count %d -> %d",
- lastTouch.pointerCount, pointerCount);
- for (uint32_t i = 0; i < pointerCount; i++) {
- LOGD(" Pointer %d (%d, %d)", i,
- currentTouch.pointers[i].x, currentTouch.pointers[i].y);
- }
-#endif
-
- if (jumpyTouchFilter.jumpyPointsDropped < JUMPY_TRANSITION_DROPS) {
- if (lastTouch.pointerCount == 1 && pointerCount == 2) {
- // Just drop the first few events going from 1 to 2 pointers.
- // They're bad often enough that they're not worth considering.
- currentTouch.pointerCount = 1;
- jumpyTouchFilter.jumpyPointsDropped += 1;
-
-#if DEBUG_HACKS
- LOGD("JumpyTouchFilter: Pointer 2 dropped");
-#endif
- return true;
- } else if (lastTouch.pointerCount == 2 && pointerCount == 1) {
- // The event when we go from 2 -> 1 tends to be messed up too
- currentTouch.pointerCount = 2;
- currentTouch.pointers[0] = lastTouch.pointers[0];
- currentTouch.pointers[1] = lastTouch.pointers[1];
- jumpyTouchFilter.jumpyPointsDropped += 1;
-
-#if DEBUG_HACKS
- for (int32_t i = 0; i < 2; i++) {
- LOGD("JumpyTouchFilter: Pointer %d replaced (%d, %d)", i,
- currentTouch.pointers[i].x, currentTouch.pointers[i].y);
- }
-#endif
- return true;
- }
- }
- // Reset jumpy points dropped on other transitions or if limit exceeded.
- jumpyTouchFilter.jumpyPointsDropped = 0;
-
-#if DEBUG_HACKS
- LOGD("JumpyTouchFilter: Transition - drop limit reset");
-#endif
- return false;
- }
-
- // We have the same number of pointers as last time.
- // A 'jumpy' point is one where the coordinate value for one axis
- // has jumped to the other pointer's location. No need to do anything
- // else if we only have one pointer.
- if (pointerCount < 2) {
- return false;
- }
-
- if (jumpyTouchFilter.jumpyPointsDropped < JUMPY_DROP_LIMIT) {
- int jumpyEpsilon = parameters.yAxis.range / JUMPY_EPSILON_DIVISOR;
-
- // We only replace the single worst jumpy point as characterized by pointer distance
- // in a single axis.
- int32_t badPointerIndex = -1;
- int32_t badPointerReplacementIndex = -1;
- int32_t badPointerDistance = INT_MIN; // distance to be corrected
-
- for (uint32_t i = pointerCount; i-- > 0; ) {
- int32_t x = currentTouch.pointers[i].x;
- int32_t y = currentTouch.pointers[i].y;
-
-#if DEBUG_HACKS
- LOGD("JumpyTouchFilter: Point %d (%d, %d)", i, x, y);
-#endif
-
- // Check if a touch point is too close to another's coordinates
- bool dropX = false, dropY = false;
- for (uint32_t j = 0; j < pointerCount; j++) {
- if (i == j) {
- continue;
- }
-
- if (abs(x - currentTouch.pointers[j].x) <= jumpyEpsilon) {
- dropX = true;
- break;
- }
-
- if (abs(y - currentTouch.pointers[j].y) <= jumpyEpsilon) {
- dropY = true;
- break;
- }
- }
- if (! dropX && ! dropY) {
- continue; // not jumpy
- }
-
- // Find a replacement candidate by comparing with older points on the
- // complementary (non-jumpy) axis.
- int32_t distance = INT_MIN; // distance to be corrected
- int32_t replacementIndex = -1;
-
- if (dropX) {
- // X looks too close. Find an older replacement point with a close Y.
- int32_t smallestDeltaY = INT_MAX;
- for (uint32_t j = 0; j < pointerCount; j++) {
- int32_t deltaY = abs(y - lastTouch.pointers[j].y);
- if (deltaY < smallestDeltaY) {
- smallestDeltaY = deltaY;
- replacementIndex = j;
- }
- }
- distance = abs(x - lastTouch.pointers[replacementIndex].x);
- } else {
- // Y looks too close. Find an older replacement point with a close X.
- int32_t smallestDeltaX = INT_MAX;
- for (uint32_t j = 0; j < pointerCount; j++) {
- int32_t deltaX = abs(x - lastTouch.pointers[j].x);
- if (deltaX < smallestDeltaX) {
- smallestDeltaX = deltaX;
- replacementIndex = j;
- }
- }
- distance = abs(y - lastTouch.pointers[replacementIndex].y);
- }
-
- // If replacing this pointer would correct a worse error than the previous ones
- // considered, then use this replacement instead.
- if (distance > badPointerDistance) {
- badPointerIndex = i;
- badPointerReplacementIndex = replacementIndex;
- badPointerDistance = distance;
- }
- }
-
- // Correct the jumpy pointer if one was found.
- if (badPointerIndex >= 0) {
-#if DEBUG_HACKS
- LOGD("JumpyTouchFilter: Replacing bad pointer %d with (%d, %d)",
- badPointerIndex,
- lastTouch.pointers[badPointerReplacementIndex].x,
- lastTouch.pointers[badPointerReplacementIndex].y);
-#endif
-
- currentTouch.pointers[badPointerIndex].x =
- lastTouch.pointers[badPointerReplacementIndex].x;
- currentTouch.pointers[badPointerIndex].y =
- lastTouch.pointers[badPointerReplacementIndex].y;
- jumpyTouchFilter.jumpyPointsDropped += 1;
- return true;
- }
- }
-
- jumpyTouchFilter.jumpyPointsDropped = 0;
- return false;
-}
-
-/* Special hack for devices that have bad screen data: aggregate and
- * compute averages of the coordinate data, to reduce the amount of
- * jitter seen by applications. */
-void InputDevice::TouchScreenState::applyAveragingTouchFilter() {
- for (uint32_t currentIndex = 0; currentIndex < currentTouch.pointerCount; currentIndex++) {
- uint32_t id = currentTouch.pointers[currentIndex].id;
- int32_t x = currentTouch.pointers[currentIndex].x;
- int32_t y = currentTouch.pointers[currentIndex].y;
- int32_t pressure = currentTouch.pointers[currentIndex].pressure;
-
- if (lastTouch.idBits.hasBit(id)) {
- // Pointer was down before and is still down now.
- // Compute average over history trace.
- uint32_t start = averagingTouchFilter.historyStart[id];
- uint32_t end = averagingTouchFilter.historyEnd[id];
-
- int64_t deltaX = x - averagingTouchFilter.historyData[end].pointers[id].x;
- int64_t deltaY = y - averagingTouchFilter.historyData[end].pointers[id].y;
- uint64_t distance = uint64_t(deltaX * deltaX + deltaY * deltaY);
-
-#if DEBUG_HACKS
- LOGD("AveragingTouchFilter: Pointer id %d - Distance from last sample: %lld",
- id, distance);
-#endif
-
- if (distance < AVERAGING_DISTANCE_LIMIT) {
- // Increment end index in preparation for recording new historical data.
- end += 1;
- if (end > AVERAGING_HISTORY_SIZE) {
- end = 0;
- }
-
- // If the end index has looped back to the start index then we have filled
- // the historical trace up to the desired size so we drop the historical
- // data at the start of the trace.
- if (end == start) {
- start += 1;
- if (start > AVERAGING_HISTORY_SIZE) {
- start = 0;
- }
- }
-
- // Add the raw data to the historical trace.
- averagingTouchFilter.historyStart[id] = start;
- averagingTouchFilter.historyEnd[id] = end;
- averagingTouchFilter.historyData[end].pointers[id].x = x;
- averagingTouchFilter.historyData[end].pointers[id].y = y;
- averagingTouchFilter.historyData[end].pointers[id].pressure = pressure;
-
- // Average over all historical positions in the trace by total pressure.
- int32_t averagedX = 0;
- int32_t averagedY = 0;
- int32_t totalPressure = 0;
- for (;;) {
- int32_t historicalX = averagingTouchFilter.historyData[start].pointers[id].x;
- int32_t historicalY = averagingTouchFilter.historyData[start].pointers[id].y;
- int32_t historicalPressure = averagingTouchFilter.historyData[start]
- .pointers[id].pressure;
-
- averagedX += historicalX * historicalPressure;
- averagedY += historicalY * historicalPressure;
- totalPressure += historicalPressure;
-
- if (start == end) {
- break;
- }
-
- start += 1;
- if (start > AVERAGING_HISTORY_SIZE) {
- start = 0;
- }
- }
-
- averagedX /= totalPressure;
- averagedY /= totalPressure;
-
-#if DEBUG_HACKS
- LOGD("AveragingTouchFilter: Pointer id %d - "
- "totalPressure=%d, averagedX=%d, averagedY=%d", id, totalPressure,
- averagedX, averagedY);
-#endif
-
- currentTouch.pointers[currentIndex].x = averagedX;
- currentTouch.pointers[currentIndex].y = averagedY;
- } else {
-#if DEBUG_HACKS
- LOGD("AveragingTouchFilter: Pointer id %d - Exceeded max distance", id);
-#endif
- }
- } else {
-#if DEBUG_HACKS
- LOGD("AveragingTouchFilter: Pointer id %d - Pointer went up", id);
-#endif
- }
-
- // Reset pointer history.
- averagingTouchFilter.historyStart[id] = 0;
- averagingTouchFilter.historyEnd[id] = 0;
- averagingTouchFilter.historyData[0].pointers[id].x = x;
- averagingTouchFilter.historyData[0].pointers[id].y = y;
- averagingTouchFilter.historyData[0].pointers[id].pressure = pressure;
- }
-}
-
-bool InputDevice::TouchScreenState::isPointInsideDisplay(int32_t x, int32_t y) const {
- if (! parameters.xAxis.valid || ! parameters.yAxis.valid) {
- // Assume all points on a touch screen without valid axis parameters are
- // inside the display.
- return true;
- }
-
- return x >= parameters.xAxis.minValue
- && x <= parameters.xAxis.maxValue
- && y >= parameters.yAxis.minValue
- && y <= parameters.yAxis.maxValue;
-}
-
-const InputDevice::VirtualKey* InputDevice::TouchScreenState::findVirtualKeyHit() const {
- int32_t x = currentTouch.pointers[0].x;
- int32_t y = currentTouch.pointers[0].y;
- for (size_t i = 0; i < virtualKeys.size(); i++) {
- const InputDevice::VirtualKey& virtualKey = virtualKeys[i];
-
-#if DEBUG_VIRTUAL_KEYS
- LOGD("VirtualKeys: Hit test (%d, %d): keyCode=%d, scanCode=%d, "
- "left=%d, top=%d, right=%d, bottom=%d",
- x, y,
- virtualKey.keyCode, virtualKey.scanCode,
- virtualKey.hitLeft, virtualKey.hitTop,
- virtualKey.hitRight, virtualKey.hitBottom);
-#endif
-
- if (virtualKey.isHit(x, y)) {
- return & virtualKey;
- }
- }
-
- return NULL;
-}
-
-
-// --- InputDevice::SingleTouchScreenState ---
-
-void InputDevice::SingleTouchScreenState::reset() {
- accumulator.clear();
- current.down = false;
- current.x = 0;
- current.y = 0;
- current.pressure = 0;
- current.size = 0;
-}
-
-
-// --- InputDevice::MultiTouchScreenState ---
-
-void InputDevice::MultiTouchScreenState::reset() {
- accumulator.clear();
-}
-
-
// --- InputReader ---
InputReader::InputReader(const sp<EventHubInterface>& eventHub,
@@ -927,32 +253,30 @@
bool down = rawEvent->value != 0;
int32_t scanCode = rawEvent->scanCode;
- if (device->isKeyboard() && (scanCode < BTN_FIRST || scanCode > BTN_LAST)) {
- int32_t keyCode = rawEvent->keyCode;
- onKey(rawEvent->when, device, down, keyCode, scanCode, rawEvent->flags);
- } else if (device->isSingleTouchScreen()) {
+ if (device->isSingleTouchScreen()) {
switch (rawEvent->scanCode) {
case BTN_TOUCH:
device->singleTouchScreen.accumulator.fields |=
InputDevice::SingleTouchScreenState::Accumulator::FIELD_BTN_TOUCH;
device->singleTouchScreen.accumulator.btnTouch = down;
- break;
+ return;
}
- } else if (device->isTrackball()) {
+ }
+
+ if (device->isTrackball()) {
switch (rawEvent->scanCode) {
case BTN_MOUSE:
device->trackball.accumulator.fields |=
InputDevice::TrackballState::Accumulator::FIELD_BTN_MOUSE;
device->trackball.accumulator.btnMouse = down;
-
- // send the down immediately
- // XXX this emulates the old behavior of KeyInputQueue, unclear whether it is
- // necessary or if we can wait until the next sync
- onTrackballStateChanged(rawEvent->when, device);
- device->trackball.accumulator.clear();
- break;
+ return;
}
}
+
+ if (device->isKeyboard()) {
+ int32_t keyCode = rawEvent->keyCode;
+ onKey(rawEvent->when, device, down, keyCode, scanCode, rawEvent->flags);
+ }
}
void InputReader::handleRelativeMotion(const RawEvent* rawEvent) {
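The reshuffled routing above lets one device be both a touch screen (or trackball) and a keyboard: BTN_TOUCH and BTN_MOUSE are consumed by their accumulators and return early, and only the remaining scan codes fall through to onKey(). A compact sketch of that early-return classification, with simplified stand-ins for the reader's device and event types (not the real InputReader structures):

    #include <cstdio>

    namespace sketch {

    enum { BTN_MOUSE = 0x110, BTN_TOUCH = 0x14a };  // values from <linux/input.h>

    struct RawEvent { int scanCode; int keyCode; int value; };
    struct Device { bool isSingleTouchScreen; bool isTrackball; bool isKeyboard; };

    void handleKey(Device& device, const RawEvent& ev) {
        bool down = ev.value != 0;

        if (device.isSingleTouchScreen && ev.scanCode == BTN_TOUCH) {
            std::printf("accumulate BTN_TOUCH down=%d (emitted at next sync)\n", down);
            return;
        }
        if (device.isTrackball && ev.scanCode == BTN_MOUSE) {
            std::printf("accumulate BTN_MOUSE down=%d (emitted at next sync)\n", down);
            return;
        }
        if (device.isKeyboard) {
            // only ordinary keys reach the key path
            std::printf("key event keyCode=%d down=%d\n", ev.keyCode, down);
        }
    }

    }  // namespace sketch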
diff --git a/libs/ui/InputTransport.cpp b/libs/ui/InputTransport.cpp
index 25def3c..fc83e31 100644
--- a/libs/ui/InputTransport.cpp
+++ b/libs/ui/InputTransport.cpp
@@ -690,22 +690,3 @@
}
} // namespace android
-
-// --- AInputQueue ---
-
-using android::InputEvent;
-using android::InputChannel;
-using android::InputConsumer;
-using android::sp;
-using android::status_t;
-
-AInputQueue::AInputQueue(const sp<InputChannel>& channel) :
- mConsumer(channel) {
-}
-
-AInputQueue::~AInputQueue() {
-}
-
-status_t AInputQueue::consume(InputEvent** event) {
- return mConsumer.consume(&mInputEventFactory, event);
-}
diff --git a/libs/ui/PixelFormat.cpp b/libs/ui/PixelFormat.cpp
index b205418..edf1aed 100644
--- a/libs/ui/PixelFormat.cpp
+++ b/libs/ui/PixelFormat.cpp
@@ -60,7 +60,6 @@
switch (format) {
case HAL_PIXEL_FORMAT_YCbCr_422_SP:
case HAL_PIXEL_FORMAT_YCbCr_422_I:
- case HAL_PIXEL_FORMAT_YV16:
info->bitsPerPixel = 16;
goto done;
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
diff --git a/libs/ui/tests/Android.mk b/libs/ui/tests/Android.mk
index 46d7493..62f824f 100644
--- a/libs/ui/tests/Android.mk
+++ b/libs/ui/tests/Android.mk
@@ -2,6 +2,9 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
+ifneq ($(TARGET_SIMULATOR),true)
+
+# Build the unit tests.
test_src_files := \
InputChannel_test.cpp \
InputDispatcher_test.cpp \
@@ -43,3 +46,5 @@
# Build the manual test programs.
include $(call all-subdir-makefiles)
+
+endif
\ No newline at end of file
diff --git a/libs/ui/tests/InputPublisherAndConsumer_test.cpp b/libs/ui/tests/InputPublisherAndConsumer_test.cpp
index 2d6b531..55504f2 100644
--- a/libs/ui/tests/InputPublisherAndConsumer_test.cpp
+++ b/libs/ui/tests/InputPublisherAndConsumer_test.cpp
@@ -76,7 +76,7 @@
const int32_t nature = INPUT_EVENT_NATURE_KEY;
const int32_t action = KEY_EVENT_ACTION_DOWN;
const int32_t flags = KEY_EVENT_FLAG_FROM_SYSTEM;
- const int32_t keyCode = KEYCODE_ENTER;
+ const int32_t keyCode = AKEYCODE_ENTER;
const int32_t scanCode = 13;
const int32_t metaState = META_ALT_LEFT_ON | META_ALT_ON;
const int32_t repeatCount = 1;
diff --git a/libs/utils/Android.mk b/libs/utils/Android.mk
index 2bb42ab..8bd5823 100644
--- a/libs/utils/Android.mk
+++ b/libs/utils/Android.mk
@@ -122,3 +122,13 @@
include $(BUILD_STATIC_LIBRARY)
endif
endif
+
+
+# Include subdirectory makefiles
+# ============================================================
+
+# If we're building with ONE_SHOT_MAKEFILE (mm, mmm), then what the framework
+# team really wants is to build the stuff defined by this makefile.
+ifeq (,$(ONE_SHOT_MAKEFILE))
+include $(call first-makefiles-under,$(LOCAL_PATH))
+endif
\ No newline at end of file
diff --git a/libs/utils/ObbFile.cpp b/libs/utils/ObbFile.cpp
index 3a4a03a..fe49300 100644
--- a/libs/utils/ObbFile.cpp
+++ b/libs/utils/ObbFile.cpp
@@ -91,22 +91,24 @@
fd = ::open(filename, O_RDONLY);
if (fd < 0) {
+ LOGW("couldn't open file %s: %s", filename, strerror(errno));
goto out;
}
success = readFrom(fd);
close(fd);
-out:
if (!success) {
- LOGW("failed to read from %s\n", filename);
+ LOGW("failed to read from %s (fd=%d)\n", filename, fd);
}
+
+out:
return success;
}
bool ObbFile::readFrom(int fd)
{
if (fd < 0) {
- LOGW("failed to read file\n");
+ LOGW("attempt to read from invalid fd\n");
return false;
}
@@ -149,10 +151,16 @@
footerSize = get4LE((unsigned char*)footer);
if (footerSize > (size_t)fileLength - kFooterTagSize
|| footerSize > kMaxBufSize) {
- LOGW("claimed footer size is too large (0x%08lx; file size is 0x%08llx)\n",
+ LOGW("claimed footer size is too large (0x%08zx; file size is 0x%08llx)\n",
footerSize, fileLength);
return false;
}
+
+ if (footerSize < kFooterMinSize) {
+ LOGW("claimed footer size is too small (%08zx; minimum size is 0x%x)\n",
+ footerSize, kFooterMinSize);
+ return false;
+ }
}
my_off64_t fileOffset = fileLength - footerSize - kFooterTagSize;
@@ -161,26 +169,22 @@
return false;
}
- size_t readAmount = kMaxBufSize;
- if (readAmount > footerSize)
- readAmount = footerSize;
-
- char* scanBuf = (char*)malloc(readAmount);
+ char* scanBuf = (char*)malloc(footerSize);
if (scanBuf == NULL) {
LOGW("couldn't allocate scanBuf: %s\n", strerror(errno));
return false;
}
- actual = TEMP_FAILURE_RETRY(read(fd, scanBuf, readAmount));
+ actual = TEMP_FAILURE_RETRY(read(fd, scanBuf, footerSize));
- // readAmount is guaranteed to be less than kMaxBufSize
+ // footerSize was bounds-checked against kMaxBufSize above
- if (actual != (ssize_t)readAmount) {
+ if (actual != (ssize_t)footerSize) {
LOGI("couldn't read ObbFile footer: %s\n", strerror(errno));
free(scanBuf);
return false;
}
#ifdef DEBUG
- for (int i = 0; i < readAmount; ++i) {
+ for (int i = 0; i < footerSize; ++i) {
LOGI("char: 0x%02x", scanBuf[i]);
}
#endif
@@ -197,7 +201,8 @@
uint32_t packageNameLen = get4LE((unsigned char*)scanBuf + kPackageNameLenOffset);
if (packageNameLen <= 0
|| packageNameLen > (footerSize - kPackageNameOffset)) {
- LOGW("bad ObbFile package name length (0x%08x)\n", packageNameLen);
+ LOGW("bad ObbFile package name length (0x%04x; 0x%04x possible)\n",
+ packageNameLen, footerSize - kPackageNameOffset);
free(scanBuf);
return false;
}
@@ -206,6 +211,11 @@
mPackageName = String8(const_cast<char*>(packageName), packageNameLen);
free(scanBuf);
+
+#ifdef DEBUG
+ LOGI("Obb scan succeeded: packageName=%s, version=%d\n", mPackageName.string(), mVersion);
+#endif
+
return true;
}
@@ -234,6 +244,8 @@
return false;
}
+ my_lseek64(fd, 0, SEEK_END);
+
if (mPackageName.size() == 0 || mVersion == -1) {
LOGW("tried to write uninitialized ObbFile data");
return false;
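The checks added above reject a claimed footer size that is larger than the file (minus the footer tag), larger than kMaxBufSize, or smaller than kFooterMinSize, all before anything is allocated or read. A self-contained sketch of the same bounds check; the constant values here are illustrative assumptions, only the names come from ObbFile.cpp:

    #include <cstdint>

    namespace sketch {

    // Assumed illustration values; the real limits are defined in ObbFile.cpp.
    static const size_t kFooterTagSize = 8;
    static const size_t kFooterMinSize = 21;
    static const size_t kMaxBufSize    = 32768;

    // Returns true if a footer of `footerSize` bytes can legally sit at the end
    // of a file of `fileLength` bytes, just before the footer tag.
    bool footerSizeIsSane(size_t footerSize, uint64_t fileLength) {
        if (fileLength < kFooterTagSize) return false;
        if (footerSize > fileLength - kFooterTagSize) return false;  // larger than the file
        if (footerSize > kMaxBufSize) return false;                  // refuse huge allocations
        if (footerSize < kFooterMinSize) return false;               // too small to hold the fields
        return true;
    }

    }  // namespace sketch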
diff --git a/libs/utils/tests/Android.mk b/libs/utils/tests/Android.mk
index f1b8cd5..b9f206a 100644
--- a/libs/utils/tests/Android.mk
+++ b/libs/utils/tests/Android.mk
@@ -2,6 +2,9 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
+ifneq ($(TARGET_SIMULATOR),true)
+
+# Build the unit tests.
test_src_files := \
ObbFile_test.cpp \
PollLoop_test.cpp
@@ -37,3 +40,5 @@
$(eval LOCAL_MODULE_TAGS := $(module_tags)) \
$(eval include $(BUILD_EXECUTABLE)) \
)
+
+endif
\ No newline at end of file
diff --git a/libs/utils/tests/ObbFile_test.cpp b/libs/utils/tests/ObbFile_test.cpp
index 05aaf08..29bb70a 100644
--- a/libs/utils/tests/ObbFile_test.cpp
+++ b/libs/utils/tests/ObbFile_test.cpp
@@ -22,6 +22,8 @@
#include <gtest/gtest.h>
+#include <fcntl.h>
+
namespace android {
#define TEST_FILENAME "/test.obb"
@@ -39,6 +41,11 @@
const int totalLen = strlen(mExternalStorage) + strlen(TEST_FILENAME) + 1;
mFileName = new char[totalLen];
snprintf(mFileName, totalLen, "%s%s", mExternalStorage, TEST_FILENAME);
+
+ int fd = ::open(mFileName, O_CREAT | O_TRUNC, 0664); // O_CREAT requires an explicit mode
+ if (fd < 0) {
+ FAIL() << "Couldn't create " << mFileName << " for tests";
+ }
}
virtual void TearDown() {
@@ -46,8 +53,8 @@
};
TEST_F(ObbFileTest, ReadFailure) {
- EXPECT_FALSE(mObbFile->readFrom(-1))
- << "No failure on invalid file descriptor";
+ EXPECT_FALSE(mObbFile->readFrom(-1))
+ << "No failure on invalid file descriptor";
}
TEST_F(ObbFileTest, WriteThenRead) {
@@ -66,10 +73,10 @@
<< "couldn't read from fake .obb file";
EXPECT_EQ(versionNum, mObbFile->getVersion())
- << "version didn't come out the same as it went in";
+ << "version didn't come out the same as it went in";
const char* currentPackageName = mObbFile->getPackageName().string();
EXPECT_STREQ(packageName, currentPackageName)
- << "package name didn't come out the same as it went in";
+ << "package name didn't come out the same as it went in";
}
}
diff --git a/media/java/android/media/MediaFile.java b/media/java/android/media/MediaFile.java
index 0decb1d..a346ae4 100644
--- a/media/java/android/media/MediaFile.java
+++ b/media/java/android/media/MediaFile.java
@@ -107,6 +107,9 @@
// maps mime type to MTP format code
private static HashMap<String, Integer> sMimeTypeToFormatMap
= new HashMap<String, Integer>();
+ // maps MTP format code to mime type
+ private static HashMap<Integer, String> sFormatToMimeTypeMap
+ = new HashMap<Integer, String>();
static void addFileType(String extension, int fileType, String mimeType) {
sFileTypeMap.put(extension, new MediaFileType(fileType, mimeType));
@@ -117,6 +120,7 @@
addFileType(extension, fileType, mimeType);
sFileTypeToFormatMap.put(extension, Integer.valueOf(mtpFormatCode));
sMimeTypeToFormatMap.put(mimeType, Integer.valueOf(mtpFormatCode));
+ sFormatToMimeTypeMap.put(mtpFormatCode, mimeType);
}
private static boolean isWMAEnabled() {
@@ -253,4 +257,8 @@
}
return Mtp.Object.FORMAT_UNDEFINED;
}
+
+ public static String getMimeTypeForFormatCode(int formatCode) {
+ return sFormatToMimeTypeMap.get(formatCode);
+ }
}
diff --git a/media/java/android/media/MediaScanner.java b/media/java/android/media/MediaScanner.java
index 3333268..7cbe409 100644
--- a/media/java/android/media/MediaScanner.java
+++ b/media/java/android/media/MediaScanner.java
@@ -305,6 +305,7 @@
private Uri mGenresUri;
private Uri mPlaylistsUri;
private boolean mProcessPlaylists, mProcessGenres;
+ private int mMtpObjectHandle;
// used when scanning the image database so we know whether we have to prune
// old thumbnail files
@@ -625,6 +626,9 @@
map.put(MediaStore.MediaColumns.DATE_MODIFIED, mLastModified);
map.put(MediaStore.MediaColumns.SIZE, mFileSize);
map.put(MediaStore.MediaColumns.MIME_TYPE, mMimeType);
+ if (mMtpObjectHandle != 0) {
+ map.put(MediaStore.MediaColumns.MTP_OBJECT_HANDLE, mMtpObjectHandle);
+ }
if (MediaFile.isVideoFileType(mFileType)) {
map.put(Video.Media.ARTIST, (mArtist != null && mArtist.length() > 0 ? mArtist : MediaStore.UNKNOWN_STRING));
@@ -1227,6 +1231,14 @@
}
}
+ public Uri scanMtpFile(String path, String volumeName, int objectHandle, int format) {
+ String mimeType = MediaFile.getMimeTypeForFormatCode(format);
+ mMtpObjectHandle = objectHandle;
+ Uri result = scanSingleFile(path, volumeName, mimeType);
+ mMtpObjectHandle = 0;
+ return result;
+ }
+
// returns the number of matching file/directory names, starting from the right
private int matchPaths(String path1, String path2) {
int result = 0;
diff --git a/media/java/android/media/MtpDatabase.java b/media/java/android/media/MtpDatabase.java
index e37ea93..2b311f5 100644
--- a/media/java/android/media/MtpDatabase.java
+++ b/media/java/android/media/MtpDatabase.java
@@ -17,6 +17,7 @@
package android.media;
import android.content.Context;
+import android.content.ContentValues;
import android.content.IContentProvider;
import android.database.Cursor;
import android.net.Uri;
@@ -35,6 +36,9 @@
private final String mVolumeName;
private final Uri mObjectsUri;
+ // FIXME - this should be passed in via the constructor
+ private final int mStorageID = 0x00010001;
+
private static final String[] ID_PROJECTION = new String[] {
MtpObjects.ObjectColumns._ID, // 0
};
@@ -57,6 +61,22 @@
private static final String PARENT_FORMAT_WHERE = PARENT_WHERE + " AND "
+ MtpObjects.ObjectColumns.FORMAT + "=?";
+ private final MediaScanner mMediaScanner;
+
+ // MTP property codes
+ private static final int MTP_PROPERTY_STORAGE_ID = 0xDC01;
+ private static final int MTP_PROPERTY_OBJECT_FORMAT = 0xDC02;
+ private static final int MTP_PROPERTY_OBJECT_SIZE = 0xDC04;
+ private static final int MTP_PROPERTY_OBJECT_FILE_NAME = 0xDC07;
+ private static final int MTP_PROPERTY_DATE_MODIFIED = 0xDC09;
+ private static final int MTP_PROPERTY_PARENT_OBJECT = 0xDC0B;
+
+ // MTP response codes
+ private static final int MTP_RESPONSE_OK = 0x2001;
+ private static final int MTP_RESPONSE_GENERAL_ERROR = 0x2002;
+ private static final int MTP_RESPONSE_INVALID_OBJECT_HANDLE = 0x2009;
+ private static final int MTP_RESPONSE_OBJECT_PROP_NOT_SUPPORTED = 0xA80A;
+
static {
System.loadLibrary("media_jni");
}
@@ -67,6 +87,7 @@
mMediaProvider = context.getContentResolver().acquireProvider("media");
mVolumeName = volumeName;
mObjectsUri = MtpObjects.getContentUri(volumeName);
+ mMediaScanner = new MediaScanner(context);
}
@Override
@@ -74,10 +95,35 @@
native_finalize();
}
- private int addFile(String path, int format, int parent,
+ private int beginSendObject(String path, int format, int parent,
int storage, long size, long modified) {
- Log.d(TAG, "addFile " + path);
- return 0;
+ ContentValues values = new ContentValues();
+ values.put(MtpObjects.ObjectColumns.DATA, path);
+ values.put(MtpObjects.ObjectColumns.FORMAT, format);
+ values.put(MtpObjects.ObjectColumns.PARENT, parent);
+ // storage is ignored for now
+ values.put(MtpObjects.ObjectColumns.SIZE, size);
+ values.put(MtpObjects.ObjectColumns.DATE_MODIFIED, modified);
+
+ try {
+ Uri uri = mMediaProvider.insert(mObjectsUri, values);
+ if (uri != null) {
+ return Integer.parseInt(uri.getPathSegments().get(2));
+ } else {
+ return -1;
+ }
+ } catch (RemoteException e) {
+ Log.e(TAG, "RemoteException in beginSendObject", e);
+ return -1;
+ }
+ }
+
+ private void endSendObject(String path, int handle, int format, boolean succeeded) {
+ if (succeeded) {
+ Uri uri = mMediaScanner.scanMtpFile(path, mVolumeName, handle, format);
+ } else {
+ deleteFile(handle);
+ }
}
private int[] getObjectList(int storageID, int format, int parent) {
@@ -121,7 +167,71 @@
private int getObjectProperty(int handle, int property,
long[] outIntValue, char[] outStringValue) {
Log.d(TAG, "getObjectProperty: " + property);
- return 0;
+ String column = null;
+ boolean isString = false;
+
+ switch (property) {
+ case MTP_PROPERTY_STORAGE_ID:
+ outIntValue[0] = mStorageID;
+ return MTP_RESPONSE_OK;
+ case MTP_PROPERTY_OBJECT_FORMAT:
+ column = MtpObjects.ObjectColumns.FORMAT;
+ break;
+ case MTP_PROPERTY_OBJECT_SIZE:
+ column = MtpObjects.ObjectColumns.SIZE;
+ break;
+ case MTP_PROPERTY_OBJECT_FILE_NAME:
+ column = MtpObjects.ObjectColumns.DATA;
+ isString = true;
+ break;
+ case MTP_PROPERTY_DATE_MODIFIED:
+ column = MtpObjects.ObjectColumns.DATE_MODIFIED;
+ break;
+ case MTP_PROPERTY_PARENT_OBJECT:
+ column = MtpObjects.ObjectColumns.PARENT;
+ break;
+ default:
+ return MTP_RESPONSE_OBJECT_PROP_NOT_SUPPORTED;
+ }
+
+ Cursor c = null;
+ try {
+ // for now we are only reading properties from the "objects" table
+ c = mMediaProvider.query(mObjectsUri,
+ new String [] { MtpObjects.ObjectColumns._ID, column },
+ ID_WHERE, new String[] { Integer.toString(handle) }, null);
+ if (c != null && c.moveToNext()) {
+ if (isString) {
+ String value = c.getString(1);
+ int start = 0;
+
+ if (property == MTP_PROPERTY_OBJECT_FILE_NAME) {
+ // extract name from full path
+ int lastSlash = value.lastIndexOf('/');
+ if (lastSlash >= 0) {
+ start = lastSlash + 1;
+ }
+ }
+ int end = value.length();
+ if (end - start > 255) {
+ end = start + 255;
+ }
+ value.getChars(start, end, outStringValue, 0);
+ outStringValue[end - start] = 0;
+ } else {
+ outIntValue[0] = c.getLong(1);
+ }
+ return MTP_RESPONSE_OK;
+ }
+ } catch (Exception e) {
+ return MTP_RESPONSE_GENERAL_ERROR;
+ } finally {
+ if (c != null) {
+ c.close();
+ }
+ }
+ // query failed if we get here
+ return MTP_RESPONSE_INVALID_OBJECT_HANDLE;
}
private boolean getObjectInfo(int handle, int[] outStorageFormatParent,
@@ -132,7 +242,7 @@
c = mMediaProvider.query(mObjectsUri, OBJECT_INFO_PROJECTION,
ID_WHERE, new String[] { Integer.toString(handle) }, null);
if (c != null && c.moveToNext()) {
- outStorageFormatParent[0] = 0x00010001;
+ outStorageFormatParent[0] = mStorageID;
outStorageFormatParent[1] = c.getInt(2);
outStorageFormatParent[2] = c.getInt(3);
diff --git a/media/java/android/media/MtpServer.java b/media/java/android/media/MtpServer.java
index 766a86a..b0945a5 100644
--- a/media/java/android/media/MtpServer.java
+++ b/media/java/android/media/MtpServer.java
@@ -47,6 +47,14 @@
native_stop();
}
+ public void sendObjectAdded(int handle) {
+ native_send_object_added(handle);
+ }
+
+ public void sendObjectRemoved(int handle) {
+ native_send_object_removed(handle);
+ }
+
// used by the JNI code
private int mNativeContext;
@@ -54,4 +62,6 @@
private native final void native_finalize();
private native final void native_start();
private native final void native_stop();
+ private native final void native_send_object_added(int handle);
+ private native final void native_send_object_removed(int handle);
}
diff --git a/media/jni/android_media_MtpDatabase.cpp b/media/jni/android_media_MtpDatabase.cpp
index 6bdd8f0..53e41e8 100644
--- a/media/jni/android_media_MtpDatabase.cpp
+++ b/media/jni/android_media_MtpDatabase.cpp
@@ -36,7 +36,8 @@
// ----------------------------------------------------------------------------
-static jmethodID method_addFile;
+static jmethodID method_beginSendObject;
+static jmethodID method_endSendObject;
static jmethodID method_getObjectList;
static jmethodID method_getObjectProperty;
static jmethodID method_getObjectInfo;
@@ -62,13 +63,18 @@
virtual ~MyMtpDatabase();
void cleanup(JNIEnv *env);
- virtual MtpObjectHandle addFile(const char* path,
+ virtual MtpObjectHandle beginSendObject(const char* path,
MtpObjectFormat format,
MtpObjectHandle parent,
MtpStorageID storage,
uint64_t size,
time_t modified);
+ virtual void endSendObject(const char* path,
+ MtpObjectHandle handle,
+ MtpObjectFormat format,
+ bool succeeded);
+
virtual MtpObjectHandleList* getObjectList(MtpStorageID storageID,
MtpObjectFormat format,
MtpObjectHandle parent);
@@ -135,17 +141,24 @@
MyMtpDatabase::~MyMtpDatabase() {
}
-MtpObjectHandle MyMtpDatabase::addFile(const char* path,
+MtpObjectHandle MyMtpDatabase::beginSendObject(const char* path,
MtpObjectFormat format,
MtpObjectHandle parent,
MtpStorageID storage,
uint64_t size,
time_t modified) {
JNIEnv* env = AndroidRuntime::getJNIEnv();
- return env->CallIntMethod(mDatabase, method_addFile, env->NewStringUTF(path),
+ return env->CallIntMethod(mDatabase, method_beginSendObject, env->NewStringUTF(path),
(jint)format, (jint)parent, (jint)storage, (jlong)size, (jlong)modified);
}
+void MyMtpDatabase::endSendObject(const char* path, MtpObjectHandle handle,
+ MtpObjectFormat format, bool succeeded) {
+ JNIEnv* env = AndroidRuntime::getJNIEnv();
+ env->CallVoidMethod(mDatabase, method_endSendObject, env->NewStringUTF(path),
+ (jint)handle, (jint)format, (jboolean)succeeded);
+}
+
MtpObjectHandleList* MyMtpDatabase::getObjectList(MtpStorageID storageID,
MtpObjectFormat format,
MtpObjectHandle parent) {
@@ -397,9 +410,14 @@
LOGE("Can't find android/media/MtpDatabase");
return -1;
}
- method_addFile = env->GetMethodID(clazz, "addFile", "(Ljava/lang/String;IIIJJ)I");
- if (method_addFile == NULL) {
- LOGE("Can't find addFile");
+ method_beginSendObject = env->GetMethodID(clazz, "beginSendObject", "(Ljava/lang/String;IIIJJ)I");
+ if (method_beginSendObject == NULL) {
+ LOGE("Can't find beginSendObject");
+ return -1;
+ }
+ method_endSendObject = env->GetMethodID(clazz, "endSendObject", "(Ljava/lang/String;IIZ)V");
+ if (method_endSendObject == NULL) {
+ LOGE("Can't find endSendObject");
return -1;
}
method_getObjectList = env->GetMethodID(clazz, "getObjectList", "(III)[I");
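The JNI glue above resolves beginSendObject/endSendObject to jmethodIDs once, at registration time, and fails registration if the Java-side signatures drift. A minimal sketch of that cached-jmethodID pattern, using a hypothetical com/example/Callbacks class rather than the real MtpDatabase bindings, with a pending-exception check added after the upcall:

    #include <jni.h>

    // Hypothetical example class/method; only the caching pattern mirrors the
    // bindings above, the names are made up for illustration.
    static jmethodID sMethodOnEvent;

    static int cacheMethods(JNIEnv* env) {
        jclass clazz = env->FindClass("com/example/Callbacks");
        if (clazz == NULL) {
            return -1;
        }
        sMethodOnEvent = env->GetMethodID(clazz, "onEvent", "(ILjava/lang/String;)V");
        if (sMethodOnEvent == NULL) {
            return -1;  // signature mismatch: fail at load time, not at call time
        }
        return 0;
    }

    static void notifyEvent(JNIEnv* env, jobject callbacks, int code, const char* msg) {
        jstring jmsg = env->NewStringUTF(msg);
        env->CallVoidMethod(callbacks, sMethodOnEvent, (jint)code, jmsg);
        if (env->ExceptionCheck()) {   // a Java exception would otherwise stay pending
            env->ExceptionDescribe();
            env->ExceptionClear();
        }
        env->DeleteLocalRef(jmsg);
    }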
diff --git a/media/jni/android_media_MtpServer.cpp b/media/jni/android_media_MtpServer.cpp
index 17e85f8..0883527 100644
--- a/media/jni/android_media_MtpServer.cpp
+++ b/media/jni/android_media_MtpServer.cpp
@@ -50,12 +50,14 @@
class MtpThread : public Thread {
private:
MtpDatabase* mDatabase;
+ MtpServer* mServer;
String8 mStoragePath;
bool mDone;
+ Mutex mMutex;
public:
MtpThread(MtpDatabase* database, const char* storagePath)
- : mDatabase(database), mStoragePath(storagePath), mDone(false)
+ : mDatabase(database), mServer(NULL), mStoragePath(storagePath), mDone(false)
{
}
@@ -67,14 +69,19 @@
return false;
}
- MtpServer* server = new MtpServer(fd, mDatabase, AID_SDCARD_RW, 0664, 0775);
- server->addStorage(mStoragePath);
+ mMutex.lock();
+ mServer = new MtpServer(fd, mDatabase, AID_SDCARD_RW, 0664, 0775);
+ mServer->addStorage(mStoragePath);
+ mMutex.unlock();
- // temporary
- LOGD("MtpThread server->run");
- server->run();
+ LOGD("MtpThread mServer->run");
+ mServer->run();
close(fd);
- delete server;
+
+ mMutex.lock();
+ delete mServer;
+ mServer = NULL;
+ mMutex.unlock();
bool done = mDone;
if (done)
@@ -84,6 +91,24 @@
}
void setDone() { mDone = true; }
+
+ void sendObjectAdded(MtpObjectHandle handle) {
+ mMutex.lock();
+ if (mServer)
+ mServer->sendObjectAdded(handle);
+ else
+ LOGE("sendObjectAdded called while disconnected\n");
+ mMutex.unlock();
+ }
+
+ void sendObjectRemoved(MtpObjectHandle handle) {
+ mMutex.lock();
+ if (mServer)
+ mServer->sendObjectRemoved(handle);
+ else
+ LOGE("sendObjectRemoved called while disconnected\n");
+ mMutex.unlock();
+ }
};
static void
@@ -126,14 +151,38 @@
}
}
+static void
+android_media_MtpServer_send_object_added(JNIEnv *env, jobject thiz, jint handle)
+{
+ LOGD("send_object_added %d\n", handle);
+ MtpThread *thread = (MtpThread *)env->GetIntField(thiz, field_context);
+ if (thread)
+ thread->sendObjectAdded(handle);
+ else
+ LOGE("sendObjectAdded called while disconnected\n");
+}
+
+static void
+android_media_MtpServer_send_object_removed(JNIEnv *env, jobject thiz, jint handle)
+{
+ LOGD("send_object_removed %d\n", handle);
+ MtpThread *thread = (MtpThread *)env->GetIntField(thiz, field_context);
+ if (thread)
+ thread->sendObjectRemoved(handle);
+ else
+ LOGE("sendObjectRemoved called while disconnected\n");
+}
+
// ----------------------------------------------------------------------------
static JNINativeMethod gMethods[] = {
- {"native_setup", "(Landroid/media/MtpDatabase;Ljava/lang/String;)V",
+ {"native_setup", "(Landroid/media/MtpDatabase;Ljava/lang/String;)V",
(void *)android_media_MtpServer_setup},
- {"native_finalize", "()V", (void *)android_media_MtpServer_finalize},
- {"native_start", "()V", (void *)android_media_MtpServer_start},
- {"native_stop", "()V", (void *)android_media_MtpServer_stop},
+ {"native_finalize", "()V", (void *)android_media_MtpServer_finalize},
+ {"native_start", "()V", (void *)android_media_MtpServer_start},
+ {"native_stop", "()V", (void *)android_media_MtpServer_stop},
+ {"native_send_object_added", "(I)V", (void *)android_media_MtpServer_send_object_added},
+ {"native_send_object_removed", "(I)V", (void *)android_media_MtpServer_send_object_removed},
};
static const char* const kClassPathName = "android/media/MtpServer";
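sendObjectAdded()/sendObjectRemoved() can now run concurrently with threadLoop() creating and deleting mServer, so every touch of the pointer is bracketed by mMutex. A self-contained sketch of that guard, expressed with std::mutex instead of the Android utils Mutex used above; the point is that the NULL check and the call must happen as one step under the lock:

    #include <cstdio>
    #include <mutex>

    namespace sketch {

    class Server {
    public:
        void sendObjectAdded(int handle)   { std::printf("added %d\n", handle); }
        void sendObjectRemoved(int handle) { std::printf("removed %d\n", handle); }
    };

    class ServerHolder {
    public:
        // The worker thread attaches/detaches the server around its run loop.
        void attach(Server* s) { std::lock_guard<std::mutex> lk(mLock); mServer = s; }
        void detach()          { std::lock_guard<std::mutex> lk(mLock); mServer = nullptr; }

        // Notification calls may arrive from any thread at any time.
        void sendObjectAdded(int handle) {
            std::lock_guard<std::mutex> lk(mLock);
            if (mServer)
                mServer->sendObjectAdded(handle);
            else
                std::fprintf(stderr, "sendObjectAdded called while disconnected\n");
        }

    private:
        std::mutex mLock;
        Server* mServer = nullptr;
    };

    }  // namespace sketch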
diff --git a/media/libeffects/Android.mk b/media/libeffects/Android.mk
index 54e87f3..fc4ceb6 100644
--- a/media/libeffects/Android.mk
+++ b/media/libeffects/Android.mk
@@ -87,7 +87,8 @@
endif
LOCAL_C_INCLUDES := \
- $(call include-path-for, graphics corecg)
+ $(call include-path-for, graphics corecg) \
+ system/media/opensles/include
LOCAL_PRELINK_MODULE := false
diff --git a/media/libeffects/EffectEqualizer.cpp b/media/libeffects/EffectEqualizer.cpp
index d19c6b9..af0c411 100644
--- a/media/libeffects/EffectEqualizer.cpp
+++ b/media/libeffects/EffectEqualizer.cpp
@@ -30,6 +30,12 @@
// effect_interface_t interface implementation for equalizer effect
extern "C" const struct effect_interface_s gEqualizerInterface;
+enum equalizer_state_e {
+ EQUALIZER_STATE_UNINITIALIZED,
+ EQUALIZER_STATE_INITIALIZED,
+ EQUALIZER_STATE_ACTIVE,
+};
+
namespace android {
namespace {
@@ -100,6 +106,7 @@
effect_config_t config;
FormatAdapter adapter;
AudioEqualizer * pEqualizer;
+ uint32_t state;
};
//--- local function prototypes
@@ -151,6 +158,7 @@
pContext->itfe = &gEqualizerInterface;
pContext->pEqualizer = NULL;
+ pContext->state = EQUALIZER_STATE_UNINITIALIZED;
ret = Equalizer_init(pContext);
if (ret < 0) {
@@ -160,6 +168,7 @@
}
*pInterface = (effect_interface_t)pContext;
+ pContext->state = EQUALIZER_STATE_INITIALIZED;
LOGV("EffectLibCreateEffect %p, size %d", pContext, AudioEqualizer::GetInstanceSize(kNumBands)+sizeof(EqualizerContext));
@@ -175,6 +184,7 @@
return -EINVAL;
}
+ pContext->state = EQUALIZER_STATE_UNINITIALIZED;
pContext->pEqualizer->free();
delete pContext;
@@ -528,6 +538,13 @@
return -EINVAL;
}
+ if (pContext->state == EQUALIZER_STATE_UNINITIALIZED) {
+ return -EINVAL;
+ }
+ if (pContext->state == EQUALIZER_STATE_INITIALIZED) {
+ return -ENODATA;
+ }
+
pContext->adapter.process(inBuffer->raw, outBuffer->raw, outBuffer->frameCount);
return 0;
@@ -539,7 +556,7 @@
android::EqualizerContext * pContext = (android::EqualizerContext *) self;
int retsize;
- if (pContext == NULL) {
+ if (pContext == NULL || pContext->state == EQUALIZER_STATE_UNINITIALIZED) {
return -EINVAL;
}
@@ -594,10 +611,25 @@
p->data + p->psize);
} break;
case EFFECT_CMD_ENABLE:
+ if (pReplyData == NULL || *replySize != sizeof(int)) {
+ return -EINVAL;
+ }
+ if (pContext->state != EQUALIZER_STATE_INITIALIZED) {
+ return -ENOSYS;
+ }
+ pContext->state = EQUALIZER_STATE_ACTIVE;
+ LOGV("EFFECT_CMD_ENABLE() OK");
+ *(int *)pReplyData = 0;
+ break;
case EFFECT_CMD_DISABLE:
if (pReplyData == NULL || *replySize != sizeof(int)) {
return -EINVAL;
}
+ if (pContext->state != EQUALIZER_STATE_ACTIVE) {
+ return -ENOSYS;
+ }
+ pContext->state = EQUALIZER_STATE_INITIALIZED;
+ LOGV("EFFECT_CMD_DISABLE() OK");
*(int *)pReplyData = 0;
break;
case EFFECT_CMD_SET_DEVICE:
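Both the equalizer above and the reverb below gain the same three-state lifecycle: creation leaves the effect INITIALIZED, EFFECT_CMD_ENABLE moves it to ACTIVE, EFFECT_CMD_DISABLE moves it back, and process() refuses to run with -ENODATA until the effect is ACTIVE. A distilled sketch of that state machine, detached from the effect-framework types:

    #include <cerrno>

    namespace sketch {

    enum State { UNINITIALIZED, INITIALIZED, ACTIVE };

    struct Effect { State state = UNINITIALIZED; };

    int enableEffect(Effect& e) {
        if (e.state != INITIALIZED) return -ENOSYS;  // must be created but idle
        e.state = ACTIVE;
        return 0;
    }

    int disableEffect(Effect& e) {
        if (e.state != ACTIVE) return -ENOSYS;       // disable only makes sense when running
        e.state = INITIALIZED;
        return 0;
    }

    int processEffect(Effect& e /*, buffers omitted */) {
        if (e.state == UNINITIALIZED) return -EINVAL;   // never created
        if (e.state == INITIALIZED)   return -ENODATA;  // created but not enabled yet
        // ... run the DSP on the buffers ...
        return 0;
    }

    }  // namespace sketch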
diff --git a/media/libeffects/EffectReverb.c b/media/libeffects/EffectReverb.c
index 5c87f23..2ce7558 100644
--- a/media/libeffects/EffectReverb.c
+++ b/media/libeffects/EffectReverb.c
@@ -15,8 +15,7 @@
*/
#define LOG_TAG "EffectReverb"
-//
-#define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
#include <cutils/log.h>
#include <stdlib.h>
#include <string.h>
@@ -143,6 +142,8 @@
module->itfe = &gReverbInterface;
+ module->context.mState = REVERB_STATE_UNINITIALIZED;
+
if (memcmp(&desc->type, SL_IID_PRESETREVERB, sizeof(effect_uuid_t)) == 0) {
preset = 1;
}
@@ -158,6 +159,8 @@
*pInterface = (effect_interface_t) module;
+ module->context.mState = REVERB_STATE_INITIALIZED;
+
LOGV("EffectLibCreateEffect %p ,size %d", module, sizeof(reverb_module_t));
return 0;
@@ -171,6 +174,8 @@
return -EINVAL;
}
+ pRvbModule->context.mState = REVERB_STATE_UNINITIALIZED;
+
free(pRvbModule);
return 0;
}
@@ -195,6 +200,13 @@
pReverb = (reverb_object_t*) &pRvbModule->context;
+ if (pReverb->mState == REVERB_STATE_UNINITIALIZED) {
+ return -EINVAL;
+ }
+ if (pReverb->mState == REVERB_STATE_INITIALIZED) {
+ return -ENODATA;
+ }
+
//if bypassed or the preset forces the signal to be completely dry
if (pReverb->m_bBypass != 0) {
if (inBuffer->raw != outBuffer->raw) {
@@ -257,13 +269,15 @@
return 0;
}
+
static int Reverb_Command(effect_interface_t self, int cmdCode, int cmdSize,
void *pCmdData, int *replySize, void *pReplyData) {
reverb_module_t *pRvbModule = (reverb_module_t *) self;
reverb_object_t *pReverb;
int retsize;
- if (pRvbModule == NULL) {
+ if (pRvbModule == NULL ||
+ pRvbModule->context.mState == REVERB_STATE_UNINITIALIZED) {
return -EINVAL;
}
@@ -277,6 +291,9 @@
return -EINVAL;
}
*(int *) pReplyData = Reverb_Init(pRvbModule, pReverb->m_Aux, pReverb->m_Preset);
+ if (*(int *) pReplyData == 0) {
+ pRvbModule->context.mState = REVERB_STATE_INITIALIZED;
+ }
break;
case EFFECT_CMD_CONFIGURE:
if (pCmdData == NULL || cmdSize != sizeof(effect_config_t)
@@ -315,10 +332,25 @@
cmd->vsize, cmd->data + sizeof(int32_t));
break;
case EFFECT_CMD_ENABLE:
+ if (pReplyData == NULL || *replySize != sizeof(int)) {
+ return -EINVAL;
+ }
+ if (pReverb->mState != REVERB_STATE_INITIALIZED) {
+ return -ENOSYS;
+ }
+ pReverb->mState = REVERB_STATE_ACTIVE;
+ LOGV("EFFECT_CMD_ENABLE() OK");
+ *(int *)pReplyData = 0;
+ break;
case EFFECT_CMD_DISABLE:
if (pReplyData == NULL || *replySize != sizeof(int)) {
return -EINVAL;
}
+ if (pReverb->mState != REVERB_STATE_ACTIVE) {
+ return -ENOSYS;
+ }
+ pReverb->mState = REVERB_STATE_INITIALIZED;
+ LOGV("EFFECT_CMD_DISABLE() OK");
*(int *)pReplyData = 0;
break;
case EFFECT_CMD_SET_DEVICE:
diff --git a/media/libeffects/EffectReverb.h b/media/libeffects/EffectReverb.h
index 5af316d..ee8e390 100644
--- a/media/libeffects/EffectReverb.h
+++ b/media/libeffects/EffectReverb.h
@@ -114,6 +114,12 @@
#define AP1_GAIN_RANGE (int)(22936-6553)
+enum reverb_state_e {
+ REVERB_STATE_UNINITIALIZED,
+ REVERB_STATE_INITIALIZED,
+ REVERB_STATE_ACTIVE,
+};
+
/* parameters for each allpass */
typedef struct
{
@@ -279,6 +285,7 @@
uint16_t m_Aux; // if TRUE, is connected as auxiliary effect
uint16_t m_Preset; // if TRUE, expose preset revert interface
+ uint32_t mState;
} reverb_object_t;
diff --git a/media/libeffects/EffectVisualizer.cpp b/media/libeffects/EffectVisualizer.cpp
index f27e296..bcda06e 100644
--- a/media/libeffects/EffectVisualizer.cpp
+++ b/media/libeffects/EffectVisualizer.cpp
@@ -231,7 +231,7 @@
return -EINVAL;
}
if (pContext->mState != VISUALIZER_STATE_ACTIVE) {
- return -ENOSYS;
+ return -ENODATA;
}
if (inBuffer == NULL || inBuffer->raw == NULL ||
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 139992a..3beae7f 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -871,17 +871,19 @@
sp<MetaData> meta = cameraSource->getFormat();
- int32_t width, height, stride, sliceHeight;
+ int32_t width, height, stride, sliceHeight, colorFormat;
CHECK(meta->findInt32(kKeyWidth, &width));
CHECK(meta->findInt32(kKeyHeight, &height));
CHECK(meta->findInt32(kKeyStride, &stride));
CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
+ CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));
enc_meta->setInt32(kKeyWidth, width);
enc_meta->setInt32(kKeyHeight, height);
enc_meta->setInt32(kKeyIFramesInterval, mIFramesInterval);
enc_meta->setInt32(kKeyStride, stride);
enc_meta->setInt32(kKeySliceHeight, sliceHeight);
+ enc_meta->setInt32(kKeyColorFormat, colorFormat);
if (mVideoEncoderProfile != -1) {
enc_meta->setInt32(kKeyVideoProfile, mVideoEncoderProfile);
}
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 60d0233..49cf647 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -66,6 +66,7 @@
libstagefright_amrwbdec \
libstagefright_amrwbenc \
libstagefright_avcdec \
+ libstagefright_avcenc \
libstagefright_m4vh263dec \
libstagefright_mp3dec \
libstagefright_vorbisdec \
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index efaab5b..077e123 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -25,6 +25,7 @@
#include "include/AMRWBDecoder.h"
#include "include/AMRWBEncoder.h"
#include "include/AVCDecoder.h"
+#include "include/AVCEncoder.h"
#include "include/M4vH263Decoder.h"
#include "include/MP3Decoder.h"
#include "include/VorbisDecoder.h"
@@ -81,6 +82,7 @@
FACTORY_CREATE_ENCODER(AMRNBEncoder)
FACTORY_CREATE_ENCODER(AMRWBEncoder)
FACTORY_CREATE_ENCODER(AACEncoder)
+FACTORY_CREATE_ENCODER(AVCEncoder)
static sp<MediaSource> InstantiateSoftwareEncoder(
const char *name, const sp<MediaSource> &source,
@@ -94,6 +96,7 @@
FACTORY_REF(AMRNBEncoder)
FACTORY_REF(AMRWBEncoder)
FACTORY_REF(AACEncoder)
+ FACTORY_REF(AVCEncoder)
};
for (size_t i = 0;
i < sizeof(kFactoryInfo) / sizeof(kFactoryInfo[0]); ++i) {
@@ -186,6 +189,7 @@
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.encoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.encoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.Video.encoder" },
+ { MEDIA_MIMETYPE_VIDEO_AVC, "AVCEncoder" },
// { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.PV.avcenc" },
};
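Wiring the software AVC encoder into OMXCodec takes two pieces: a factory entry so InstantiateSoftwareEncoder can build an "AVCEncoder" by name, and a codec-table row mapping MEDIA_MIMETYPE_VIDEO_AVC to that name after the hardware encoders. A rough sketch of what the FACTORY_CREATE_ENCODER/FACTORY_REF macros boil down to, with simplified stand-ins for MediaSource and MetaData (the real table uses sp<> and the Stagefright types):

    #include <cstring>
    #include <memory>

    namespace sketch {

    struct MetaData {};
    struct MediaSource { virtual ~MediaSource() {} };
    struct StubEncoder : MediaSource {
        StubEncoder(std::shared_ptr<MediaSource>, std::shared_ptr<MetaData>) {}
    };

    typedef std::shared_ptr<MediaSource> (*CreateFn)(
            const std::shared_ptr<MediaSource>&, const std::shared_ptr<MetaData>&);

    // What FACTORY_CREATE_ENCODER(Name) generates: a Make<Name>() function.
    static std::shared_ptr<MediaSource> MakeStubEncoder(
            const std::shared_ptr<MediaSource>& source,
            const std::shared_ptr<MetaData>& meta) {
        return std::make_shared<StubEncoder>(source, meta);
    }

    // What the FACTORY_REF table holds: component name -> factory function.
    struct FactoryInfo { const char* name; CreateFn create; };
    static const FactoryInfo kFactoryInfo[] = {
        { "AVCEncoder", &MakeStubEncoder },
    };

    std::shared_ptr<MediaSource> instantiateSoftwareEncoder(
            const char* name,
            const std::shared_ptr<MediaSource>& source,
            const std::shared_ptr<MetaData>& meta) {
        for (size_t i = 0; i < sizeof(kFactoryInfo) / sizeof(kFactoryInfo[0]); ++i) {
            if (!strcmp(name, kFactoryInfo[i].name)) {
                return kFactoryInfo[i].create(source, meta);
            }
        }
        return nullptr;  // not a software encoder; fall back to OMX components
    }

    }  // namespace sketch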
diff --git a/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp
new file mode 100644
index 0000000..d5eb156
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp
@@ -0,0 +1,492 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AVCEncoder"
+#include <utils/Log.h>
+
+#include "AVCEncoder.h"
+
+#include "avcenc_api.h"
+#include "avcenc_int.h"
+#include "OMX_Video.h"
+
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+inline static void ConvertYUV420SemiPlanarToYUV420Planar(
+ uint8_t *inyuv, uint8_t* outyuv,
+ int32_t width, int32_t height) {
+
+ int32_t outYsize = width * height;
+ uint32_t *outy = (uint32_t *) outyuv;
+ uint16_t *outcb = (uint16_t *) (outyuv + outYsize);
+ uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2));
+
+ /* Y copying */
+ memcpy(outy, inyuv, outYsize);
+
+ /* U & V copying */
+ uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
+ for (int32_t i = height >> 1; i > 0; --i) {
+ for (int32_t j = width >> 2; j > 0; --j) {
+ uint32_t temp = *inyuv_4++;
+ uint32_t tempU = temp & 0xFF;
+ tempU = tempU | ((temp >> 8) & 0xFF00);
+
+ uint32_t tempV = (temp >> 8) & 0xFF;
+ tempV = tempV | ((temp >> 16) & 0xFF00);
+
+ // Flip U and V
+ *outcb++ = tempV;
+ *outcr++ = tempU;
+ }
+ }
+}
+
+static int32_t MallocWrapper(
+ void *userData, int32_t size, int32_t attrs) {
+ return reinterpret_cast<int32_t>(malloc(size));
+}
+
+static void FreeWrapper(void *userData, int32_t ptr) {
+ free(reinterpret_cast<void *>(ptr));
+}
+
+static int32_t DpbAllocWrapper(void *userData,
+ unsigned int sizeInMbs, unsigned int numBuffers) {
+ AVCEncoder *encoder = static_cast<AVCEncoder *>(userData);
+ CHECK(encoder != NULL);
+ return encoder->allocOutputBuffers(sizeInMbs, numBuffers);
+}
+
+static int32_t BindFrameWrapper(
+ void *userData, int32_t index, uint8_t **yuv) {
+ AVCEncoder *encoder = static_cast<AVCEncoder *>(userData);
+ CHECK(encoder != NULL);
+ return encoder->bindOutputBuffer(index, yuv);
+}
+
+static void UnbindFrameWrapper(void *userData, int32_t index) {
+ AVCEncoder *encoder = static_cast<AVCEncoder *>(userData);
+ CHECK(encoder != NULL);
+ return encoder->unbindOutputBuffer(index);
+}
+
+AVCEncoder::AVCEncoder(
+ const sp<MediaSource>& source,
+ const sp<MetaData>& meta)
+ : mSource(source),
+ mMeta(meta),
+ mNumInputFrames(-1),
+ mStarted(false),
+ mInputBuffer(NULL),
+ mInputFrameData(NULL),
+ mGroup(NULL) {
+
+ LOGV("Construct software AVCEncoder");
+
+ mHandle = new tagAVCHandle;
+ memset(mHandle, 0, sizeof(tagAVCHandle));
+ mHandle->AVCObject = NULL;
+ mHandle->userData = this;
+ mHandle->CBAVC_DPBAlloc = DpbAllocWrapper;
+ mHandle->CBAVC_FrameBind = BindFrameWrapper;
+ mHandle->CBAVC_FrameUnbind = UnbindFrameWrapper;
+ mHandle->CBAVC_Malloc = MallocWrapper;
+ mHandle->CBAVC_Free = FreeWrapper;
+
+ mInitCheck = initCheck(meta);
+}
+
+AVCEncoder::~AVCEncoder() {
+ LOGV("Destruct software AVCEncoder");
+ if (mStarted) {
+ stop();
+ }
+
+ delete mEncParams;
+ delete mHandle;
+}
+
+status_t AVCEncoder::initCheck(const sp<MetaData>& meta) {
+ LOGV("initCheck");
+ CHECK(meta->findInt32(kKeyWidth, &mVideoWidth));
+ CHECK(meta->findInt32(kKeyHeight, &mVideoHeight));
+ CHECK(meta->findInt32(kKeySampleRate, &mVideoFrameRate));
+ CHECK(meta->findInt32(kKeyBitRate, &mVideoBitRate));
+
+ // XXX: Add more color format support
+ CHECK(meta->findInt32(kKeyColorFormat, &mVideoColorFormat));
+ if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
+ if (mVideoColorFormat != OMX_COLOR_FormatYUV420SemiPlanar) {
+ LOGE("Color format %d is not supported", mVideoColorFormat);
+ return BAD_VALUE;
+ }
+ // Allocate spare buffer only when color conversion is needed.
+ // Assume the color format is OMX_COLOR_FormatYUV420SemiPlanar.
+ mInputFrameData =
+ (uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
+ CHECK(mInputFrameData);
+ }
+
+ // XXX: Remove this restriction
+ if (mVideoWidth % 16 != 0 || mVideoHeight % 16 != 0) {
+ LOGE("Video frame size %dx%d must be a multiple of 16",
+ mVideoWidth, mVideoHeight);
+ return BAD_VALUE;
+ }
+
+ mEncParams = new tagAVCEncParam;
+ memset(mEncParams, 0, sizeof(tagAVCEncParam)); // zero the struct, not just the pointer
+ mEncParams->width = mVideoWidth;
+ mEncParams->height = mVideoHeight;
+ mEncParams->frame_rate = 1000 * mVideoFrameRate; // In frames/ms!
+ mEncParams->rate_control = AVC_ON;
+ mEncParams->bitrate = mVideoBitRate;
+ mEncParams->initQP = 0;
+ mEncParams->init_CBP_removal_delay = 1600;
+ mEncParams->CPB_size = (uint32_t) (mVideoBitRate >> 1);
+
+ mEncParams->intramb_refresh = 0;
+ mEncParams->auto_scd = AVC_ON;
+ mEncParams->out_of_band_param_set = AVC_ON;
+ mEncParams->poc_type = 2;
+ mEncParams->log2_max_poc_lsb_minus_4 = 12;
+ mEncParams->delta_poc_zero_flag = 0;
+ mEncParams->offset_poc_non_ref = 0;
+ mEncParams->offset_top_bottom = 0;
+ mEncParams->num_ref_in_cycle = 0;
+ mEncParams->offset_poc_ref = NULL;
+
+ mEncParams->num_ref_frame = 1;
+ mEncParams->num_slice_group = 1;
+ mEncParams->fmo_type = 0;
+
+ mEncParams->db_filter = AVC_ON;
+ mEncParams->disable_db_idc = 0;
+
+ mEncParams->alpha_offset = 0;
+ mEncParams->beta_offset = 0;
+ mEncParams->constrained_intra_pred = AVC_OFF;
+
+ mEncParams->data_par = AVC_OFF;
+ mEncParams->fullsearch = AVC_OFF;
+ mEncParams->search_range = 16;
+ mEncParams->sub_pel = AVC_OFF;
+ mEncParams->submb_pred = AVC_OFF;
+ mEncParams->rdopt_mode = AVC_OFF;
+ mEncParams->bidir_pred = AVC_OFF;
+ int32_t nMacroBlocks = ((((mVideoWidth + 15) >> 4) << 4) *
+ (((mVideoHeight + 15) >> 4) << 4)) >> 8;
+ uint32_t *sliceGroup = (uint32_t *) malloc(sizeof(uint32_t) * nMacroBlocks);
+ for (int ii = 0, idx = 0; ii < nMacroBlocks; ++ii) {
+ sliceGroup[ii] = idx++;
+ if (idx >= mEncParams->num_slice_group) {
+ idx = 0;
+ }
+ }
+ mEncParams->slice_group = sliceGroup;
+
+ mEncParams->use_overrun_buffer = AVC_OFF;
+
+ // Set IDR frame refresh interval
+ int32_t iFramesIntervalSec;
+ CHECK(meta->findInt32(kKeyIFramesInterval, &iFramesIntervalSec));
+ if (iFramesIntervalSec < 0) {
+ mEncParams->idr_period = -1;
+ } else if (iFramesIntervalSec == 0) {
+ mEncParams->idr_period = 1; // All I frames
+ } else {
+ mEncParams->idr_period =
+ (iFramesIntervalSec * mVideoFrameRate);
+ }
+ LOGV("idr_period: %d, I-frames interval: %d seconds, and frame rate: %d",
+ mEncParams->idr_period, iFramesIntervalSec, mVideoFrameRate);
+
+ // Set profile and level
+ // If profile and level setting is not correct, failure
+ // is reported when the encoder is initialized.
+ mEncParams->profile = AVC_BASELINE;
+ mEncParams->level = AVC_LEVEL3_2;
+ int32_t profile, level;
+ if (meta->findInt32(kKeyVideoProfile, &profile)) {
+ mEncParams->profile = (AVCProfile) profile;
+ }
+ if (meta->findInt32(kKeyVideoLevel, &level)) {
+ mEncParams->level = (AVCLevel) level;
+ }
+
+
+ mFormat = new MetaData;
+ mFormat->setInt32(kKeyWidth, mVideoWidth);
+ mFormat->setInt32(kKeyHeight, mVideoHeight);
+ mFormat->setInt32(kKeyBitRate, mVideoBitRate);
+ mFormat->setInt32(kKeySampleRate, mVideoFrameRate);
+ mFormat->setInt32(kKeyColorFormat, mVideoColorFormat);
+ mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+ mFormat->setCString(kKeyDecoderComponent, "AVCEncoder");
+ return OK;
+}
+
+status_t AVCEncoder::start(MetaData *params) {
+ LOGV("start");
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ if (mStarted) {
+ LOGW("Call start() when encoder already started");
+ return OK;
+ }
+
+ AVCEnc_Status err;
+ err = PVAVCEncInitialize(mHandle, mEncParams, NULL, NULL);
+ if (err != AVCENC_SUCCESS) {
+ LOGE("Failed to initialize the encoder: %d", err);
+ return UNKNOWN_ERROR;
+ }
+
+ mGroup = new MediaBufferGroup();
+ int32_t maxSize;
+ if (AVCENC_SUCCESS !=
+ PVAVCEncGetMaxOutputBufferSize(mHandle, &maxSize)) {
+ maxSize = 31584; // Magic #
+ }
+ mGroup->add_buffer(new MediaBuffer(maxSize));
+
+ mSource->start(params);
+ mNumInputFrames = -2; // 1st two buffers contain SPS and PPS
+ mStarted = true;
+ mSpsPpsHeaderReceived = false;
+ mReadyForNextFrame = true;
+ mIsIDRFrame = 0;
+
+ return OK;
+}
+
+status_t AVCEncoder::stop() {
+ LOGV("stop");
+ if (!mStarted) {
+ LOGW("Call stop() when encoder has not started");
+ return OK;
+ }
+
+ if (mInputBuffer) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ if (mGroup) {
+ delete mGroup;
+ mGroup = NULL;
+ }
+
+ if (mInputFrameData) {
+ free(mInputFrameData); // paired with the malloc() in initCheck()
+ mInputFrameData = NULL;
+ }
+
+ PVAVCCleanUpEncoder(mHandle);
+ mSource->stop();
+ releaseOutputBuffers();
+ mStarted = false;
+
+ return OK;
+}
+
+void AVCEncoder::releaseOutputBuffers() {
+ LOGV("releaseOutputBuffers");
+ for (size_t i = 0; i < mOutputBuffers.size(); ++i) {
+ MediaBuffer *buffer = mOutputBuffers.editItemAt(i);
+ buffer->setObserver(NULL);
+ buffer->release();
+ }
+ mOutputBuffers.clear();
+}
+
+sp<MetaData> AVCEncoder::getFormat() {
+ LOGV("getFormat");
+ return mFormat;
+}
+
+status_t AVCEncoder::read(
+ MediaBuffer **out, const ReadOptions *options) {
+
+ CHECK(!options);
+ *out = NULL;
+
+ MediaBuffer *outputBuffer;
+ CHECK_EQ(OK, mGroup->acquire_buffer(&outputBuffer));
+ uint8_t *outPtr = (uint8_t *) outputBuffer->data();
+ uint32_t dataLength = outputBuffer->size();
+
+ int32_t type;
+ AVCEnc_Status encoderStatus = AVCENC_SUCCESS;
+
+ // Return SPS and PPS for the first two buffers
+ if (!mSpsPpsHeaderReceived) {
+ encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type);
+ if (encoderStatus == AVCENC_WRONG_STATE) {
+ mSpsPpsHeaderReceived = true;
+ CHECK_EQ(0, mNumInputFrames); // 1st video frame is 0
+ } else {
+ switch (type) {
+ case AVC_NALTYPE_SPS:
+ case AVC_NALTYPE_PPS:
+ LOGV("%s received",
+ (type == AVC_NALTYPE_SPS)? "SPS": "PPS");
+ ++mNumInputFrames;
+ outputBuffer->set_range(0, dataLength);
+ *out = outputBuffer;
+ return OK;
+ default:
+ LOGE("Nal type (%d) other than SPS/PPS is unexpected", type);
+ return UNKNOWN_ERROR;
+ }
+ }
+ }
+
+ // Get next input video frame
+ if (mReadyForNextFrame) {
+ if (mInputBuffer) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+ status_t err = mSource->read(&mInputBuffer, options);
+ if (err != OK) {
+ LOGE("Failed to read input video frame: %d", err);
+ outputBuffer->release();
+ return err;
+ }
+ int64_t timeUs;
+ CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
+ outputBuffer->meta_data()->setInt64(kKeyTime, timeUs);
+
+ AVCFrameIO videoInput;
+ memset(&videoInput, 0, sizeof(videoInput));
+ videoInput.height = ((mVideoHeight + 15) >> 4) << 4;
+ videoInput.pitch = ((mVideoWidth + 15) >> 4) << 4;
+ videoInput.coding_timestamp = (timeUs + 500) / 1000; // in ms
+ uint8_t *inputData = (uint8_t *) mInputBuffer->data();
+
+ if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
+ CHECK(mInputFrameData);
+ CHECK(mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar);
+ ConvertYUV420SemiPlanarToYUV420Planar(
+ inputData, mInputFrameData, mVideoWidth, mVideoHeight);
+ inputData = mInputFrameData;
+ }
+ CHECK(inputData != NULL);
+ videoInput.YCbCr[0] = inputData;
+ videoInput.YCbCr[1] = videoInput.YCbCr[0] + videoInput.height * videoInput.pitch;
+ videoInput.YCbCr[2] = videoInput.YCbCr[1] +
+ ((videoInput.height * videoInput.pitch) >> 2);
+ videoInput.disp_order = mNumInputFrames;
+
+ encoderStatus = PVAVCEncSetInput(mHandle, &videoInput);
+ if (encoderStatus == AVCENC_SUCCESS ||
+ encoderStatus == AVCENC_NEW_IDR) {
+ mReadyForNextFrame = false;
+ ++mNumInputFrames;
+ if (encoderStatus == AVCENC_NEW_IDR) {
+ mIsIDRFrame = 1;
+ }
+ } else {
+ if (encoderStatus < AVCENC_SUCCESS) {
+ outputBuffer->release();
+ return UNKNOWN_ERROR;
+ } else {
+ outputBuffer->set_range(0, 0);
+ *out = outputBuffer;
+ return OK;
+ }
+ }
+ }
+
+ // Encode an input video frame
+ CHECK(encoderStatus == AVCENC_SUCCESS ||
+ encoderStatus == AVCENC_NEW_IDR);
+ dataLength = outputBuffer->size(); // Reset the output buffer length
+ encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type);
+ if (encoderStatus == AVCENC_SUCCESS) {
+ outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, mIsIDRFrame);
+ CHECK_EQ(NULL, PVAVCEncGetOverrunBuffer(mHandle));
+ } else if (encoderStatus == AVCENC_PICTURE_READY) {
+ CHECK_EQ(NULL, PVAVCEncGetOverrunBuffer(mHandle));
+ if (mIsIDRFrame) {
+ outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, mIsIDRFrame);
+ mIsIDRFrame = 0;
+ LOGV("Output an IDR frame");
+ }
+ mReadyForNextFrame = true;
+ AVCFrameIO recon;
+ if (PVAVCEncGetRecon(mHandle, &recon) == AVCENC_SUCCESS) {
+ PVAVCEncReleaseRecon(mHandle, &recon);
+ }
+ } else {
+ dataLength = 0;
+ mReadyForNextFrame = true;
+ }
+ if (encoderStatus < AVCENC_SUCCESS) {
+ outputBuffer->release();
+ return UNKNOWN_ERROR;
+ }
+
+ outputBuffer->set_range(0, dataLength);
+ *out = outputBuffer;
+ return OK;
+}
+
+int32_t AVCEncoder::allocOutputBuffers(
+ unsigned int sizeInMbs, unsigned int numBuffers) {
+ CHECK(mOutputBuffers.isEmpty());
+ size_t frameSize = (sizeInMbs << 7) * 3;
+ for (unsigned int i = 0; i < numBuffers; ++i) {
+ MediaBuffer *buffer = new MediaBuffer(frameSize);
+ buffer->setObserver(this);
+ mOutputBuffers.push(buffer);
+ }
+
+ return 1;
+}
+
+void AVCEncoder::unbindOutputBuffer(int32_t index) {
+ CHECK(index >= 0);
+}
+
+int32_t AVCEncoder::bindOutputBuffer(int32_t index, uint8_t **yuv) {
+ CHECK(index >= 0);
+ CHECK(index < (int32_t) mOutputBuffers.size());
+ int64_t timeUs;
+ CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
+ mOutputBuffers[index]->meta_data()->setInt64(kKeyTime, timeUs);
+
+ *yuv = (uint8_t *) mOutputBuffers[index]->data();
+
+ return 1;
+}
+
+void AVCEncoder::signalBufferReturned(MediaBuffer *buffer) {
+}
+
+} // namespace android
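A caller of the new AVCEncoder drives everything through read(): the first two non-empty buffers returned are the out-of-band SPS and PPS, a zero-length buffer just means there is nothing to emit for that call, and subsequent buffers are coded frames with kKeyIsSyncFrame marking IDRs. A hedged sketch of that drain loop, with the Stagefright source/buffer types reduced to minimal stand-ins so it is self-contained:

    #include <cstdio>

    namespace sketch {

    // Reduced stand-ins for the MediaSource/MediaBuffer calls the loop needs.
    struct Buffer { const unsigned char* data; size_t size; bool isSyncFrame; };
    struct Encoder {
        virtual ~Encoder() {}
        virtual int start() = 0;
        virtual int read(Buffer* out) = 0;   // returns 0 on success
        virtual int stop() = 0;
    };

    // Drain loop: the first two non-empty buffers are SPS and PPS (codec config);
    // everything after that is a coded frame.
    void drain(Encoder& enc, size_t maxBuffers) {
        enc.start();
        size_t configCount = 0;
        for (size_t i = 0; i < maxBuffers; ++i) {
            Buffer buf;
            if (enc.read(&buf) != 0) break;
            if (buf.size == 0) continue;             // nothing to emit this call; try again
            if (configCount < 2) {
                ++configCount;                       // SPS, then PPS
                std::printf("codec config NAL, %zu bytes\n", buf.size);
            } else {
                std::printf("frame NAL, %zu bytes%s\n", buf.size,
                            buf.isSyncFrame ? " (IDR)" : "");
            }
        }
        enc.stop();
    }

    }  // namespace sketch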
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
new file mode 100644
index 0000000..100f239
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -0,0 +1,35 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ AVCEncoder.cpp \
+ src/avcenc_api.cpp \
+ src/bitstream_io.cpp \
+ src/block.cpp \
+ src/findhalfpel.cpp \
+ src/header.cpp \
+ src/init.cpp \
+ src/intra_est.cpp \
+ src/motion_comp.cpp \
+ src/motion_est.cpp \
+ src/rate_control.cpp \
+ src/residual.cpp \
+ src/sad.cpp \
+ src/sad_halfpel.cpp \
+ src/slice.cpp \
+ src/vlc_encode.cpp
+
+
+LOCAL_MODULE := libstagefright_avcenc
+
+LOCAL_C_INCLUDES := \
+ $(LOCAL_PATH)/src \
+ $(LOCAL_PATH)/../common/include \
+ $(TOP)/external/opencore/extern_libs_v2/khronos/openmax/include \
+ $(TOP)/frameworks/base/media/libstagefright/include
+
+LOCAL_CFLAGS := \
+ -D__arm__ \
+ -DOSCL_IMPORT_REF= -DOSCL_UNUSED_ARG= -DOSCL_EXPORT_REF=
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/codecs/avc/enc/src/avcenc_api.cpp b/media/libstagefright/codecs/avc/enc/src/avcenc_api.cpp
new file mode 100644
index 0000000..d39885d
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/avcenc_api.cpp
@@ -0,0 +1,744 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_api.h"
+#include "avcenc_lib.h"
+
+/* ======================================================================== */
+/* Function : PVAVCGetNALType() */
+/* Date : 11/4/2003 */
+/* Purpose : Sniff NAL type from the bitstream */
+/* In/out : */
+/* Return : AVCENC_SUCCESS if succeed, AVCENC_FAIL if fail. */
+/* Modified : */
+/* ======================================================================== */
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetNALType(unsigned char *bitstream, int size,
+ int *nal_type, int *nal_ref_idc)
+{
+ int forbidden_zero_bit;
+ if (size > 0)
+ {
+ forbidden_zero_bit = bitstream[0] >> 7;
+ if (forbidden_zero_bit != 0)
+ return AVCENC_FAIL;
+ *nal_ref_idc = (bitstream[0] & 0x60) >> 5;
+ *nal_type = bitstream[0] & 0x1F;
+ return AVCENC_SUCCESS;
+ }
+
+ return AVCENC_FAIL;
+}
+
+
+/* ======================================================================== */
+/* Function : PVAVCEncInitialize() */
+/* Date : 3/18/2004 */
+/* Purpose : Initialize the encoder library, allocate memory and verify */
+/* the profile/level support/settings. */
+/* In/out : Encoding parameters. */
+/* Return : AVCENC_SUCCESS for success. */
+/* Modified : */
+/* ======================================================================== */
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncInitialize(AVCHandle *avcHandle, AVCEncParams *encParam,
+ void* extSPS, void* extPPS)
+{
+ AVCEnc_Status status;
+ AVCEncObject *encvid;
+ AVCCommonObj *video;
+ uint32 *userData = (uint32*) avcHandle->userData;
+ int framesize;
+
+ if (avcHandle->AVCObject != NULL)
+ {
+ return AVCENC_ALREADY_INITIALIZED; /* It's already initialized, need to cleanup first */
+ }
+
+ /* not initialized */
+
+ /* allocate videoObject */
+ avcHandle->AVCObject = (void*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncObject), DEFAULT_ATTR);
+ if (avcHandle->AVCObject == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ encvid = (AVCEncObject*) avcHandle->AVCObject;
+ memset(encvid, 0, sizeof(AVCEncObject)); /* reset everything */
+
+ encvid->enc_state = AVCEnc_Initializing;
+
+ encvid->avcHandle = avcHandle;
+
+ encvid->common = (AVCCommonObj*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCCommonObj), DEFAULT_ATTR);
+ if (encvid->common == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ video = encvid->common;
+ memset(video, 0, sizeof(AVCCommonObj));
+
+ /* allocate bitstream structure */
+ encvid->bitstream = (AVCEncBitstream*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncBitstream), DEFAULT_ATTR);
+ if (encvid->bitstream == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+ encvid->bitstream->encvid = encvid; /* to point back for reallocation */
+
+ /* allocate sequence parameter set structure */
+ video->currSeqParams = (AVCSeqParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSeqParamSet), DEFAULT_ATTR);
+ if (video->currSeqParams == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+ memset(video->currSeqParams, 0, sizeof(AVCSeqParamSet));
+
+ /* allocate picture parameter set structure */
+ video->currPicParams = (AVCPicParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCPicParamSet), DEFAULT_ATTR);
+ if (video->currPicParams == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+ memset(video->currPicParams, 0, sizeof(AVCPicParamSet));
+
+ /* allocate slice header structure */
+ video->sliceHdr = (AVCSliceHeader*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSliceHeader), DEFAULT_ATTR);
+ if (video->sliceHdr == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+ memset(video->sliceHdr, 0, sizeof(AVCSliceHeader));
+
+ /* allocate encoded picture buffer structure*/
+ video->decPicBuf = (AVCDecPicBuffer*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecPicBuffer), DEFAULT_ATTR);
+ if (video->decPicBuf == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+ memset(video->decPicBuf, 0, sizeof(AVCDecPicBuffer));
+
+ /* allocate rate control structure */
+ encvid->rateCtrl = (AVCRateControl*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCRateControl), DEFAULT_ATTR);
+ if (encvid->rateCtrl == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+ memset(encvid->rateCtrl, 0, sizeof(AVCRateControl));
+
+ /* reset frame list, not really needed */
+ video->currPic = NULL;
+ video->currFS = NULL;
+ encvid->currInput = NULL;
+ video->prevRefPic = NULL;
+
+ /* now read encParams, and allocate dimension-dependent variables */
+ /* such as mblock */
+ status = SetEncodeParam(avcHandle, encParam, extSPS, extPPS); /* initialized variables to be used in SPS*/
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ if (encParam->use_overrun_buffer == AVC_ON)
+ {
+ /* allocate overrun buffer */
+ encvid->oBSize = encvid->rateCtrl->cpbSize;
+ if (encvid->oBSize > DEFAULT_OVERRUN_BUFFER_SIZE)
+ {
+ encvid->oBSize = DEFAULT_OVERRUN_BUFFER_SIZE;
+ }
+ encvid->overrunBuffer = (uint8*) avcHandle->CBAVC_Malloc(userData, encvid->oBSize, DEFAULT_ATTR);
+ if (encvid->overrunBuffer == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+ }
+ else
+ {
+ encvid->oBSize = 0;
+ encvid->overrunBuffer = NULL;
+ }
+
+ /* allocate frame size dependent structures */
+ framesize = video->FrameHeightInMbs * video->PicWidthInMbs;
+
+ video->mblock = (AVCMacroblock*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMacroblock) * framesize, DEFAULT_ATTR);
+ if (video->mblock == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ video->MbToSliceGroupMap = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits * 2, DEFAULT_ATTR);
+ if (video->MbToSliceGroupMap == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ encvid->mot16x16 = (AVCMV*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMV) * framesize, DEFAULT_ATTR);
+ if (encvid->mot16x16 == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+ memset(encvid->mot16x16, 0, sizeof(AVCMV)*framesize);
+
+ encvid->intraSearch = (uint8*) avcHandle->CBAVC_Malloc(userData, sizeof(uint8) * framesize, DEFAULT_ATTR);
+ if (encvid->intraSearch == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ encvid->min_cost = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(int) * framesize, DEFAULT_ATTR);
+ if (encvid->min_cost == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ /* initialize motion search related memory */
+ if (AVCENC_SUCCESS != InitMotionSearchModule(avcHandle))
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ if (AVCENC_SUCCESS != InitRateControlModule(avcHandle))
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ /* intialize function pointers */
+ encvid->functionPointer = (AVCEncFuncPtr*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncFuncPtr), DEFAULT_ATTR);
+ if (encvid->functionPointer == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+ encvid->functionPointer->SAD_Macroblock = &AVCSAD_Macroblock_C;
+ encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;
+ encvid->functionPointer->SAD_MB_HalfPel[1] = &AVCSAD_MB_HalfPel_Cxh;
+ encvid->functionPointer->SAD_MB_HalfPel[2] = &AVCSAD_MB_HalfPel_Cyh;
+ encvid->functionPointer->SAD_MB_HalfPel[3] = &AVCSAD_MB_HalfPel_Cxhyh;
+
+ /* initialize timing control */
+ encvid->modTimeRef = 0; /* ALWAYS ASSUME THAT TIMESTAMP START FROM 0 !!!*/
+ video->prevFrameNum = 0;
+ encvid->prevCodedFrameNum = 0;
+ encvid->dispOrdPOCRef = 0;
+
+ if (encvid->outOfBandParamSet == TRUE)
+ {
+ encvid->enc_state = AVCEnc_Encoding_SPS;
+ }
+ else
+ {
+ encvid->enc_state = AVCEnc_Analyzing_Frame;
+ }
+
+ return AVCENC_SUCCESS;
+}
+
+/* ======================================================================== */
+/* Function : PVAVCEncGetMaxOutputSize() */
+/* Date : 11/29/2008 */
+/* Purpose : Return max output buffer size that apps should allocate for */
+/* output buffer. */
+/* In/out : */
+/* Return : AVCENC_SUCCESS for success. */
+/* Modified : size */
+/* ======================================================================== */
+
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetMaxOutputBufferSize(AVCHandle *avcHandle, int* size)
+{
+ AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
+
+ if (encvid == NULL)
+ {
+ return AVCENC_UNINITIALIZED;
+ }
+
+ *size = encvid->rateCtrl->cpbSize;
+
+ return AVCENC_SUCCESS;
+}
+
+/* ======================================================================== */
+/* Function : PVAVCEncSetInput() */
+/* Date : 4/18/2004 */
+/* Purpose : To feed an unencoded original frame to the encoder library. */
+/* In/out : */
+/* Return : AVCENC_SUCCESS for success. */
+/* Modified : */
+/* ======================================================================== */
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncSetInput(AVCHandle *avcHandle, AVCFrameIO *input)
+{
+ AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
+ AVCCommonObj *video = encvid->common;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+
+ AVCEnc_Status status;
+ uint frameNum;
+
+ if (encvid == NULL)
+ {
+ return AVCENC_UNINITIALIZED;
+ }
+
+ if (encvid->enc_state == AVCEnc_WaitingForBuffer)
+ {
+ goto RECALL_INITFRAME;
+ }
+ else if (encvid->enc_state != AVCEnc_Analyzing_Frame)
+ {
+ return AVCENC_FAIL;
+ }
+
+ if (input->pitch > 0xFFFF)
+ {
+ return AVCENC_NOT_SUPPORTED; // we use 2-bytes for pitch
+ }
+
+ /***********************************/
+
+ /* Let's rate control decide whether to encode this frame or not */
+ /* Also set video->nal_unit_type, sliceHdr->slice_type, video->slice_type */
+ if (AVCENC_SUCCESS != RCDetermineFrameNum(encvid, rateCtrl, input->coding_timestamp, &frameNum))
+ {
+ return AVCENC_SKIPPED_PICTURE; /* not time to encode, thus skipping */
+ }
+
+ /* we may not need this line */
+ //nextFrmModTime = (uint32)((((frameNum+1)*1000)/rateCtrl->frame_rate) + modTimeRef); /* rec. time */
+ //encvid->nextModTime = nextFrmModTime - (encvid->frameInterval>>1) - 1; /* between current and next frame */
+
+ encvid->currInput = input;
+ encvid->currInput->coding_order = frameNum;
+
+RECALL_INITFRAME:
+ /* initialize and analyze the frame */
+ status = InitFrame(encvid);
+
+ if (status == AVCENC_SUCCESS)
+ {
+ encvid->enc_state = AVCEnc_Encoding_Frame;
+ }
+ else if (status == AVCENC_NEW_IDR)
+ {
+ if (encvid->outOfBandParamSet == TRUE)
+ {
+ encvid->enc_state = AVCEnc_Encoding_Frame;
+ }
+ else // assuming that in-band paramset keeps sending new SPS and PPS.
+ {
+ encvid->enc_state = AVCEnc_Encoding_SPS;
+ //video->currSeqParams->seq_parameter_set_id++;
+ //if(video->currSeqParams->seq_parameter_set_id > 31) // range check
+ {
+ video->currSeqParams->seq_parameter_set_id = 0; // reset
+ }
+ }
+
+ video->sliceHdr->idr_pic_id++;
+ if (video->sliceHdr->idr_pic_id > 65535) // range check
+ {
+ video->sliceHdr->idr_pic_id = 0; // reset
+ }
+ }
+ /* the following logic needs to be revisited */
+ else if (status == AVCENC_PICTURE_READY) // no buffers returned back to the encoder
+ {
+ encvid->enc_state = AVCEnc_WaitingForBuffer; // Input accepted but can't continue
+ // need to free up some memory before proceeding with Encode
+ }
+
+ return status; // return status, including the AVCENC_FAIL case and all 3 above.
+}
+
+/* ======================================================================== */
+/* Function : PVAVCEncodeNAL() */
+/* Date : 4/29/2004 */
+/* Purpose : To encode one NAL/slice. */
+/* In/out : */
+/* Return : AVCENC_SUCCESS for success. */
+/* Modified : */
+/* ======================================================================== */
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncodeNAL(AVCHandle *avcHandle, unsigned char *buffer, unsigned int *buf_nal_size, int *nal_type)
+{
+ AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
+ AVCCommonObj *video;
+ AVCEncBitstream *bitstream;
+ AVCEnc_Status status;
+
+ if (encvid == NULL)
+ {
+ return AVCENC_UNINITIALIZED;
+ }
+
+ video = encvid->common;
+ bitstream = encvid->bitstream;
+
+ switch (encvid->enc_state)
+ {
+ case AVCEnc_Initializing:
+ return AVCENC_UNINITIALIZED;
+ case AVCEnc_Encoding_SPS:
+ /* initialize the structure */
+ BitstreamEncInit(bitstream, buffer, *buf_nal_size, NULL, 0);
+ BitstreamWriteBits(bitstream, 8, (1 << 5) | AVC_NALTYPE_SPS);
+
+ /* encode SPS */
+ status = EncodeSPS(encvid, bitstream);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ /* closing the NAL with trailing bits */
+ status = BitstreamTrailingBits(bitstream, buf_nal_size);
+ if (status == AVCENC_SUCCESS)
+ {
+ encvid->enc_state = AVCEnc_Encoding_PPS;
+ video->currPicParams->seq_parameter_set_id = video->currSeqParams->seq_parameter_set_id;
+ video->currPicParams->pic_parameter_set_id++;
+ *nal_type = AVC_NALTYPE_SPS;
+ *buf_nal_size = bitstream->write_pos;
+ }
+ break;
+ case AVCEnc_Encoding_PPS:
+ /* initialize the structure */
+ BitstreamEncInit(bitstream, buffer, *buf_nal_size, NULL, 0);
+ BitstreamWriteBits(bitstream, 8, (1 << 5) | AVC_NALTYPE_PPS);
+
+ /* encode PPS */
+ status = EncodePPS(encvid, bitstream);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ /* closing the NAL with trailing bits */
+ status = BitstreamTrailingBits(bitstream, buf_nal_size);
+ if (status == AVCENC_SUCCESS)
+ {
+ if (encvid->outOfBandParamSet == TRUE) // already extract PPS, SPS
+ {
+ encvid->enc_state = AVCEnc_Analyzing_Frame;
+ }
+ else // SetInput has been called before SPS and PPS.
+ {
+ encvid->enc_state = AVCEnc_Encoding_Frame;
+ }
+
+ *nal_type = AVC_NALTYPE_PPS;
+ *buf_nal_size = bitstream->write_pos;
+ }
+ break;
+
+ case AVCEnc_Encoding_Frame:
+ /* initialize the structure */
+ BitstreamEncInit(bitstream, buffer, *buf_nal_size, encvid->overrunBuffer, encvid->oBSize);
+ BitstreamWriteBits(bitstream, 8, (video->nal_ref_idc << 5) | (video->nal_unit_type));
+
+ /* Re-order the reference list according to the ref_pic_list_reordering() */
+ /* We don't have to reorder the list for the encoder here. This can only be done
+ after we encode this slice. We can run thru a second-pass to see if new ordering
+ would save more bits. Too much delay !! */
+ /* status = ReOrderList(video);*/
+ status = InitSlice(encvid);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ /* when we have everything, we encode the slice header */
+ status = EncodeSliceHeader(encvid, bitstream);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ status = AVCEncodeSlice(encvid);
+
+ video->slice_id++;
+
+ /* closing the NAL with trailing bits */
+ BitstreamTrailingBits(bitstream, buf_nal_size);
+
+ *buf_nal_size = bitstream->write_pos;
+
+ encvid->rateCtrl->numFrameBits += ((*buf_nal_size) << 3);
+
+ *nal_type = video->nal_unit_type;
+
+ if (status == AVCENC_PICTURE_READY)
+ {
+ status = RCUpdateFrame(encvid);
+ if (status == AVCENC_SKIPPED_PICTURE) /* skip current frame */
+ {
+ DPBReleaseCurrentFrame(avcHandle, video);
+ encvid->enc_state = AVCEnc_Analyzing_Frame;
+
+ return status;
+ }
+
+ /* perform loop-filtering on the entire frame */
+ DeblockPicture(video);
+
+ /* update the original frame array */
+ encvid->prevCodedFrameNum = encvid->currInput->coding_order;
+
+ /* store the encoded picture in the DPB buffer */
+ StorePictureInDPB(avcHandle, video);
+
+ if (video->currPic->isReference)
+ {
+ video->PrevRefFrameNum = video->sliceHdr->frame_num;
+ }
+
+ /* update POC related variables */
+ PostPOC(video);
+
+ encvid->enc_state = AVCEnc_Analyzing_Frame;
+ status = AVCENC_PICTURE_READY;
+
+ }
+ break;
+ default:
+ status = AVCENC_WRONG_STATE;
+ }
+
+ return status;
+}
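A hedged sketch of the driving loop implied by the state machine above: feed one frame with PVAVCEncSetInput(), then keep calling PVAVCEncodeNAL() until the frame is complete. Here avcHandle, input, outBuf, outSize and WriteNAL are hypothetical caller-side names, and error handling is minimal.

    AVCEnc_Status status = PVAVCEncSetInput(&avcHandle, &input);
    if (status == AVCENC_SUCCESS || status == AVCENC_NEW_IDR)
    {
        do
        {
            uint nalSize = outSize;   /* physical limit of outBuf, in bytes */
            int nalType;
            status = PVAVCEncodeNAL(&avcHandle, outBuf, &nalSize, &nalType);
            if (status == AVCENC_SUCCESS || status == AVCENC_PICTURE_READY)
            {
                WriteNAL(outBuf, nalSize, nalType);   /* nalSize bytes of EBSP for one SPS/PPS/slice NAL */
            }
        }
        while (status == AVCENC_SUCCESS);   /* AVCENC_PICTURE_READY or an error ends the frame */
    }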
+
+/* ======================================================================== */
+/* Function : PVAVCEncGetOverrunBuffer() */
+/* Purpose : To retrieve the overrun buffer. Check whether overrun buffer */
+/* is used or not before returning */
+/* In/out : */
+/* Return : Pointer to the internal overrun buffer. */
+/* Modified : */
+/* ======================================================================== */
+OSCL_EXPORT_REF uint8* PVAVCEncGetOverrunBuffer(AVCHandle* avcHandle)
+{
+ AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
+ AVCEncBitstream *bitstream = encvid->bitstream;
+
+ if (bitstream->overrunBuffer == bitstream->bitstreamBuffer) /* OB is used */
+ {
+ return encvid->overrunBuffer;
+ }
+ else
+ {
+ return NULL;
+ }
+}
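When the caller's output buffer is smaller than the NAL, the encoder may have spilled into its internal overrun buffer; a sketch of how a caller could detect and copy that out right after PVAVCEncodeNAL() returns (finalOutput and nalSize are hypothetical caller-side variables):

    uint8 *oBuf = PVAVCEncGetOverrunBuffer(&avcHandle);
    if (oBuf != NULL)   /* the current NAL lives in the overrun buffer, not in the user buffer */
    {
        memcpy(finalOutput, oBuf, nalSize);
    }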
+
+
+/* ======================================================================== */
+/* Function : PVAVCEncGetRecon() */
+/* Date : 4/29/2004 */
+/* Purpose : To retrieve the most recently encoded frame. */
+/* assume that user will make a copy if they want to hold on */
+/* to it. Otherwise, it is not guaranteed to be reserved. */
+/* Most applications prefer to see original frame rather than */
+/* reconstructed frame. So, we are staying away from a complex */
+/* buffering mechanism. If needed, can be added later. */
+/* In/out : */
+/* Return : AVCENC_SUCCESS for success. */
+/* Modified : */
+/* ======================================================================== */
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetRecon(AVCHandle *avcHandle, AVCFrameIO *recon)
+{
+ AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;
+ AVCCommonObj *video;
+ AVCFrameStore *currFS;
+
+ if (encvid == NULL)
+ {
+ return AVCENC_UNINITIALIZED;
+ }
+
+ video = encvid->common;
+ currFS = video->currFS;
+
+ recon->YCbCr[0] = currFS->frame.Sl;
+ recon->YCbCr[1] = currFS->frame.Scb;
+ recon->YCbCr[2] = currFS->frame.Scr;
+ recon->height = currFS->frame.height;
+ recon->pitch = currFS->frame.pitch;
+ recon->disp_order = currFS->PicOrderCnt;
+ recon->coding_order = currFS->FrameNum;
+ recon->id = (uint32) currFS->base_dpb; /* use the pointer as the id */
+
+ currFS->IsOutputted |= 1;
+
+ return AVCENC_SUCCESS;
+}
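A short sketch of borrowing the reconstructed frame after AVCENC_PICTURE_READY and returning it; the frame must be copied if it needs to outlive the next encoding call. SaveYuv is a hypothetical sink.

    AVCFrameIO recon;
    if (PVAVCEncGetRecon(&avcHandle, &recon) == AVCENC_SUCCESS)
    {
        SaveYuv(recon.YCbCr[0], recon.YCbCr[1], recon.YCbCr[2], recon.pitch, recon.height);
        PVAVCEncReleaseRecon(&avcHandle, &recon);   /* hand the buffer back untouched */
    }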
+
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncReleaseRecon(AVCHandle *avcHandle, AVCFrameIO *recon)
+{
+ OSCL_UNUSED_ARG(avcHandle);
+ OSCL_UNUSED_ARG(recon);
+
+ return AVCENC_SUCCESS; //for now
+}
+
+/* ======================================================================== */
+/* Function : PVAVCCleanUpEncoder() */
+/* Date : 4/18/2004 */
+/* Purpose : To clean up memories allocated by PVAVCEncInitialize() */
+/* In/out : */
+/* Return : AVCENC_SUCCESS for success. */
+/* Modified : */
+/* ======================================================================== */
+OSCL_EXPORT_REF void PVAVCCleanUpEncoder(AVCHandle *avcHandle)
+{
+ AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
+ AVCCommonObj *video;
+ uint32 *userData = (uint32*) avcHandle->userData;
+
+ if (encvid != NULL)
+ {
+ CleanMotionSearchModule(avcHandle);
+
+ CleanupRateControlModule(avcHandle);
+
+ if (encvid->functionPointer != NULL)
+ {
+ avcHandle->CBAVC_Free(userData, (int)encvid->functionPointer);
+ }
+
+ if (encvid->min_cost)
+ {
+ avcHandle->CBAVC_Free(userData, (int)encvid->min_cost);
+ }
+
+ if (encvid->intraSearch)
+ {
+ avcHandle->CBAVC_Free(userData, (int)encvid->intraSearch);
+ }
+
+ if (encvid->mot16x16)
+ {
+ avcHandle->CBAVC_Free(userData, (int)encvid->mot16x16);
+ }
+
+ if (encvid->rateCtrl)
+ {
+ avcHandle->CBAVC_Free(userData, (int)encvid->rateCtrl);
+ }
+
+ if (encvid->overrunBuffer)
+ {
+ avcHandle->CBAVC_Free(userData, (int)encvid->overrunBuffer);
+ }
+
+ video = encvid->common;
+ if (video != NULL)
+ {
+ if (video->MbToSliceGroupMap)
+ {
+ avcHandle->CBAVC_Free(userData, (int)video->MbToSliceGroupMap);
+ }
+ if (video->mblock != NULL)
+ {
+ avcHandle->CBAVC_Free(userData, (int)video->mblock);
+ }
+ if (video->decPicBuf != NULL)
+ {
+ CleanUpDPB(avcHandle, video);
+ avcHandle->CBAVC_Free(userData, (int)video->decPicBuf);
+ }
+ if (video->sliceHdr != NULL)
+ {
+ avcHandle->CBAVC_Free(userData, (int)video->sliceHdr);
+ }
+ if (video->currPicParams != NULL)
+ {
+ if (video->currPicParams->slice_group_id)
+ {
+ avcHandle->CBAVC_Free(userData, (int)video->currPicParams->slice_group_id);
+ }
+
+ avcHandle->CBAVC_Free(userData, (int)video->currPicParams);
+ }
+ if (video->currSeqParams != NULL)
+ {
+ avcHandle->CBAVC_Free(userData, (int)video->currSeqParams);
+ }
+ if (encvid->bitstream != NULL)
+ {
+ avcHandle->CBAVC_Free(userData, (int)encvid->bitstream);
+ }
+ if (video != NULL)
+ {
+ avcHandle->CBAVC_Free(userData, (int)video);
+ }
+ }
+
+ avcHandle->CBAVC_Free(userData, (int)encvid);
+
+ avcHandle->AVCObject = NULL;
+ }
+
+ return ;
+}
+
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateBitRate(AVCHandle *avcHandle, uint32 bitrate)
+{
+ OSCL_UNUSED_ARG(avcHandle);
+ OSCL_UNUSED_ARG(bitrate);
+
+ return AVCENC_FAIL;
+}
+
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateFrameRate(AVCHandle *avcHandle, uint32 num, uint32 denom)
+{
+ OSCL_UNUSED_ARG(avcHandle);
+ OSCL_UNUSED_ARG(num);
+ OSCL_UNUSED_ARG(denom);
+
+ return AVCENC_FAIL;
+}
+
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateIDRInterval(AVCHandle *avcHandle, int IDRInterval)
+{
+ OSCL_UNUSED_ARG(avcHandle);
+ OSCL_UNUSED_ARG(IDRInterval);
+
+ return AVCENC_FAIL;
+}
+
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncIDRRequest(AVCHandle *avcHandle)
+{
+ OSCL_UNUSED_ARG(avcHandle);
+
+ return AVCENC_FAIL;
+}
+
+OSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateIMBRefresh(AVCHandle *avcHandle, int numMB)
+{
+ OSCL_UNUSED_ARG(avcHandle);
+ OSCL_UNUSED_ARG(numMB);
+
+ return AVCENC_FAIL;
+}
+
+void PVAVCEncGetFrameStats(AVCHandle *avcHandle, AVCEncFrameStats *avcStats)
+{
+ AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+
+ avcStats->avgFrameQP = GetAvgFrameQP(rateCtrl);
+ avcStats->numIntraMBs = encvid->numIntraMB;
+
+ return ;
+}
+
+
+
diff --git a/media/libstagefright/codecs/avc/enc/src/avcenc_api.h b/media/libstagefright/codecs/avc/enc/src/avcenc_api.h
new file mode 100644
index 0000000..6841ec3
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/avcenc_api.h
@@ -0,0 +1,323 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+/**
+This file contains application function interfaces to the AVC encoder library
+and necessary type definitions and enumerations.
+@publishedAll
+*/
+
+#ifndef AVCENC_API_H_INCLUDED
+#define AVCENC_API_H_INCLUDED
+
+#ifndef AVCAPI_COMMON_H_INCLUDED
+#include "avcapi_common.h"
+#endif
+
+// For memset, etc
+#include <string.h>
+
+/**
+ This enumeration is used for the status returned from the library interface.
+*/
+typedef enum
+{
+ /**
+ Fail information, need to add more error code for more specific info
+ */
+ AVCENC_TRAILINGONES_FAIL = -35,
+ AVCENC_SLICE_EMPTY = -34,
+ AVCENC_POC_FAIL = -33,
+ AVCENC_CONSECUTIVE_NONREF = -32,
+ AVCENC_CABAC_FAIL = -31,
+ AVCENC_PRED_WEIGHT_TAB_FAIL = -30,
+ AVCENC_DEC_REF_PIC_MARK_FAIL = -29,
+ AVCENC_SPS_FAIL = -28,
+ AVCENC_BITSTREAM_BUFFER_FULL = -27,
+ AVCENC_BITSTREAM_INIT_FAIL = -26,
+ AVCENC_CHROMA_QP_FAIL = -25,
+ AVCENC_INIT_QS_FAIL = -24,
+ AVCENC_INIT_QP_FAIL = -23,
+ AVCENC_WEIGHTED_BIPRED_FAIL = -22,
+ AVCENC_INVALID_INTRA_PERIOD = -21,
+ AVCENC_INVALID_CHANGE_RATE = -20,
+ AVCENC_INVALID_BETA_OFFSET = -19,
+ AVCENC_INVALID_ALPHA_OFFSET = -18,
+ AVCENC_INVALID_DEBLOCK_IDC = -17,
+ AVCENC_INVALID_REDUNDANT_PIC = -16,
+ AVCENC_INVALID_FRAMERATE = -15,
+ AVCENC_INVALID_NUM_SLICEGROUP = -14,
+ AVCENC_INVALID_POC_LSB = -13,
+ AVCENC_INVALID_NUM_REF = -12,
+ AVCENC_INVALID_FMO_TYPE = -11,
+ AVCENC_ENCPARAM_MEM_FAIL = -10,
+ AVCENC_LEVEL_NOT_SUPPORTED = -9,
+ AVCENC_LEVEL_FAIL = -8,
+ AVCENC_PROFILE_NOT_SUPPORTED = -7,
+ AVCENC_TOOLS_NOT_SUPPORTED = -6,
+ AVCENC_WRONG_STATE = -5,
+ AVCENC_UNINITIALIZED = -4,
+ AVCENC_ALREADY_INITIALIZED = -3,
+ AVCENC_NOT_SUPPORTED = -2,
+ AVCENC_MEMORY_FAIL = AVC_MEMORY_FAIL,
+ AVCENC_FAIL = AVC_FAIL,
+ /**
+ Generic success value
+ */
+ AVCENC_SUCCESS = AVC_SUCCESS,
+ AVCENC_PICTURE_READY = 2,
+ AVCENC_NEW_IDR = 3, /* upon getting this, users have to call PVAVCEncodeNAL again to get the new SPS and PPS NALs*/
+ AVCENC_SKIPPED_PICTURE = 4 /* continuable error message */
+
+} AVCEnc_Status;
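Since a few of the non-negative codes above are informational rather than fatal, a small helper along these lines (a sketch, not part of the library) can keep caller code readable:

    static int AVCEncStatusIsContinuable(AVCEnc_Status status)
    {
        /* AVCENC_SKIPPED_PICTURE is documented above as a continuable error;
           AVCENC_PICTURE_READY and AVCENC_NEW_IDR are informational successes. */
        return (status == AVCENC_SUCCESS || status == AVCENC_PICTURE_READY ||
                status == AVCENC_NEW_IDR || status == AVCENC_SKIPPED_PICTURE);
    }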
+
+#define MAX_NUM_SLICE_GROUP 8 /* maximum for all the profiles */
+
+/**
+This structure contains the encoding parameters.
+*/
+typedef struct tagAVCEncParam
+{
+ /* if profile/level is set to zero, encoder will choose the closest one for you */
+ AVCProfile profile; /* profile of the bitstream to be compliant with*/
+ AVCLevel level; /* level of the bitstream to be compliant with*/
+
+ int width; /* width of an input frame in pixel */
+ int height; /* height of an input frame in pixel */
+
+ int poc_type; /* picture order count mode, 0,1 or 2 */
+ /* for poc_type == 0 */
+ uint log2_max_poc_lsb_minus_4; /* specify maximum value of POC Lsb, range 0..12*/
+ /* for poc_type == 1 */
+ uint delta_poc_zero_flag; /* delta POC always zero */
+ int offset_poc_non_ref; /* offset for non-reference pic */
+ int offset_top_bottom; /* offset between top and bottom field */
+ uint num_ref_in_cycle; /* number of reference frame in one cycle */
+ int *offset_poc_ref; /* array of offset for ref pic, dimension [num_ref_in_cycle] */
+
+ int num_ref_frame; /* number of reference frame used */
+ int num_slice_group; /* number of slice group */
+ int fmo_type; /* 0: interleave, 1: dispersed, 2: foreground with left-over
+ 3: box-out, 4:raster scan, 5:wipe, 6:explicit */
+ /* for fmo_type == 0 */
+ uint run_length_minus1[MAX_NUM_SLICE_GROUP]; /* array of size num_slice_group, in round-robin fashion */
+ /* fmo_type == 2*/
+ uint top_left[MAX_NUM_SLICE_GROUP-1]; /* array of co-ordinates of each slice_group */
+ uint bottom_right[MAX_NUM_SLICE_GROUP-1]; /* except the last one which is the background. */
+ /* fmo_type == 3,4,5 */
+ AVCFlag change_dir_flag; /* slice group change direction flag */
+ uint change_rate_minus1;
+ /* fmo_type == 6 */
+ uint *slice_group; /* array of size MBWidth*MBHeight */
+
+ AVCFlag db_filter; /* enable deblocking loop filter */
+ int disable_db_idc; /* 0: filter everywhere, 1: no filter, 2: no filter across slice boundary */
+ int alpha_offset; /* alpha offset range -6,...,6 */
+ int beta_offset; /* beta offset range -6,...,6 */
+
+ AVCFlag constrained_intra_pred; /* constrained intra prediction flag */
+
+ AVCFlag auto_scd; /* scene change detection on or off */
+ int idr_period; /* idr frame refresh rate in number of target encoded frames (no concept of actual time).*/
+ int intramb_refresh; /* minimum number of intra MB per frame */
+ AVCFlag data_par; /* enable data partitioning */
+
+ AVCFlag fullsearch; /* enable full-pel full-search mode */
+ int search_range; /* search range for motion vector in (-search_range,+search_range) pixels */
+ AVCFlag sub_pel; /* enable sub pel prediction */
+ AVCFlag submb_pred; /* enable sub MB partition mode */
+ AVCFlag rdopt_mode; /* RD optimal mode selection */
+ AVCFlag bidir_pred; /* enable bi-directional prediction for B-slices; this flag forces the encoder to encode
+ any frame with POC less than the previously encoded frame as a B-frame.
+ If it is off, such frames will remain P-frames. */
+
+ AVCFlag rate_control; /* rate control enable, on: RC on, off: constant QP */
+ int initQP; /* initial QP */
+ uint32 bitrate; /* target encoding bit rate in bits/second */
+ uint32 CPB_size; /* coded picture buffer in number of bits */
+ uint32 init_CBP_removal_delay; /* initial CBP removal delay in msec */
+
+ uint32 frame_rate; /* frame rate in the unit of frames per 1000 second */
+ /* note, frame rate is only needed by the rate control, AVC is timestamp agnostic. */
+
+ AVCFlag out_of_band_param_set; /* flag to set whether param sets are to be retrieved up front or not */
+
+ AVCFlag use_overrun_buffer; /* do not throw away the frame if output buffer is not big enough.
+ copy excess bits to the overrun buffer */
+} AVCEncParams;
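A minimal sketch of populating this structure for a small baseline-style encode. It assumes AVC_ON/AVC_OFF are the flag constants from avcapi_common.h; zero profile/level lets the library pick the closest ones, per the comment above.

    AVCEncParams encParams;
    memset(&encParams, 0, sizeof(AVCEncParams));
    encParams.profile = (AVCProfile)0;      /* let the encoder choose */
    encParams.level = (AVCLevel)0;
    encParams.width = 176;
    encParams.height = 144;
    encParams.frame_rate = 15000;           /* 15 fps, in frames per 1000 seconds */
    encParams.rate_control = AVC_ON;        /* assumed flag constant */
    encParams.initQP = 30;
    encParams.bitrate = 192000;             /* bits per second */
    encParams.CPB_size = 384000;            /* coded picture buffer, in bits */
    encParams.num_ref_frame = 1;
    encParams.num_slice_group = 1;
    encParams.idr_period = 15;              /* an IDR roughly every 15 encoded frames */
    encParams.search_range = 16;
    encParams.sub_pel = AVC_ON;
    encParams.out_of_band_param_set = AVC_ON;
    encParams.use_overrun_buffer = AVC_OFF;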
+
+
+/**
+This structure contains current frame encoding statistics for debugging purpose.
+*/
+typedef struct tagAVCEncFrameStats
+{
+ int avgFrameQP; /* average frame QP */
+ int numIntraMBs; /* number of intra MBs */
+ int numFalseAlarm;
+ int numMisDetected;
+ int numDetected;
+
+} AVCEncFrameStats;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+ /** THE FOLLOWINGS ARE APIS */
+ /**
+ This function initializes the encoder library. It verifies the validity of the
+ encoding parameters against the specified profile/level and the list of supported
+ tools by this library. It allocates necessary memories required to perform encoding.
+ For re-encoding application, if users want to setup encoder in a more precise way,
+ users can give the external SPS and PPS to the encoder to follow.
+ \param "avcHandle" "Handle to the AVC encoder library object."
+ \param "encParam" "Pointer to the encoding parameter structure."
+ \param "extSPS" "External SPS used for re-encoding purpose. NULL if not present"
+ \param "extPPS" "External PPS used for re-encoding purpose. NULL if not present"
+ \return "AVCENC_SUCCESS for success,
+ AVCENC_NOT_SUPPORTED for the use of unsupported tools,
+ AVCENC_MEMORY_FAIL for memory allocation failure,
+ AVCENC_FAIL for generic failure."
+ */
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncInitialize(AVCHandle *avcHandle, AVCEncParams *encParam, void* extSPS, void* extPPS);
+
+
+ /**
+ Since the output buffer size is not known prior to encoding a frame, users need to
+ allocate a buffer that is big enough; otherwise, that frame will be dropped. This function
+ returns the output buffer size that users should allocate to guarantee that one frame fits.
+ It follows the CPB specification for a particular level. However, when users set the
+ use_overrun_buffer flag, this API is not needed, as excess output bits are saved in the
+ overrun buffer to be copied out in small chunks, i.e., users can allocate any output buffer size.
+ \param "avcHandle" "Handle to the AVC encoder library object."
+ \param "size" "Pointer to the size to be modified."
+ \return "AVCENC_SUCCESS for success, AVCENC_UNINITIALIZED when the level is not known."
+ */
+
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetMaxOutputBufferSize(AVCHandle *avcHandle, int* size);
+
+ /**
+ Users call this function to provide an input structure to the encoder library, which keeps
+ a list of the input structures it receives in case users call this function many times before
+ calling PVAVCEncodeNAL. The encoder library will encode them according to the frame_num order.
+ Users should not modify the content of a particular frame until that frame is encoded and
+ returned through the CBAVCEnc_ReturnInput() callback function.
+ \param "avcHandle" "Handle to the AVC encoder library object."
+ \param "input" "Pointer to the input structure."
+ \return "AVCENC_SUCCESS for success,
+ AVCENC_FAIL if the encoder is not in the right state to take a new input frame.
+ AVCENC_NEW_IDR for the detection or determination of a new IDR, with this status,
+ the returned NAL is an SPS NAL,
+ AVCENC_NO_PICTURE if the input frame coding timestamp is too early, users must
+ get next frame or adjust the coding timestamp."
+ */
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncSetInput(AVCHandle *avcHandle, AVCFrameIO *input);
+
+ /**
+ This function is called to encode a NAL unit which can be an SPS NAL, a PPS NAL or
+ a VCL (video coding layer) NAL which contains one slice of data. It could be a
+ fixed number of macroblocks, as specified in the encoder parameters set, or the
+ maximum number of macroblocks fitted into the given input argument "buffer". The
+ input frame is taken from the oldest unencoded input frame provided by users via the
+ PVAVCEncSetInput API.
+ \param "avcHandle" "Handle to the AVC encoder library object."
+ \param "buffer" "Pointer to the output AVC bitstream buffer, the format will be EBSP,
+ not RBSP."
+ \param "buf_nal_size" "As input, the size of the buffer in bytes.
+ This is the physical limitation of the buffer. As output, the size of the EBSP."
+ \param "nal_type" "Pointer to the NAL type of the returned buffer."
+ \return "AVCENC_SUCCESS for success of encoding one slice,
+ AVCENC_PICTURE_READY for the completion of a frame encoding,
+ AVCENC_FAIL for failure (this should not occur, though)."
+ */
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncodeNAL(AVCHandle *avcHandle, uint8 *buffer, uint *buf_nal_size, int *nal_type);
+
+ /**
+ This function sniffs the nal_unit_type such that users can call corresponding APIs.
+ This function is identical to PVAVCDecGetNALType() in the decoder.
+ \param "bitstream" "Pointer to the beginning of a NAL unit (start with forbidden_zero_bit, etc.)."
+ \param "size" "size of the bitstream (NumBytesInNALunit + 1)."
+ \param "nal_unit_type" "Pointer to the return value of nal unit type."
+ \return "AVCENC_SUCCESS if success, AVCENC_FAIL otherwise."
+ */
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetNALType(uint8 *bitstream, int size, int *nal_type, int *nal_ref_idc);
+
+ /**
+ This function returns the pointer to internal overrun buffer. Users can call this to query
+ whether the overrun buffer has been used to encode the current NAL.
+ \param "avcHandle" "Pointer to the handle."
+ \return "Pointer to overrun buffer if it is used, otherwise, NULL."
+ */
+ OSCL_IMPORT_REF uint8* PVAVCEncGetOverrunBuffer(AVCHandle* avcHandle);
+
+ /**
+ This function returns the reconstructed frame of the most recently encoded frame.
+ Note that this frame is not returned to the users yet. Users should only read the
+ content of this frame.
+ \param "avcHandle" "Handle to the AVC encoder library object."
+ \param "output" "Pointer to the input structure."
+ \return "AVCENC_SUCCESS for success, AVCENC_NO_PICTURE if no picture to be outputted."
+ */
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetRecon(AVCHandle *avcHandle, AVCFrameIO *recon);
+
+ /**
+ This function is used to return the reconstructed frame back to the AVC encoder library
+ in order to be re-used for encoding operation. If users want the content of it to remain
+ unchanged for a long time, they should make a copy of it and release the memory back to
+ the encoder. The encoder relies on the id element in the AVCFrameIO structure,
+ thus users should not change the id value.
+ \param "avcHandle" "Handle to the AVC decoder library object."
+ \param "output" "Pointer to the AVCFrameIO structure."
+ \return "AVCENC_SUCCESS for success, AVCENC_FAIL for fail for id not found."
+ */
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncReleaseRecon(AVCHandle *avcHandle, AVCFrameIO *recon);
+
+ /**
+ This function performs clean up operation including memory deallocation.
+ The encoder will also clear the list of input structures it has not released.
+ This implies that users must keep track of the number of input structure they have allocated
+ and free them accordingly.
+ \param "avcHandle" "Handle to the AVC encoder library object."
+ */
+ OSCL_IMPORT_REF void PVAVCCleanUpEncoder(AVCHandle *avcHandle);
+
+ /**
+ This function extracts statistics of the current frame. If the encoder has not finished
+ with the current frame, the result is not accurate.
+ \param "avcHandle" "Handle to the AVC encoder library object."
+ \param "avcStats" "Pointer to AVCEncFrameStats structure."
+ \return "void."
+ */
+ void PVAVCEncGetFrameStats(AVCHandle *avcHandle, AVCEncFrameStats *avcStats);
+
+ /**
+ These functions are used for the modification of encoding parameters.
+ To be polished.
+ */
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateBitRate(AVCHandle *avcHandle, uint32 bitrate);
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateFrameRate(AVCHandle *avcHandle, uint32 num, uint32 denom);
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateIDRInterval(AVCHandle *avcHandle, int IDRInterval);
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncIDRRequest(AVCHandle *avcHandle);
+ OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateIMBRefresh(AVCHandle *avcHandle, int numMB);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* AVCENC_API_H_INCLUDED */
+
diff --git a/media/libstagefright/codecs/avc/enc/src/avcenc_int.h b/media/libstagefright/codecs/avc/enc/src/avcenc_int.h
new file mode 100644
index 0000000..3fe08a1
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/avcenc_int.h
@@ -0,0 +1,471 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+/**
+This file contains application function interfaces to the AVC encoder library
+and necessary type definitions and enumerations.
+@publishedAll
+*/
+
+#ifndef AVCENC_INT_H_INCLUDED
+#define AVCENC_INT_H_INCLUDED
+
+#ifndef AVCINT_COMMON_H_INCLUDED
+#include "avcint_common.h"
+#endif
+#ifndef AVCENC_API_H_INCLUDED
+#include "avcenc_api.h"
+#endif
+
+typedef float OsclFloat;
+
+/* Definition for the structures below */
+#define DEFAULT_ATTR 0 /* default memory attribute */
+#define MAX_INPUT_FRAME 30 /* some arbitrary number, it can be much higher than this. */
+#define MAX_REF_FRAME 16 /* max size of the RefPicList0 and RefPicList1 */
+#define MAX_REF_PIC_LIST 33
+
+#define MIN_QP 0
+#define MAX_QP 51
+#define SHIFT_QP 12
+#define LAMBDA_ACCURACY_BITS 16
+#define LAMBDA_FACTOR(lambda) ((int)((double)(1<<LAMBDA_ACCURACY_BITS)*lambda+0.5))
+
+
+#define DISABLE_THRESHOLDING 0
+// for better R-D performance
+#define _LUMA_COEFF_COST_ 4 //!< threshold for luma coeffs
+#define _CHROMA_COEFF_COST_ 4 //!< threshold for chroma coeffs, used to be 7
+#define _LUMA_MB_COEFF_COST_ 5 //!< threshold for luma coeffs of inter Macroblocks
+#define _LUMA_8x8_COEFF_COST_ 5 //!< threshold for luma coeffs of 8x8 Inter Partition
+#define MAX_VALUE 999999 //!< used for start value for some variables
+
+#define WEIGHTED_COST(factor,bits) (((factor)*(bits))>>LAMBDA_ACCURACY_BITS)
+#define MV_COST(f,s,cx,cy,px,py) (WEIGHTED_COST(f,mvbits[((cx)<<(s))-px]+mvbits[((cy)<<(s))-py]))
+#define MV_COST_S(f,cx,cy,px,py) (WEIGHTED_COST(f,mvbits[cx-px]+mvbits[cy-py]))
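As a concrete example of the fixed-point arithmetic here: with LAMBDA_ACCURACY_BITS = 16, LAMBDA_FACTOR(0.85) evaluates to (int)(65536 * 0.85 + 0.5) = 55706, and WEIGHTED_COST(55706, bits) then approximates 0.85 * bits using only integer operations (e.g. 10 bits costs (55706 * 10) >> 16 = 8).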
+
+/* for sub-pel search and interpolation */
+#define SUBPEL_PRED_BLK_SIZE 576 // 24x24
+#define REF_CENTER 75
+#define V2Q_H0Q 1
+#define V0Q_H2Q 2
+#define V2Q_H2Q 3
+
+/*
+#define V3Q_H0Q 1
+#define V3Q_H1Q 2
+#define V0Q_H1Q 3
+#define V1Q_H1Q 4
+#define V1Q_H0Q 5
+#define V1Q_H3Q 6
+#define V0Q_H3Q 7
+#define V3Q_H3Q 8
+#define V2Q_H3Q 9
+#define V2Q_H0Q 10
+#define V2Q_H1Q 11
+#define V2Q_H2Q 12
+#define V3Q_H2Q 13
+#define V0Q_H2Q 14
+#define V1Q_H2Q 15
+*/
+
+
+#define DEFAULT_OVERRUN_BUFFER_SIZE 1000
+
+// associated with the above cost model
+const uint8 COEFF_COST[2][16] =
+{
+ {3, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
+};
+
+
+
+//! convert from H.263 QP to H.264 quant given by: quant=pow(2,QP/6)
+const int QP2QUANT[40] =
+{
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 4, 4, 4, 5, 6,
+ 6, 7, 8, 9, 10, 11, 13, 14,
+ 16, 18, 20, 23, 25, 29, 32, 36,
+ 40, 45, 51, 57, 64, 72, 81, 91
+};
+
+
+/**
+This enumeration keeps track of the internal state of the encoder, i.e., what it is currently
+doing. The encoding flow follows the order in which these states are listed.
+@publishedAll
+*/
+typedef enum
+{
+ AVCEnc_Initializing = 0,
+ AVCEnc_Encoding_SPS,
+ AVCEnc_Encoding_PPS,
+ AVCEnc_Analyzing_Frame,
+ AVCEnc_WaitingForBuffer, // pending state
+ AVCEnc_Encoding_Frame,
+} AVCEnc_State ;
+
+/**
+Bitstream structure contains bitstream related parameters such as the pointer
+to the buffer, the current byte position and bit position. The content of the
+bitstreamBuffer will be in EBSP format as the emulation prevention codes are
+automatically inserted as the RBSP is recorded.
+@publishedAll
+*/
+typedef struct tagEncBitstream
+{
+ uint8 *bitstreamBuffer; /* pointer to buffer memory */
+ int buf_size; /* size of the buffer memory */
+ int write_pos; /* next position to write to bitstreamBuffer */
+ int count_zeros; /* count number of consecutive zero */
+ uint current_word; /* byte-swapped (MSB left) current word to write to buffer */
+ int bit_left; /* number of bit left in current_word */
+ uint8 *overrunBuffer; /* extra output buffer to prevent current skip due to output buffer overrun*/
+ int oBSize; /* size of allocated overrun buffer */
+ void *encvid; /* pointer to the main object */
+
+} AVCEncBitstream;
+
+/**
+This structure is used for rate control purposes. Other performance-related control
+variables, such as RD cost, statistics, and motion search data,
+should also be kept in this structure.
+@publishedAll
+*/
+
+
+typedef struct tagRDInfo
+{
+ int QP;
+ int actual_bits;
+ OsclFloat mad;
+ OsclFloat R_D;
+} RDInfo;
+
+typedef struct tagMultiPass
+{
+ /* multipass rate control data */
+ int target_bits; /* target bits for current frame, = rc->T */
+ int actual_bits; /* actual bits for current frame obtained after encoding, = rc->Rc*/
+ int QP; /* quantization level for current frame, = rc->Qc*/
+ int prev_QP; /* quantization level for previous frame */
+ int prev_prev_QP; /* quantization level for previous frame before last*/
+ OsclFloat mad; /* mad for current frame, = video->avgMAD*/
+ int bitrate; /* bitrate for current frame */
+ OsclFloat framerate; /* framerate for current frame*/
+
+ int nRe_Quantized; /* control variable for multipass encoding, */
+ /* 0 : first pass */
+ /* 1 : intermediate pass(quantization and VLC loop only) */
+ /* 2 : final pass(de-quantization, idct, etc) */
+ /* 3 : macroblock level rate control */
+
+ int encoded_frames; /* counter for all encoded frames */
+ int re_encoded_frames; /* counter for all multipass encoded frames*/
+ int re_encoded_times; /* counter for all times of multipass frame encoding */
+
+ /* Multiple frame prediction*/
+ RDInfo **pRDSamples; /* pRDSamples[30][32], 30->30fps, 32 -> 5 bit quantizer, 32 candidates*/
+ int framePos; /* specific position in previous multiple frames*/
+ int frameRange; /* number of overall previous multiple frames */
+ int samplesPerFrame[30]; /* number of samples per frame, 30->30fps */
+
+ /* Bit allocation for scene change frames and high motion frames */
+ OsclFloat sum_mad;
+ int counter_BTsrc; /* BT = Bit Transfer, bit transfer from low motion frames or less complicatedly compressed frames */
+ int counter_BTdst; /* BT = Bit Transfer, bit transfer to scene change frames or high motion frames or more complicatedly compressed frames */
+ OsclFloat sum_QP;
+ int diff_counter; /* diff_counter = -diff_counter_BTdst, or diff_counter_BTsrc */
+
+ /* For target bitrate or framerate update */
+ OsclFloat target_bits_per_frame; /* = C = bitrate/framerate */
+ OsclFloat target_bits_per_frame_prev; /* previous C */
+ OsclFloat aver_mad; /* so-far average mad could replace sum_mad */
+ OsclFloat aver_mad_prev; /* previous average mad */
+ int overlapped_win_size; /* transition period of time */
+ int encoded_frames_prev; /* previous encoded_frames */
+} MultiPass;
+
+
+typedef struct tagdataPointArray
+{
+ int Qp;
+ int Rp;
+ OsclFloat Mp; /* for MB-based RC */
+ struct tagdataPointArray *next;
+ struct tagdataPointArray *prev;
+} dataPointArray;
+
+typedef struct tagAVCRateControl
+{
+
+ /* these parameters are initialized by the users AVCEncParams */
+ /* bitrate-robustness tradeoff */
+ uint scdEnable; /* enable scene change detection */
+ int idrPeriod; /* IDR period in number of frames */
+ int intraMBRate; /* intra MB refresh rate per frame */
+ uint dpEnable; /* enable data partitioning */
+
+ /* quality-complexity tradeoff */
+ uint subPelEnable; /* enable quarter pel search */
+ int mvRange; /* motion vector search range in +/- pixel */
+ uint subMBEnable; /* enable sub MB prediction mode (4x4, 4x8, 8x4) */
+ uint rdOptEnable; /* enable RD-opt mode selection */
+ uint twoPass; /* flag for 2 pass encoding ( for future )*/
+ uint bidirPred; /* bi-directional prediction for B-frame. */
+
+ uint rcEnable; /* enable rate control, '1' on, '0' const QP */
+ int initQP; /* initial QP */
+
+ /* note the following 3 params are for HRD, these triplets can be a series
+ of triplets as the generalized HRD allows. SEI message must be generated in this case. */
+ /* We no longer have to differentiate between CBR and VBR. The users to the
+ AVC encoder lib will do the mapping from CBR/VBR to these parameters. */
+ int32 bitRate; /* target bit rate for the overall clip in bits/second*/
+ int32 cpbSize; /* coded picture buffer size in bytes */
+ int32 initDelayOffset; /* initial CBP removal delay in bits */
+
+ OsclFloat frame_rate; /* frame rate */
+ int srcInterval; /* source frame interval in msec */
+ int basicUnit; /* number of macroblocks per BU */
+
+ /* Then internal parameters for the operation */
+ uint first_frame; /* a flag for the first frame */
+ int lambda_mf; /* for example */
+ int totalSAD; /* SAD of current frame */
+
+ /*******************************************/
+ /* this part comes from MPEG4 rate control */
+ int alpha; /* weight for I frame */
+ int Rs; /*bit rate for the sequence (or segment) e.g., 24000 bits/sec */
+ int Rc; /*bits used for the current frame. It is the bit count obtained after encoding. */
+ int Rp; /*bits to be removed from the buffer per picture. */
+ /*? is this the average one, or just the bits coded for the previous frame */
+ int Rps; /*bit to be removed from buffer per src frame */
+ OsclFloat Ts; /*number of seconds for the sequence (or segment). e.g., 10 sec */
+ OsclFloat Ep;
+ OsclFloat Ec; /*mean absolute difference for the current frame after motion compensation.*/
+ /*If the macroblock is intra coded, the original spatial pixel values are summed.*/
+ int Qc; /*quantization level used for the current frame. */
+ int Nr; /*number of P frames remaining for encoding.*/
+ int Rr; /*number of bits remaining for encoding this sequence (or segment).*/
+ int Rr_Old;
+ int T; /*target bit to be used for the current frame.*/
+ int S; /*number of bits used for encoding the previous frame.*/
+ int Hc; /*header and motion vector bits used in the current frame. It includes all the information except to the residual information.*/
+ int Hp; /*header and motion vector bits used in the previous frame. It includes all the information except to the residual information.*/
+ int Ql; /*quantization level used in the previous frame */
+ int Bs; /*buffer size e.g., R/2 */
+ int B; /*current buffer level e.g., R/4 - start from the middle of the buffer */
+ OsclFloat X1;
+ OsclFloat X2;
+ OsclFloat X11;
+ OsclFloat M; /*safe margin for the buffer */
+ OsclFloat smTick; /*ratio of src versus enc frame rate */
+ double remnant; /*remainder frame of src/enc frame for fine frame skipping */
+ int timeIncRes; /* vol->timeIncrementResolution */
+
+ dataPointArray *end; /*quantization levels for the past (20) frames */
+
+ int frameNumber; /* ranging from 0 to 20 nodes*/
+ int w;
+ int Nr_Original;
+ int Nr_Old, Nr_Old2;
+ int skip_next_frame;
+ int Qdep; /* smooth Q adjustment */
+ int VBR_Enabled;
+
+ int totalFrameNumber; /* total coded frames, for debugging!!*/
+
+ char oFirstTime;
+
+ int numFrameBits; /* keep track of number of bits of the current frame */
+ int NumberofHeaderBits;
+ int NumberofTextureBits;
+ int numMBHeaderBits;
+ int numMBTextureBits;
+ double *MADofMB;
+ int32 bitsPerFrame;
+
+ /* BX rate control, something like TMN8 rate control*/
+
+ MultiPass *pMP;
+
+ int TMN_W;
+ int TMN_TH;
+ int VBV_fullness;
+ int max_BitVariance_num; /* the number of the maximum bit variance within the given buffer with the unit of 10% of bitrate/framerate*/
+ int encoded_frames; /* counter for all encoded frames */
+ int low_bound; /* bound for underflow detection, usually low_bound=-Bs/2, but could be changed in H.263 mode */
+ int VBV_fullness_offset; /* offset of VBV_fullness, usually is zero, but can be changed in H.263 mode*/
+ /* End BX */
+
+} AVCRateControl;
+
+
+/**
+This structure is for the motion vector information. */
+typedef struct tagMV
+{
+ int x;
+ int y;
+ uint sad;
+} AVCMV;
+
+/**
+This structure contains function pointers for different platform dependent implementation of
+functions. */
+typedef struct tagAVCEncFuncPtr
+{
+
+ int (*SAD_MB_HalfPel[4])(uint8*, uint8*, int, void *);
+ int (*SAD_Macroblock)(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
+
+} AVCEncFuncPtr;
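If a platform-optimized SAD routine is available, it can be slotted into this table in place of the C default; a purely illustrative sketch (MySAD_Platform is hypothetical, and AVCSAD_Macroblock_C is the C default that PVAVCEncInitialize() installs earlier in this patch):

    static int MySAD_Platform(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)
    {
        /* a real version would compute the 16x16 SAD with SIMD and exit early
           once the partial sum exceeds dmin_lx; fall back to the C default here */
        return AVCSAD_Macroblock_C(ref, blk, dmin_lx, extra_info);
    }

    /* hooked up where the defaults are assigned, e.g.: */
    /* encvid->functionPointer->SAD_Macroblock = &MySAD_Platform; */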
+
+/**
+This structure contains information necessary for correct padding.
+*/
+typedef struct tagPadInfo
+{
+ int i;
+ int width;
+ int j;
+ int height;
+} AVCPadInfo;
+
+
+#ifdef HTFM
+typedef struct tagHTFM_Stat
+{
+ int abs_dif_mad_avg;
+ uint countbreak;
+ int offsetArray[16];
+ int offsetRef[16];
+} HTFM_Stat;
+#endif
+
+
+/**
+This structure is the main object for AVC encoder library providing access to all
+global variables. It is allocated at PVAVCInitEncoder and freed at PVAVCCleanUpEncoder.
+@publishedAll
+*/
+typedef struct tagEncObject
+{
+
+ AVCCommonObj *common;
+
+ AVCEncBitstream *bitstream; /* for current NAL */
+ uint8 *overrunBuffer; /* extra output buffer to prevent current skip due to output buffer overrun*/
+ int oBSize; /* size of allocated overrun buffer */
+
+ /* rate control */
+ AVCRateControl *rateCtrl; /* pointer to the rate control structure */
+
+ /* encoding operation */
+ AVCEnc_State enc_state; /* encoding state */
+
+ AVCFrameIO *currInput; /* pointer to the current input frame */
+
+ int currSliceGroup; /* currently encoded slice group id */
+
+ int level[24][16], run[24][16]; /* scratch memory */
+ int leveldc[16], rundc[16]; /* for DC component */
+ int levelcdc[16], runcdc[16]; /* for chroma DC component */
+ int numcoefcdc[2]; /* number of coefficient for chroma DC */
+ int numcoefdc; /* number of coefficients for DC component */
+
+ int qp_const;
+ int qp_const_c;
+ /********* intra prediction scratch memory **********************/
+ uint8 pred_i16[AVCNumI16PredMode][256]; /* save prediction for MB */
+ uint8 pred_i4[AVCNumI4PredMode][16]; /* save prediction for blk */
+ uint8 pred_ic[AVCNumIChromaMode][128]; /* for 2 chroma */
+
+ int mostProbableI4Mode[16]; /* in raster scan order */
+ /********* motion compensation related variables ****************/
+ AVCMV *mot16x16; /* Saved motion vectors for 16x16 block*/
+ AVCMV(*mot16x8)[2]; /* Saved motion vectors for 16x8 block*/
+ AVCMV(*mot8x16)[2]; /* Saved motion vectors for 8x16 block*/
+ AVCMV(*mot8x8)[4]; /* Saved motion vectors for 8x8 block*/
+
+ /********* subpel position **************************************/
+ uint32 subpel_pred[SUBPEL_PRED_BLK_SIZE/*<<2*/]; /* all 16 sub-pel positions */
+ uint8 *hpel_cand[9]; /* pointer to half-pel position */
+ int best_hpel_pos; /* best position */
+ uint8 qpel_cand[8][24*16]; /* pointer to quarter-pel position */
+ int best_qpel_pos;
+ uint8 *bilin_base[9][4]; /* pointer to 4 position at top left of bilinear quarter-pel */
+
+ /* need for intra refresh rate */
+ uint8 *intraSearch; /* Intra Array for MBs to be intra searched */
+ uint firstIntraRefreshMBIndx; /* keep track for intra refresh */
+
+ int i4_sad; /* temporary for i4 mode SAD */
+ int *min_cost; /* Minimum cost for the all MBs */
+ int lambda_mode; /* Lagrange parameter for mode selection */
+ int lambda_motion; /* Lagrange parameter for MV selection */
+
+ uint8 *mvbits_array; /* Table for bits spent in the cost function */
+ uint8 *mvbits; /* An offset to the above array. */
+
+ /* to speedup the SAD calculation */
+ void *sad_extra_info;
+ uint8 currYMB[256]; /* interleaved current macroblock in HTFM order */
+
+#ifdef HTFM
+ int nrmlz_th[48]; /* Threshold for fast SAD calculation using HTFM */
+ HTFM_Stat htfm_stat; /* For statistics collection */
+#endif
+
+ /* statistics */
+ int numIntraMB; /* keep track of number of intra MB */
+
+ /* encoding complexity control */
+ uint fullsearch_enable; /* flag to enable full-pel full-search */
+
+ /* misc.*/
+ bool outOfBandParamSet; /* flag to enable out-of-band param set */
+
+ AVCSeqParamSet extSPS; /* for external SPS */
+ AVCPicParamSet extPPS; /* for external PPS */
+
+ /* time control */
+ uint32 prevFrameNum; /* previous frame number starting from modTimeRef */
+ uint32 modTimeRef; /* Reference modTime update every I-Vop*/
+ uint32 wrapModTime; /* Offset to modTime Ref, rarely used */
+
+ uint prevProcFrameNum; /* previously processed frame number, could be skipped */
+ uint prevCodedFrameNum; /* previously encoded frame number */
+ /* POC related variables */
+ uint32 dispOrdPOCRef; /* reference POC in display order units. */
+
+ /* Function pointers */
+ AVCEncFuncPtr *functionPointer; /* store pointers to platform specific functions */
+
+ /* Application control data */
+ AVCHandle *avcHandle;
+
+
+} AVCEncObject;
+
+
+#endif /*AVCENC_INT_H_INCLUDED*/
+
diff --git a/media/libstagefright/codecs/avc/enc/src/avcenc_lib.h b/media/libstagefright/codecs/avc/enc/src/avcenc_lib.h
new file mode 100644
index 0000000..17e28ef
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/avcenc_lib.h
@@ -0,0 +1,1020 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+/**
+This file contains declarations of internal functions for the AVC encoder library.
+@publishedAll
+*/
+#ifndef AVCENC_LIB_H_INCLUDED
+#define AVCENC_LIB_H_INCLUDED
+
+#ifndef AVCLIB_COMMON_H_INCLUDED
+#include "avclib_common.h"
+#endif
+#ifndef AVCENC_INT_H_INCLUDED
+#include "avcenc_int.h"
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+ /*------------- block.c -------------------------*/
+
+ /**
+ This function performs residue calculation, transform, quantization, inverse quantization,
+ inverse transform and residue compensation on a 4x4 block.
+ \param "encvid" "Pointer to AVCEncObject."
+ \param "blkidx" "raster scan block index of the current 4x4 block."
+ \param "cur" "Pointer to the reconstructed block."
+ \param "org" "Pointer to the original block."
+ \param "coef_cost" "Pointer to the coefficient cost to be filled in and returned."
+ \return "Number of non-zero coefficients."
+ */
+ int dct_luma(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org, int *coef_cost);
+
+ /**
+ This function performs IDCT on an INTER macroblock.
+ \param "video" "Pointer to AVCCommonObj."
+ \param "curL" "Pointer to the origin of the macroblock on the current frame."
+ \param "currMB" "Pointer to the AVCMacroblock structure."
+ \param "picPitch" "Pitch of the current frame."
+ \return "void".
+ */
+ void MBInterIdct(AVCCommonObj *video, uint8 *curL, AVCMacroblock *currMB, int picPitch);
+
+ /**
+ This function performs residue calculation, transform, quantization, inverse quantization,
+ inverse transform and residue compensation on a macroblock.
+ \param "encvid" "Pointer to AVCEncObject."
+ \param "curL" "Pointer to the reconstructed MB."
+ \param "orgL" "Pointer to the original MB."
+ \return "void"
+ */
+ void dct_luma_16x16(AVCEncObject *encvid, uint8 *curL, uint8 *orgL);
+
+ /**
+ This function performs residue calculation, transform, quantization, inverse quantization,
+ inverse transform and residue compensation for the chroma components of an MB.
+ \param "encvid" "Pointer to AVCEncObject."
+ \param "curC" "Pointer to the reconstructed MB."
+ \param "orgC" "Pointer to the original MB."
+ \param "cr" "Flag whether it is Cr or not."
+ \return "void"
+ */
+ void dct_chroma(AVCEncObject *encvid, uint8 *curC, uint8 *orgC, int cr);
+
+ /*----------- init.c ------------------*/
+ /**
+ This function interprets the encoding parameters provided by users in encParam.
+ The results are kept in AVCEncObject, AVCSeqParamSet, AVCPicParamSet and AVCSliceHeader.
+ \param "encvid" "Pointer to AVCEncObject."
+ \param "encParam" "Pointer to AVCEncParam."
+ \param "extSPS" "External SPS template to be followed. NULL if not present."
+ \param "extPPS" "External PPS template to be followed. NULL if not present."
+ \return "see AVCEnc_Status."
+ */
+ AVCEnc_Status SetEncodeParam(AVCHandle *avcHandle, AVCEncParams *encParam,
+ void *extSPS, void *extPPS);
+
+ /**
+ This function verifies whether the encoding parameters meet the set of tools supported
+ by a specific profile. If the profile is not set, it will just find the closest
+ profile instead of verifying it.
+ \param "video" "Pointer to AVCEncObject."
+ \param "seqParam" "Pointer to AVCSeqParamSet."
+ \param "picParam" "Pointer to AVCPicParamSet."
+ \return "AVCENC_SUCCESS if success,
+ AVCENC_PROFILE_NOT_SUPPORTED if the specified profile
+ is not supported by this version of the library,
+ AVCENC_TOOLS_NOT_SUPPORTED if any of the specified encoding tools are
+ not supported by the user-selected profile."
+ */
+ AVCEnc_Status VerifyProfile(AVCEncObject *video, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam);
+
+ /**
+ This function verifies whether the encoding parameters meet the requirements
+ for a specific level. If the level is not set, it will just find the closest
+ level instead of verifying it.
+ \param "video" "Pointer to AVCEncObject."
+ \param "seqParam" "Pointer to AVCSeqParamSet."
+ \param "picParam" "Pointer to AVCPicParamSet."
+ \return "AVCENC_SUCCESS if success,
+ AVCENC_LEVEL_NOT_SUPPORTED if the specified level
+ is not supported by this version of the library,
+ AVCENC_LEVEL_FAIL if any of the encoding parameters exceed
+ the range of the user-selected level."
+ */
+ AVCEnc_Status VerifyLevel(AVCEncObject *video, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam);
+
+ /**
+ This function initializes the frame encoding by setting POC/frame_num related parameters. It
+ also performs motion estimation.
+ \param "encvid" "Pointer to the AVCEncObject."
+ \return "AVCENC_SUCCESS if success, AVCENC_NO_PICTURE if there is no input picture
+ in the queue to encode, AVCENC_POC_FAIL or AVCENC_CONSECUTIVE_NONREF for POC
+ related errors, AVCENC_NEW_IDR if new IDR is detected."
+ */
+ AVCEnc_Status InitFrame(AVCEncObject *encvid);
+
+ /**
+ This function initializes slice header related variables and other variables necessary
+ for encoding one slice.
+ \param "encvid" "Pointer to the AVCEncObject."
+ \return "AVCENC_SUCCESS if success."
+ */
+ AVCEnc_Status InitSlice(AVCEncObject *encvid);
+
+ /*----------- header.c ----------------*/
+ /**
+ This function performs bitstream encoding of the sequence parameter set NAL.
+ \param "encvid" "Pointer to the AVCEncObject."
+ \param "stream" "Pointer to AVCEncBitstream."
+ \return "AVCENC_SUCCESS if success or AVCENC_SPS_FAIL or others for unexpected failure which
+ should not occur. The SPS parameters should all be verified before this function is called."
+ */
+ AVCEnc_Status EncodeSPS(AVCEncObject *encvid, AVCEncBitstream *stream);
+
+ /**
+ This function encodes the VUI parameters into the sequence parameter set bitstream.
+ \param "stream" "Pointer to AVCEncBitstream."
+ \param "vui" "Pointer to AVCVUIParams."
+ \return "nothing."
+ */
+ void EncodeVUI(AVCEncBitstream* stream, AVCVUIParams* vui);
+
+ /**
+ This function encodes HRD parameters into the sequence parameter set bitstream
+ \param "stream" "Pointer to AVCEncBitstream."
+ \param "hrd" "Pointer to AVCHRDParams."
+ \return "nothing."
+ */
+ void EncodeHRD(AVCEncBitstream* stream, AVCHRDParams* hrd);
+
+
+ /**
+ This function performs bitstream encoding of the picture parameter set NAL.
+ \param "encvid" "Pointer to the AVCEncObject."
+ \param "stream" "Pointer to AVCEncBitstream."
+ \return "AVCENC_SUCCESS if success or AVCENC_PPS_FAIL or others for unexpected failure which
+ should not occur. The SPS parameters should all be verified before this function is called."
+ */
+ AVCEnc_Status EncodePPS(AVCEncObject *encvid, AVCEncBitstream *stream);
+
+ /**
+ This function encodes slice header information which has been initialized or fabricated
+ prior to entering this function.
+ \param "encvid" "Pointer to the AVCEncObject."
+ \param "stream" "Pointer to AVCEncBitstream."
+ \return "AVCENC_SUCCESS if success or bitstream fail statuses."
+ */
+ AVCEnc_Status EncodeSliceHeader(AVCEncObject *encvid, AVCEncBitstream *stream);
+
+ /**
+ This function encodes reference picture list reordering related syntax.
+ \param "video" "Pointer to AVCCommonObj."
+ \param "stream" "Pointer to AVCEncBitstream."
+ \param "sliceHdr" "Pointer to AVCSliceHdr."
+ \param "slice_type" "Value of slice_type - 5 if greater than 5."
+ \return "AVCENC_SUCCESS for success and AVCENC_FAIL otherwise."
+ */
+ AVCEnc_Status ref_pic_list_reordering(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type);
+
+ /**
+ This function encodes dec_ref_pic_marking related syntax.
+ \param "video" "Pointer to AVCCommonObj."
+ \param "stream" "Pointer to AVCEncBitstream."
+ \param "sliceHdr" "Pointer to AVCSliceHdr."
+ \return "AVCENC_SUCCESS for success and AVCENC_FAIL otherwise."
+ */
+ AVCEnc_Status dec_ref_pic_marking(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr);
+
+ /**
+ This function initializes the POC related variables and the POC syntax to be encoded
+ to the slice header derived from the disp_order and is_reference flag of the original
+ input frame to be encoded.
+ \param "video" "Pointer to the AVCEncObject."
+ \return "AVCENC_SUCCESS if success,
+ AVCENC_POC_FAIL if the poc type is undefined or
+ AVCENC_CONSECUTIVE_NONREF if there are consecutive non-reference frame for POC type 2."
+ */
+ AVCEnc_Status InitPOC(AVCEncObject *video);
+
+ /**
+ This function performs POC related operation after a picture is decoded.
+ \param "video" "Pointer to AVCCommonObj."
+ \return "AVCENC_SUCCESS"
+ */
+ AVCEnc_Status PostPOC(AVCCommonObj *video);
+
+ /*----------- bitstream_io.c ----------------*/
+ /**
+ This function initializes the bitstream structure with the information given by
+ the users.
+ \param "bitstream" "Pointer to the AVCEncBitstream structure."
+ \param "buffer" "Pointer to the unsigned char buffer for output."
+ \param "buf_size" "The size of the buffer in bytes."
+ \param "overrunBuffer" "Pointer to extra overrun buffer."
+ \param "oBSize" "Size of overrun buffer in bytes."
+ \return "AVCENC_SUCCESS if success, AVCENC_BITSTREAM_INIT_FAIL if fail"
+ */
+ AVCEnc_Status BitstreamEncInit(AVCEncBitstream *bitstream, uint8 *buffer, int buf_size,
+ uint8 *overrunBuffer, int oBSize);
+
+ /**
+ This function writes the data from the cache into the bitstream buffer. It also adds the
+ emulation prevention code if necessary.
+ \param "stream" "Pointer to the AVCEncBitstream structure."
+ \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail."
+ */
+ AVCEnc_Status AVCBitstreamSaveWord(AVCEncBitstream *stream);
+
+ /**
+ This function writes the codeword into the cache which will eventually be written to
+ the bitstream buffer.
+ \param "stream" "Pointer to the AVCEncBitstream structure."
+ \param "nBits" "Number of bits in the codeword."
+ \param "code" "The codeword."
+ \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail."
+ */
+ AVCEnc_Status BitstreamWriteBits(AVCEncBitstream *stream, int nBits, uint code);
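For reference, the unsigned Exp-Golomb codes used throughout the headers can be built directly on top of this primitive; a sketch (the library's own ue(v) helper, not shown in this diff, presumably does the equivalent):

    static AVCEnc_Status ue_v_sketch(AVCEncBitstream *stream, uint codeNum)
    {
        uint temp = codeNum + 1;
        int len = 0;
        while (temp)        /* len = number of bits needed to represent codeNum + 1 */
        {
            temp >>= 1;
            len++;
        }
        /* (len - 1) leading zeros followed by (codeNum + 1) written in len bits */
        return BitstreamWriteBits(stream, (len << 1) - 1, codeNum + 1);
    }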
+
+ /**
+ This function writes one bit of data into the cache which will eventually be written
+ to the bitstream buffer.
+ \param "stream" "Pointer to the AVCEncBitstream structure."
+ \param "code" "The codeword."
+ \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail."
+ */
+ AVCEnc_Status BitstreamWrite1Bit(AVCEncBitstream *stream, uint code);
+
+ /**
+ This function adds trailing bits to the bitstream and reports back the final EBSP size.
+ \param "stream" "Pointer to the AVCEncBitstream structure."
+ \param "nal_size" "Output the final NAL size."
+ \return "AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail."
+ */
+ AVCEnc_Status BitstreamTrailingBits(AVCEncBitstream *bitstream, uint *nal_size);
+
+ /**
+ This function checks whether the current bit position is byte-aligned or not.
+ \param "stream" "Pointer to the bitstream structure."
+ \return "true if byte-aligned, false otherwise."
+ */
+ bool byte_aligned(AVCEncBitstream *stream);
+
+
+ /**
+ This function checks the availability of the overrun buffer and switches to it when
+ the normal buffer is not big enough.
+ \param "stream" "Pointer to the bitstream structure."
+ \param "numExtraBytes" "Number of extra bytes needed."
+ \return "AVCENC_SUCCESS or AVCENC_FAIL."
+ */
+ AVCEnc_Status AVCBitstreamUseOverrunBuffer(AVCEncBitstream* stream, int numExtraBytes);
+
+
+ /*-------------- intra_est.c ---------------*/
+
+ /** This function performs intra/inter decision based on ABE.
+ \param "encvid" "Pointer to AVCEncObject."
+ \param "min_cost" "Best inter cost."
+ \param "curL" "Pointer to the current MB origin in reconstructed frame."
+ \param "picPitch" "Pitch of the reconstructed frame."
+ \return "Boolean for intra mode."
+ */
+
+//bool IntraDecisionABE(AVCEncObject *encvid, int min_cost, uint8 *curL, int picPitch);
+ bool IntraDecision(int *min_cost, uint8 *cur, int pitch, bool ave);
+
+ /**
+ This function performs intra prediction mode search.
+ \param "encvid" "Pointer to AVCEncObject."
+ \param "mbnum" "Current MB number."
+ \param "curL" "Pointer to the current MB origin in reconstructed frame."
+ \param "picPitch" "Pitch of the reconstructed frame."
+ \return "void."
+ */
+ void MBIntraSearch(AVCEncObject *encvid, int mbnum, uint8 *curL, int picPitch);
+
+ /**
+ This function generates all the I16 prediction modes for an MB and keeps them in
+ encvid->pred_i16.
+ \param "encvid" "Pointer to AVCEncObject."
+ \return "void"
+ */
+ void intrapred_luma_16x16(AVCEncObject *encvid);
+
+ /**
+ This function calculates the cost of all I16 modes and compares them to find the minimum.
+ \param "encvid" "Pointer to AVCEncObject."
+ \param "orgY" "Pointer to the original luma MB."
+ \param "min_cost" "Pointer to the minimal cost so-far."
+ \return "void"
+ */
+ void find_cost_16x16(AVCEncObject *encvid, uint8 *orgY, int *min_cost);
+
+ /**
+ This function calculates the cost of each I16 mode.
+ \param "org" "Pointer to the original luma MB."
+ \param "org_pitch" "Stride size of the original frame."
+ \param "pred" "Pointer to the prediction values."
+ \param "min_cost" "Minimal cost so-far."
+ \return "Cost"
+ */
+
+ int cost_i16(uint8 *org, int org_pitch, uint8 *pred, int min_cost);
+
+ /**
+ This function generates all the I4 prediction modes and selects the best one
+ for each block inside a macroblock. It also calls dct_luma to generate the reconstructed
+ MB and the transform coefficients to be encoded.
+ \param "encvid" "Pointer to AVCEncObject."
+ \param "min_cost" "Pointer to the minimal cost so-far."
+ \return "void"
+ */
+ void mb_intra4x4_search(AVCEncObject *encvid, int *min_cost);
+
+ /**
+ This function calculates the most probable I4 mode of a given 4x4 block
+ from neighboring information according to the AVC/H.264 standard.
+ \param "video" "Pointer to AVCCommonObj."
+ \param "blkidx" "The current block index."
+ \return "Most probable mode."
+ */
+ int FindMostProbableI4Mode(AVCCommonObj *video, int blkidx);
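+
+ /*
+ A minimal sketch of the standard rule behind FindMostProbableI4Mode (assuming
+ hypothetical inputs leftMode/aboveMode that are set to -1 when the neighboring
+ 4x4 block is unavailable or not intra 4x4 coded); this is illustrative, not the
+ original implementation:
+
+ static int sketch_most_probable_i4(int leftMode, int aboveMode)
+ {
+     if (leftMode < 0 || aboveMode < 0)
+     {
+         return 2;  // DC prediction mode when a neighbor is missing
+     }
+     return (leftMode < aboveMode) ? leftMode : aboveMode;  // min of the two modes
+ }
+ */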
+
+ /**
+ This function performs the bulk of the 4x4 block-level work inside
+ mb_intra4x4_search.
+ \param "encvid" "Pointer to AVCEncObject."
+ \param "blkidx" "The current 4x4 block index."
+ \param "cur" "Pointer to the reconstructed block."
+ \param "org" "Pointer to the original block."
+ \return "Minimal cost, also set currMB->i4Mode"
+ */
+ int blk_intra4x4_search(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org);
+
+ /**
+ This function calculates the cost of a given I4 prediction mode.
+ \param "org" "Pointer to the original block."
+ \param "org_pitch" "Stride size of the original frame."
+ \param "pred" "Pointer to the prediction block. (encvid->pred_i4)"
+ \param "cost" "Pointer to the minimal cost (to be updated)."
+ \return "void"
+ */
+ void cost_i4(uint8 *org, int org_pitch, uint8 *pred, uint16 *cost);
+
+ /**
+ This function performs chroma intra search. Each mode is saved in encvid->pred_ic.
+ \param "encvid" "Pointer to AVCEncObject."
+ \return "void"
+ */
+ void chroma_intra_search(AVCEncObject *encvid);
+
+ /**
+ This function calculates the cost of a chroma prediction mode.
+ \param "orgCb" "Pointer to the original Cb block."
+ \param "orgCr" "Pointer to the original Cr block."
+ \param "org_pitch" "Stride size of the original frame."
+ \param "pred" "Pointer to the prediction block (encvid->pred_ic)"
+ \param "mincost" "Minimal cost so far."
+ \return "Cost."
+ */
+
+ int SATDChroma(uint8 *orgCb, uint8 *orgCr, int org_pitch, uint8 *pred, int mincost);
+
+ /*-------------- motion_comp.c ---------------*/
+
+ /**
+ This is the main function to perform inter prediction.
+ \param "encvid" "Pointer to AVCEncObject."
+ \param "video" "Pointer to AVCCommonObj."
+ \return "void".
+ */
+ void AVCMBMotionComp(AVCEncObject *encvid, AVCCommonObj *video);
+
+
+ /**
+ This function is called for luma motion compensation.
+ \param "ref" "Pointer to the origin of a reference luma."
+ \param "picwidth" "Width of the picture."
+ \param "picheight" "Height of the picture."
+ \param "x_pos" "X-coordinate of the predicted block in quarter pel resolution."
+ \param "y_pos" "Y-coordinate of the predicted block in quarter pel resolution."
+ \param "pred" "Pointer to the output predicted block."
+ \param "pred_pitch" "Width of pred."
+ \param "blkwidth" "Width of the current partition."
+ \param "blkheight" "Height of the current partition."
+ \return "void"
+ */
+ void eLumaMotionComp(uint8 *ref, int picwidth, int picheight,
+ int x_pos, int y_pos,
+ uint8 *pred, int pred_pitch,
+ int blkwidth, int blkheight);
+
+ void eFullPelMC(uint8 *in, int inwidth, uint8 *out, int outpitch,
+ int blkwidth, int blkheight);
+
+ void eHorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
+ int blkwidth, int blkheight, int dx);
+
+ void eHorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,
+ int blkwidth, int blkheight, int dx);
+
+ void eHorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,
+ int blkwidth, int blkheight);
+
+ void eVertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
+ int blkwidth, int blkheight, int dy);
+
+ void eVertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch,
+ int blkwidth, int blkheight);
+
+ void eVertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch,
+ int blkwidth, int blkheight, int dy);
+
+ void eDiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch,
+ uint8 *out, int outpitch,
+ int blkwidth, int blkheight);
+
+ void eChromaMotionComp(uint8 *ref, int picwidth, int picheight,
+ int x_pos, int y_pos, uint8 *pred, int pred_pitch,
+ int blkwidth, int blkheight);
+
+ void eChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight);
+
+ void eChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight);
+
+ void eChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight);
+
+ void eChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight);
+
+ void eChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight);
+
+ void eChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight);
+
+ void eChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight);
+
+
+ /*-------------- motion_est.c ---------------*/
+
+ /**
+ Allocate and initialize the arrays needed by the motion search algorithm.
+ \param "avcHandle" "Pointer to the encoder handle."
+ \return "AVC_SUCCESS or AVC_MEMORY_FAIL."
+ */
+ AVCEnc_Status InitMotionSearchModule(AVCHandle *avcHandle);
+
+ /**
+ Clean up memory allocated in InitMotionSearchModule.
+ \param "envid" "Pointer to AVCEncObject."
+ \return "void."
+ */
+ void CleanMotionSearchModule(AVCHandle *avcHandle);
+
+
+ /**
+ This function performs motion estimation for all macroblocks in a frame during InitFrame.
+ The goal is to find the best MB partition for inter prediction and to determine whether intra
+ search is needed for any MBs. This intra MB tendency can be used for scene change detection.
+ \param "encvid" "Pointer to AVCEncObject."
+ \return "void"
+ */
+ void AVCMotionEstimation(AVCEncObject *encvid);
+
+ /**
+ This function performs repetitive edge padding of the reference picture by replicating 16 pixels
+ around the luma plane and 8 pixels around the chroma planes.
+ \param "refPic" "Pointer to the reference picture."
+ \return "void"
+ */
+ void AVCPaddingEdge(AVCPictureData *refPic);
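+
+ /*
+ A minimal sketch of repetitive edge padding for a single plane (illustrative
+ only; 'pad' would be 16 for luma and 8 for chroma, and the buffer is assumed
+ to have room for the padded border around the valid area):
+
+ static void sketch_pad_plane(uint8 *plane, int width, int height, int pitch, int pad)
+ {
+     int i;
+     uint8 *row = plane;
+     for (i = 0; i < height; i++)  // replicate left and right edge pixels
+     {
+         memset(row - pad, row[0], pad);
+         memset(row + width, row[width - 1], pad);
+         row += pitch;
+     }
+     for (i = 1; i <= pad; i++)    // replicate the (already padded) top and bottom rows
+     {
+         memcpy(plane - pad - i * pitch, plane - pad, width + 2 * pad);
+         memcpy(plane - pad + (height - 1 + i) * pitch,
+                plane - pad + (height - 1) * pitch, width + 2 * pad);
+     }
+ }
+ */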
+
+ /**
+ This function keeps track of intra refresh macroblock locations.
+ \param "encvid" "Pointer to the global array structure AVCEncObject."
+ \param "mblock" "Pointer to the array of AVCMacroblock structures."
+ \param "totalMB" "Total number of MBs in a frame."
+ \param "numRefresh" "Number of MB to be intra refresh in a single frame."
+ \return "void"
+ */
+ void AVCRasterIntraUpdate(AVCEncObject *encvid, AVCMacroblock *mblock, int totalMB, int numRefresh);
+
+#ifdef HTFM
+ void InitHTFM(VideoEncData *encvid, HTFM_Stat *htfm_stat, double *newvar, int *collect);
+ void UpdateHTFM(AVCEncObject *encvid, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat);
+ void CalcThreshold(double pf, double exp_lamda[], int nrmlz_th[]);
+ void HTFMPrepareCurMB_AVC(AVCEncObject *encvid, HTFM_Stat *htfm_stat, uint8 *cur, int pitch);
+#endif
+
+ /**
+ This function reads the input MB into a smaller, faster memory space to minimize cache misses.
+ \param "encvid" "Pointer to the global AVCEncObject."
+ \param "cur" "Pointer to the original input macroblock."
+ \param "pitch" "Stride size of the input frame (luma)."
+ \return "void"
+ */
+ void AVCPrepareCurMB(AVCEncObject *encvid, uint8 *cur, int pitch);
+
+ /**
+ Performs motion vector search for a macroblock.
+ \param "encvid" "Pointer to AVCEncObject structure."
+ \param "cur" "Pointer to the current macroblock in the input frame."
+ \param "best_cand" "Array of best candidates (to be filled in and returned)."
+ \param "i0" "X-coordinate of the macroblock."
+ \param "j0" "Y-coordinate of the macroblock."
+ \param "type_pred" "Indicates the type of operations."
+ \param "FS_en" "Flag for fullsearch enable."
+ \param "hp_guess" "Guess for half-pel search."
+ \return "void"
+ */
+ void AVCMBMotionSearch(AVCEncObject *encvid, uint8 *cur, uint8 *best_cand[],
+ int i0, int j0, int type_pred, int FS_en, int *hp_guess);
+
+//AVCEnc_Status AVCMBMotionSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum,
+// int num_pass);
+
+ /**
+ Perform full-pel exhaustive search around the predicted MV.
+ \param "encvid" "Pointer to AVCEncObject structure."
+ \param "prev" "Pointer to the reference frame."
+ \param "cur" "Pointer to the input macroblock."
+ \param "imin" "Pointer to minimal mv (x)."
+ \param "jmin" "Pointer to minimal mv (y)."
+ \param "ilow, ihigh, jlow, jhigh" "Lower bound on search range."
+ \param "cmvx, cmvy" "Predicted MV value."
+
+ \return "The cost function of the best candidate."
+ */
+ int AVCFullSearch(AVCEncObject *encvid, uint8 *prev, uint8 *cur,
+ int *imin, int *jmin, int ilow, int ihigh, int jlow, int jhigh,
+ int cmvx, int cmvy);
+
+ /**
+ Select candidates from neighboring blocks according to the type of
+ prediction.
+ \param "mvx" "Pointer to the candidate, x-coordinate."
+ \param "mvy" "Pointer to the candidate, y-coordinate."
+ \param "num_can" "Pointer to the number of candidates returned."
+ \param "imb" "The MB index x-coordinate."
+ \param "jmb" "The MB index y-coordinate."
+ \param "type_pred" "Type of the prediction."
+ \param "cmvx, cmvy" "Pointer to predicted MV (modified version)."
+ \return "void."
+ */
+ void AVCCandidateSelection(int *mvx, int *mvy, int *num_can, int imb, int jmb,
+ AVCEncObject *encvid, int type_pred, int *cmvx, int *cmvy);
+
+ /**
+ Utility function to move the values in the array dn according to the new
+ location to avoid redundant calculation.
+ \param "dn" "Array of integer of size 9."
+ \param "new_loc" "New location index."
+ \return "void."
+ */
+ void AVCMoveNeighborSAD(int dn[], int new_loc);
+
+ /**
+ Find minimum index of dn.
+ \param "dn" "Array of integer of size 9."
+ \return "The index of dn with the smallest dn[] value."
+ */
+ int AVCFindMin(int dn[]);
+
+
+ /*------------- findhalfpel.c -------------------*/
+
+ /**
+ Search for the best half-pel resolution MV around the full-pel MV.
+ \param "encvid" "Pointer to the global AVCEncObject structure."
+ \param "cur" "Pointer to the current macroblock."
+ \param "mot" "Pointer to the AVCMV array of the frame."
+ \param "ncand" "Pointer to the origin of the fullsearch result."
+ \param "xpos" "The current MB position in x."
+ \param "ypos" "The current MB position in y."
+ \param "hp_guess" "Input to help speedup the search."
+ \param "cmvx, cmvy" "Predicted motion vector use for mvcost."
+ \return "Minimal cost (SATD) without MV cost. (for rate control purpose)"
+ */
+ int AVCFindHalfPelMB(AVCEncObject *encvid, uint8 *cur, AVCMV *mot, uint8 *ncand,
+ int xpos, int ypos, int hp_guess, int cmvx, int cmvy);
+
+ /**
+ This function generates sub-pel pixels required to do subpel MV search.
+ \param "subpel_pred" "Pointer to 2-D array, each array for each position."
+ \param "ncand" "Pointer to the full-pel center position in ref frame."
+ \param "lx" "Pitch of the ref frame."
+ \return "void"
+ */
+ void GenerateHalfPelPred(uint8 *subpel_pred, uint8 *ncand, int lx);
+
+ /**
+ This function calculates the half-pel vertical interpolation, with clipping, for a 4x17 region.
+ \param "dst" "Pointer to destination."
+ \param "ref" "Pointer to the starting reference pixel."
+ \return "void."
+ */
+ void VertInterpWClip(uint8 *dst, uint8 *ref);
+
+ /**
+ This function generates quarter-pel pixels around the best half-pel result
+ during the sub-pel MV search.
+ \param "bilin_base" "Array of pointers to be used as basis for q-pel interp."
+ \param "qpel_pred" "Array of pointers pointing to quarter-pel candidates."
+ \param "hpel_pos" "Best half-pel position at the center."
+ \return "void"
+ */
+ void GenerateQuartPelPred(uint8 **bilin_base, uint8 *qpel_pred, int hpel_pos);
+
+ /**
+ This function calculates the SATD of a subpel candidate.
+ \param "cand" "Pointer to a candidate."
+ \param "cur" "Pointer to the current block."
+ \param "dmin" "Min-so-far SATD."
+ \return "Sum of Absolute Transformed Difference."
+ */
+ int SATD_MB(uint8 *cand, uint8 *cur, int dmin);
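+
+ /* Note: SATD here is typically the sum of absolute values of the 4x4
+    Hadamard-transformed difference between the candidate and the current
+    block (an assumption based on common AVC encoder practice, not a
+    statement about this particular implementation). */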
+
+ /*------------- rate_control.c -------------------*/
+
+ /** This utility function returns the average QP of the previously encoded frame.
+ \param "rateCtrl" "Pointer to AVCRateControl structure."
+ \return "Average QP."
+ */
+ int GetAvgFrameQP(AVCRateControl *rateCtrl);
+
+ /**
+ This function takes the timestamp of the input frame and determines whether it should be
+ encoded or skipped.
+ \param "encvid" "Pointer to the AVCEncObject structure."
+ \param "rateCtrl" "Pointer to the AVCRateControl structure."
+ \param "modTime" "The 32 bit timestamp of the input frame."
+ \param "frameNum" "Pointer to the frame number if to be encoded."
+ \return "AVC_SUCCESS or else."
+ */
+ AVCEnc_Status RCDetermineFrameNum(AVCEncObject *encvid, AVCRateControl *rateCtrl, uint32 modTime, uint *frameNum);
+
+ /**
+ This function updates the buffer fullness when frames are dropped, either by the
+ rate control algorithm or by the user, to make sure that the target bit rate is still met.
+ \param "video" "Pointer to the common object structure."
+ \param "rateCtrl" "Pointer to rate control structure."
+ \param "frameInc" "Difference of the current frame number and previous frame number."
+ \return "void."
+ */
+ void RCUpdateBuffer(AVCCommonObj *video, AVCRateControl *rateCtrl, int frameInc);
+
+ /**
+ This function initializes the rate control module and allocates the necessary buffers.
+ \param "avcHandle" "Pointer to the encoder handle."
+ \return "AVCENC_SUCCESS or AVCENC_MEMORY_FAIL."
+ */
+ AVCEnc_Status InitRateControlModule(AVCHandle *avcHandle);
+
+ /**
+ This function frees buffers allocated in InitRateControlModule.
+ \param "avcHandle" "Pointer to the encoder handle."
+ \return "void."
+ */
+ void CleanupRateControlModule(AVCHandle *avcHandle);
+
+ /**
+ This function is called at the beginning of each GOP or at the first IDR frame. It calculates
+ the target bits for the GOP.
+ \param "encvid" "Pointer to the encoder object."
+ \return "void."
+ */
+ void RCInitGOP(AVCEncObject *encvid);
+
+ /**
+ This function calculates target bits for a particular frame.
+ \param "video" "Pointer to the AVCEncObject structure."
+ \return "void"
+ */
+ void RCInitFrameQP(AVCEncObject *video);
+
+ /**
+ This function calculates QP for the upcoming frame or basic unit.
+ \param "encvid" "Pointer to the encoder object."
+ \param "rateCtrl" "Pointer to the rate control object."
+ \return "QP value ranging from 0-51."
+ */
+ int RCCalculateQP(AVCEncObject *encvid, AVCRateControl *rateCtrl);
+
+ /**
+ This function translates the luma QP to chroma QP and calculates lambda based on QP.
+ \param "video" "Pointer to the AVCEncObject structure."
+ \return "void"
+ */
+ void RCInitChromaQP(AVCEncObject *encvid);
+
+ /**
+ This function is called before encoding each macroblock.
+ \param "encvid" "Pointer to the encoder object."
+ \return "void."
+ */
+ void RCInitMBQP(AVCEncObject *encvid);
+
+ /**
+ This function updates bit usage statistics after encoding a macroblock.
+ \param "video" "Pointer to AVCCommonObj."
+ \param "rateCtrl" "Pointer to AVCRateControl."
+ \param "num_header_bits" "Number of bits used for MB header."
+ \param "num_texture_bits" "Number of bits used for MB texture."
+ \return "void"
+ */
+ void RCPostMB(AVCCommonObj *video, AVCRateControl *rateCtrl, int num_header_bits, int num_texture_bits);
+
+ /**
+ This function calculates the difference between prediction and original MB.
+ \param "encvid" "Pointer to the encoder object."
+ \param "currMB" "Pointer to the current macroblock structure."
+ \param "orgL" "Pointer to the original MB."
+ \param "orgPitch" "Pointer to the original picture pitch."
+ \return "void."
+ */
+ void RCCalculateMAD(AVCEncObject *encvid, AVCMacroblock *currMB, uint8 *orgL, int orgPitch);
+
+ /**
+ Restore QP related parameters of previous MB when current MB is skipped.
+ \param "currMB" "Pointer to the current macroblock."
+ \param "video" "Pointer to the common video structure."
+ \param "encvid" "Pointer to the global encoding structure."
+ \return "void"
+ */
+ void RCRestoreQP(AVCMacroblock *currMB, AVCCommonObj *video, AVCEncObject *encvid);
+
+ /**
+ This function is called after done with a frame.
+ \param "encvid" "Pointer to the encoder object."
+ \return "AVCENC_SUCCESS or AVCENC_SKIPPED_PICTURE when bufer overflow (need to discard current frame)."
+ */
+ AVCEnc_Status RCUpdateFrame(AVCEncObject *encvid);
+
+ /*--------- residual.c -------------------*/
+
+ /**
+ This function encodes the intra PCM data and fills it into the corresponding location
+ in the current picture.
+ \param "video" "Pointer to AVCEncObject."
+ \return "AVCENC_SUCCESS if success, or else for bitstream errors."
+ */
+ AVCEnc_Status EncodeIntraPCM(AVCEncObject *video);
+
+ /**
+ This function performs CAVLC syntax encoding of the run and level information of the coefficients.
+ The level and run arrays are members of the AVCEncObject structure, populated by the TransQuantZZ,
+ TransQuantIntraDC and TransQuantChromaDC functions.
+ \param "video" "Pointer to AVCEncObject."
+ \param "type" "One of AVCResidualType for a particular 4x4 block."
+ \param "bindx" "Block index or number of nonzero coefficients for AVC_Intra16DC and AVC_ChromaDC mode."
+ \param "currMB" "Pointer to the current macroblock structure."
+ \return "AVCENC_SUCCESS for success."
+ \Note "This function has 32-bit machine specific instruction!!!!"
+ */
+ AVCEnc_Status enc_residual_block(AVCEncObject *encvid, AVCResidualType type, int bindx, AVCMacroblock *currMB);
+
+
+ /*------------- sad.c ---------------------------*/
+
+
+ int AVCSAD_MB_HalfPel_Cxhyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
+ int AVCSAD_MB_HalfPel_Cyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
+ int AVCSAD_MB_HalfPel_Cxh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
+ int AVCSAD_Macroblock_C(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
+
+#ifdef HTFM /* 3/2/1, Hypothesis Testing Fast Matching */
+ int AVCSAD_MB_HP_HTFM_Collectxhyh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info);
+ int AVCSAD_MB_HP_HTFM_Collectyh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info);
+ int AVCSAD_MB_HP_HTFM_Collectxh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info);
+ int AVCSAD_MB_HP_HTFMxhyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
+ int AVCSAD_MB_HP_HTFMyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
+ int AVCSAD_MB_HP_HTFMxh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
+ int AVCSAD_MB_HTFM_Collect(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
+ int AVCSAD_MB_HTFM(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);
+#endif
+
+
+ /*------------- slice.c -------------------------*/
+
+ /**
+ This function performs the main encoding loop for a slice.
+ \param "encvid" "Pointer to AVCEncObject."
+ \return "AVCENC_SUCCESS for success, AVCENC_PICTURE_READY for end-of-picture and
+ AVCENC_FAIL or AVCENC_SLICE_EMPTY otherwise."
+ */
+ AVCEnc_Status AVCEncodeSlice(AVCEncObject *encvid);
+
+ /**
+ This function performs the main encoding operation for one macroblock.
+ \param "video" "pointer to AVCEncObject."
+ \return "AVCENC_SUCCESS for success, or other bitstream related failure status."
+ */
+ AVCEnc_Status EncodeMB(AVCEncObject *video);
+
+ /**
+ This function calls prediction INTRA/INTER functions, transform,
+ quantization and zigzag scanning to get the run-level symbols.
+ \param "encvid" "pointer to AVCEncObject."
+ \param "curL" "pointer to Luma component of the current frame.
+ \param "curCb" "pointer to Cb component of the current frame.
+ \param "curCr" "pointer to Cr component of the current frame.
+ \return "void for now."
+ */
+ void MBPredTransQuantZZ(AVCEncObject *encvid, uint8 *curL, uint8 *curCb, uint8 *curCr);
+
+ /**
+ This function copies the content of the prediction MB into the reconstructed YUV
+ frame directly.
+ \param "curL" "Pointer to the destination Y component."
+ \param "curCb" "Pointer to the destination Cb component."
+ \param "curCr" "Pointer to the destination Cr component."
+ \param "predBlock" "Pointer to the prediction MB."
+ \param "picWidth" "The width of the frame."
+ \return "None."
+ */
+ void Copy_MB(uint8 *curL, uint8 *curCb, uint8 *curCr, uint8 *predBlock, int picWidth);
+
+ /**
+ This function encodes the mb_type, CBP, prediction mode, ref idx and MV.
+ \param "currMB" "Pointer to the current macroblock structure."
+ \param "video" "Pointer to the AVCEncObject structure."
+ \return "AVCENC_SUCCESS for success or else for fail."
+ */
+ AVCEnc_Status EncodeMBHeader(AVCMacroblock *currMB, AVCEncObject *video);
+
+ /**
+ This function finds the right mb_type for a macroblock given the mbMode, CBP,
+ NumPart, PredPartMode.
+ \param "currMB" "Pointer to the current macroblock structure."
+ \param "slice_type" "Value of the slice_type."
+ \return "mb_type."
+ */
+ uint InterpretMBType(AVCMacroblock *currMB, int slice_type);
+
+ /**
+ This function encodes the mb_pred part of the macroblock data.
+ \param "video" "Pointer to the AVCCommonObj structure."
+ \param "currMB" "Pointer to the current macroblock structure."
+ \param "stream" "Pointer to the AVCEncBitstream structure."
+ \return "AVCENC_SUCCESS for success or bitstream fail status."
+ */
+ AVCEnc_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream);
+
+ /**
+ This function encodes the sub_mb_pred part of the macroblock data.
+ \param "video" "Pointer to the AVCCommonObj structure."
+ \param "currMB" "Pointer to the current macroblock structure."
+ \param "stream" "Pointer to the AVCEncBitstream structure."
+ \return "AVCENC_SUCCESS for success or bitstream fail status."
+ */
+ AVCEnc_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream);
+
+ /**
+ This function interprets the sub_mb_type and sets the necessary information
+ in the macroblock structure when the slice type is AVC_P_SLICE.
+ \param "mblock" "Pointer to current AVCMacroblock."
+ \param "sub_mb_type" "From the syntax bitstream."
+ \return "void"
+ */
+ void InterpretSubMBTypeP(AVCMacroblock *mblock, uint *sub_mb_type);
+
+ /**
+ This function interprets the sub_mb_type and sets the necessary information
+ in the macroblock structure when the slice type is AVC_B_SLICE.
+ \param "mblock" "Pointer to current AVCMacroblock."
+ \param "sub_mb_type" "From the syntax bitstream."
+ \return "void"
+ */
+ void InterpretSubMBTypeB(AVCMacroblock *mblock, uint *sub_mb_type);
+
+ /**
+ This function encodes the intra 4x4 mode. It calculates the predicted I4x4 mode and the
+ remainder to be encoded.
+ \param "video" "Pointer to AVCEncObject structure."
+ \param "currMB" "Pointer to the AVCMacroblock structure."
+ \param "stream" "Pointer to AVCEncBitstream sructure."
+ \return "AVCENC_SUCCESS for success."
+ */
+ AVCEnc_Status EncodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream);
+
+ /*------------- vlc_encode.c -----------------------*/
+ /**
+ This function encodes a value as an unsigned Exp-Golomb codeword and writes it to the bitstream.
+ \param "bitstream" "Pointer to AVCEncBitstream."
+ \param "codeNum" "The codeNum value to be encoded."
+ \return "AVCENC_SUCCESS for success or bitstream error messages for fail."
+ */
+ AVCEnc_Status ue_v(AVCEncBitstream *bitstream, uint codeNum);
+
+ /**
+ This function maps and encodes signed Exp-Golomb codes.
+ \param "bitstream" "Pointer to AVCEncBitstream."
+ \param "value" "Pointer to syntax element value."
+ \return "AVCENC_SUCCESS or AVCENC_FAIL."
+ */
+ AVCEnc_Status se_v(AVCEncBitstream *bitstream, int value);
+
+ /**
+ This function maps and encodes truncated Exp-Golomb codes.
+ \param "bitstream" "Pointer to AVCEncBitstream."
+ \param "value" "Pointer to syntax element value."
+ \param "range" "Range of the value as input to determine the algorithm."
+ \return "AVCENC_SUCCESS or AVCENC_FAIL."
+ */
+ AVCEnc_Status te_v(AVCEncBitstream *bitstream, uint value, uint range);
+
+ /**
+ This function creates Exp-Golomb codeword from codeNum.
+ \param "bitstream" "Pointer to AVCEncBitstream."
+ \param "codeNum" "Pointer to the codeNum value."
+ \return "AVCENC_SUCCESS for success or bitstream error messages for fail."
+ */
+ AVCEnc_Status SetEGBitstring(AVCEncBitstream *bitstream, uint codeNum);
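+
+ /*
+ A minimal sketch of the Exp-Golomb construction behind ue_v()/se_v()/
+ SetEGBitstring(), written against BitstreamWriteBits() above (illustrative
+ only, assuming 2*len+1 <= 32 so a single write suffices):
+
+ static AVCEnc_Status sketch_write_ue(AVCEncBitstream *stream, uint codeNum)
+ {
+     uint info = codeNum + 1;
+     int len = 0;
+     while ((info >> (len + 1)) != 0) len++;   // len = floor(log2(codeNum+1))
+     // len leading zeros, then the (len+1)-bit value codeNum+1
+     return BitstreamWriteBits(stream, (len << 1) + 1, info);
+ }
+
+ static AVCEnc_Status sketch_write_se(AVCEncBitstream *stream, int value)
+ {
+     // standard signed mapping: positive values map to odd codeNum, others to even
+     uint codeNum = (value > 0) ? (uint)(2 * value - 1) : (uint)(-2 * value);
+     return sketch_write_ue(stream, codeNum);
+ }
+ */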
+
+ /**
+ This function performs CAVLC encoding of the CBP (coded block pattern) of a macroblock
+ by mapping the CBP to the corresponding VLC codeNum and then calling ue_v().
+ \param "currMB" "Pointer to the current AVCMacroblock structure."
+ \param "stream" "Pointer to the AVCEncBitstream."
+ \return "AVCENC_SUCCESS for success or else for bitstream failure."
+ */
+ AVCEnc_Status EncodeCBP(AVCMacroblock *currMB, AVCEncBitstream *stream);
+
+ /**
+ This function encodes the trailing ones and total coefficients (coeff_token).
+ \param "stream" "Pointer to the AVCEncBitstream."
+ \param "TrailingOnes" "The number of trailing one coefficients."
+ \param "TotalCoeff" "The total number of nonzero coefficients."
+ \param "nC" "Context for the number of nonzero coefficients (prediction context)."
+ \return "AVCENC_SUCCESS for success or else for bitstream failure."
+ */
+ AVCEnc_Status ce_TotalCoeffTrailingOnes(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff, int nC);
+
+ /**
+ This function encodes the trailing ones and total coefficients for a chroma DC block.
+ \param "stream" "Pointer to the AVCEncBitstream."
+ \param "TrailingOnes" "The number of trailing one coefficients."
+ \param "TotalCoeff" "The total number of nonzero coefficients."
+ \return "AVCENC_SUCCESS for success or else for bitstream failure."
+ */
+ AVCEnc_Status ce_TotalCoeffTrailingOnesChromaDC(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff);
+
+ /**
+ This function encodes total_zeros value as in Table 9-7 and 9-8.
+ \param "stream" "Pointer to the AVCEncBitstream."
+ \param "TotalZeros" "The total_zeros value."
+ \param "TotalCoeff" "The total coefficient variable output."
+ \return "AVCENC_SUCCESS for success or else for bitstream failure."
+ */
+ AVCEnc_Status ce_TotalZeros(AVCEncBitstream *stream, int total_zeros, int TotalCoeff);
+
+ /**
+ This function encodes total_zeros VLC syntax for chroma DC as in Table 9-9.
+ \param "stream" "Pointer to the AVCEncBitstream."
+ \param "TotalZeros" "The total_zeros value."
+ \param "TotalCoeff" "The total coefficient variable output."
+ \return "AVCENC_SUCCESS for success or else for bitstream failure."
+ */
+ AVCEnc_Status ce_TotalZerosChromaDC(AVCEncBitstream *stream, int total_zeros, int TotalCoeff);
+
+ /**
+ This function encodes run_before VLC syntax as in Table 9-10.
+ \param "stream" "Pointer to the AVCEncBitstream."
+ \param "run_before" "The run_before value."
+ \param "zerosLeft" "The context for number of zeros left."
+ \return "AVCENC_SUCCESS for success or else for bitstream failure."
+ */
+ AVCEnc_Status ce_RunBefore(AVCEncBitstream *stream, int run_before, int zerosLeft);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _AVCENC_LIB_H_ */
+
diff --git a/media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp b/media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp
new file mode 100644
index 0000000..75ab514
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp
@@ -0,0 +1,336 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+
+#define WORD_SIZE 32
+
+/* array for trailing bit pattern as function of number of bits */
+/* the first one is unused. */
+const static uint8 trailing_bits[9] = {0, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80};
+
+/* ======================================================================== */
+/* Function : BitstreamEncInit() */
+/* Date : 11/4/2003 */
+/* Purpose : Populate bitstream structure with bitstream buffer and size */
+/* it also initializes internal data */
+/* In/out : */
+/* Return : AVCENC_SUCCESS if successful, AVCENC_BITSTREAM_INIT_FAIL if failed. */
+/* Modified : */
+/* ======================================================================== */
+/* |--------|--------|----~~~~~-----|---------|---------|---------|
+ ^ ^write_pos ^buf_size
+ bitstreamBuffer <--------->
+ current_word
+
+ |-----xxxxxxxxxxxxx| = current_word 32 or 16 bits
+ <---->
+ bit_left
+ ======================================================================== */
+
+AVCEnc_Status BitstreamEncInit(AVCEncBitstream *stream, uint8 *buffer, int buf_size,
+ uint8 *overrunBuffer, int oBSize)
+{
+ if (stream == NULL || buffer == NULL || buf_size <= 0)
+ {
+ return AVCENC_BITSTREAM_INIT_FAIL;
+ }
+
+ stream->bitstreamBuffer = buffer;
+
+ stream->buf_size = buf_size;
+
+ stream->write_pos = 0;
+
+ stream->count_zeros = 0;
+
+ stream->current_word = 0;
+
+ stream->bit_left = WORD_SIZE;
+
+ stream->overrunBuffer = overrunBuffer;
+
+ stream->oBSize = oBSize;
+
+ return AVCENC_SUCCESS;
+}
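+
+/* Illustrative usage sketch (assumptions: a caller-provided buffer, no overrun
+   buffer, and error handling omitted):
+
+    uint8 buf[256];
+    AVCEncBitstream bs;
+    uint nal_size;
+
+    BitstreamEncInit(&bs, buf, sizeof(buf), NULL, 0);
+    BitstreamWriteBits(&bs, 8, 0x67);       // e.g. one header byte
+    BitstreamWrite1Bit(&bs, 1);
+    BitstreamTrailingBits(&bs, &nal_size);  // pad to a byte boundary and flush
+*/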
+
+/* ======================================================================== */
+/* Function : AVCBitstreamSaveWord() */
+/* Date : 3/29/2004 */
+/* Purpose : Save the current_word into the buffer, byte-swap, and */
+/* add emulation prevention insertion. */
+/* In/out : */
+/* Return : AVCENC_SUCCESS if successful, AVCENC_BITSTREAM_BUFFER_FULL if */
+/* the buffer is full. */
+/* Modified : */
+/* ======================================================================== */
+AVCEnc_Status AVCBitstreamSaveWord(AVCEncBitstream *stream)
+{
+ int num_bits;
+ uint8 *write_pnt, byte;
+ uint current_word;
+
+ /* check number of bytes in current_word, must always be byte-aligned!!!! */
+ num_bits = WORD_SIZE - stream->bit_left; /* must be multiple of 8 !!*/
+
+ if (stream->buf_size - stream->write_pos <= (num_bits >> 3) + 2) /* 2 more bytes for possible EPBS */
+ {
+ if (AVCENC_SUCCESS != AVCBitstreamUseOverrunBuffer(stream, (num_bits >> 3) + 2))
+ {
+ return AVCENC_BITSTREAM_BUFFER_FULL;
+ }
+ }
+
+ /* write word, byte-by-byte */
+ write_pnt = stream->bitstreamBuffer + stream->write_pos;
+ current_word = stream->current_word;
+ while (num_bits) /* no need to check stream->buf_size and stream->write_pos, taken care already */
+ {
+ num_bits -= 8;
+ byte = (current_word >> num_bits) & 0xFF;
+ if (byte != 0)
+ {
+ *write_pnt++ = byte;
+ stream->write_pos++;
+ stream->count_zeros = 0;
+ }
+ else
+ {
+ stream->count_zeros++;
+ *write_pnt++ = byte;
+ stream->write_pos++;
+ if (stream->count_zeros == 2)
+ { /* for num_bits = 32, this can add 2 more bytes extra for EPBS */
+ *write_pnt++ = 0x3;
+ stream->write_pos++;
+ stream->count_zeros = 0;
+ }
+ }
+ }
+
+ /* reset current_word and bit_left */
+ stream->current_word = 0;
+ stream->bit_left = WORD_SIZE;
+
+ return AVCENC_SUCCESS;
+}
+
+/* ======================================================================== */
+/* Function : BitstreamWriteBits() */
+/* Date : 3/29/2004 */
+/* Purpose : Write up to machine word. */
+/* In/out : Unused bits in 'code' must be all zeros. */
+/* Return : AVCENC_SUCCESS if successful, AVCENC_BITSTREAM_BUFFER_FULL if */
+/* the buffer is full. */
+/* Modified : */
+/* ======================================================================== */
+AVCEnc_Status BitstreamWriteBits(AVCEncBitstream *stream, int nBits, uint code)
+{
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ int bit_left = stream->bit_left;
+ uint current_word = stream->current_word;
+
+ //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,"BitstreamWriteBits",nBits,-1);
+
+ if (nBits > WORD_SIZE) /* has to be taken care of specially */
+ {
+ return AVCENC_FAIL; /* for now */
+ /* otherwise, break it down to 2 write of less than 16 bits at a time. */
+ }
+
+ if (nBits <= bit_left) /* more bits left in current_word */
+ {
+ stream->current_word = (current_word << nBits) | code;
+ stream->bit_left -= nBits;
+ if (stream->bit_left == 0) /* prepare for the next word */
+ {
+ status = AVCBitstreamSaveWord(stream);
+ return status;
+ }
+ }
+ else
+ {
+ stream->current_word = (current_word << bit_left) | (code >> (nBits - bit_left));
+
+ nBits -= bit_left;
+
+ stream->bit_left = 0;
+
+ status = AVCBitstreamSaveWord(stream); /* save current word */
+
+ stream->bit_left = WORD_SIZE - nBits;
+
+ stream->current_word = code; /* no extra masking for code, must be handled before saving */
+ }
+
+ return status;
+}
+
+
+/* ======================================================================== */
+/* Function : BitstreamWrite1Bit() */
+/* Date : 3/30/2004 */
+/* Purpose : Write 1 bit */
+/* In/out : Unused bits in 'code' must be all zeros. */
+/* Return : AVCENC_SUCCESS if successful, AVCENC_BITSTREAM_BUFFER_FULL if */
+/* the buffer is full. */
+/* Modified : */
+/* ======================================================================== */
+AVCEnc_Status BitstreamWrite1Bit(AVCEncBitstream *stream, uint code)
+{
+ AVCEnc_Status status;
+ uint current_word = stream->current_word;
+
+ //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,"BitstreamWrite1Bit",code,-1);
+
+ //if(1 <= bit_left) /* more bits left in current_word */
+    /* we can assume that there is always a positive bit_left in the current word */
+ stream->current_word = (current_word << 1) | code;
+ stream->bit_left--;
+ if (stream->bit_left == 0) /* prepare for the next word */
+ {
+ status = AVCBitstreamSaveWord(stream);
+ return status;
+ }
+
+ return AVCENC_SUCCESS;
+}
+
+
+/* ======================================================================== */
+/* Function : BitstreamTrailingBits() */
+/* Date : 3/31/2004 */
+/* Purpose : Add trailing bits and report the final EBSP size. */
+/* In/out : */
+/* Return : AVCENC_SUCCESS if successful, AVCENC_BITSTREAM_BUFFER_FULL if */
+/* the buffer is full. */
+/* Modified : */
+/* ======================================================================== */
+AVCEnc_Status BitstreamTrailingBits(AVCEncBitstream *bitstream, uint *nal_size)
+{
+ (void)(nal_size);
+
+ AVCEnc_Status status;
+ int bit_left = bitstream->bit_left;
+
+ bit_left &= 0x7; /* modulo by 8 */
+ if (bit_left == 0) bit_left = 8;
+ /* bitstream->bit_left == 0 cannot happen here since it would have been Saved already */
+
+ status = BitstreamWriteBits(bitstream, bit_left, trailing_bits[bit_left]);
+
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ /* if it's not saved, save it. */
+ //if(bitstream->bit_left<(WORD_SIZE<<3)) /* in fact, no need to check */
+ {
+ status = AVCBitstreamSaveWord(bitstream);
+ }
+
+ return status;
+}
+
+/* check whether it's byte-aligned */
+bool byte_aligned(AVCEncBitstream *stream)
+{
+ if (stream->bit_left % 8)
+ return false;
+ else
+ return true;
+}
+
+
+/* determine whether overrun buffer can be used or not */
+AVCEnc_Status AVCBitstreamUseOverrunBuffer(AVCEncBitstream* stream, int numExtraBytes)
+{
+ AVCEncObject *encvid = (AVCEncObject*)stream->encvid;
+
+ if (stream->overrunBuffer != NULL) // overrunBuffer is set
+ {
+ if (stream->bitstreamBuffer != stream->overrunBuffer) // not already used
+ {
+ if (stream->write_pos + numExtraBytes >= stream->oBSize)
+ {
+ stream->oBSize = stream->write_pos + numExtraBytes + 100;
+ stream->oBSize &= (~0x3); // make it multiple of 4
+
+ // allocate new overrun Buffer
+ if (encvid->overrunBuffer)
+ {
+ encvid->avcHandle->CBAVC_Free((uint32*)encvid->avcHandle->userData,
+ (int)encvid->overrunBuffer);
+ }
+
+ encvid->oBSize = stream->oBSize;
+ encvid->overrunBuffer = (uint8*) encvid->avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,
+ stream->oBSize, DEFAULT_ATTR);
+
+ stream->overrunBuffer = encvid->overrunBuffer;
+ if (stream->overrunBuffer == NULL)
+ {
+ return AVCENC_FAIL;
+ }
+ }
+
+ // copy everything to overrun buffer and start using it.
+ memcpy(stream->overrunBuffer, stream->bitstreamBuffer, stream->write_pos);
+ stream->bitstreamBuffer = stream->overrunBuffer;
+ stream->buf_size = stream->oBSize;
+ }
+ else // overrun buffer is already used
+ {
+ stream->oBSize = stream->write_pos + numExtraBytes + 100;
+ stream->oBSize &= (~0x3); // make it multiple of 4
+
+ // allocate new overrun buffer
+ encvid->oBSize = stream->oBSize;
+ encvid->overrunBuffer = (uint8*) encvid->avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,
+ stream->oBSize, DEFAULT_ATTR);
+
+ if (encvid->overrunBuffer == NULL)
+ {
+ return AVCENC_FAIL;
+ }
+
+
+ // copy from the old buffer to new buffer
+ memcpy(encvid->overrunBuffer, stream->overrunBuffer, stream->write_pos);
+ // free old buffer
+ encvid->avcHandle->CBAVC_Free((uint32*)encvid->avcHandle->userData,
+ (int)stream->overrunBuffer);
+
+ // assign pointer to new buffer
+ stream->overrunBuffer = encvid->overrunBuffer;
+ stream->bitstreamBuffer = stream->overrunBuffer;
+ stream->buf_size = stream->oBSize;
+ }
+
+ return AVCENC_SUCCESS;
+ }
+    else // overrunBuffer is not enabled.
+ {
+ return AVCENC_FAIL;
+ }
+
+}
+
+
+
diff --git a/media/libstagefright/codecs/avc/enc/src/block.cpp b/media/libstagefright/codecs/avc/enc/src/block.cpp
new file mode 100644
index 0000000..01e26a6
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/block.cpp
@@ -0,0 +1,1283 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+
+/* subtract the prediction and apply the forward transform */
+void trans(uint8 *cur, int pitch, uint8 *predBlock, int16 *dataBlock)
+{
+ int16 *ptr = dataBlock;
+ int r0, r1, r2, r3, j;
+ int curpitch = (uint)pitch >> 16;
+ int predpitch = (pitch & 0xFFFF);
+
+ /* horizontal */
+ j = 4;
+ while (j > 0)
+ {
+ /* calculate the residue first */
+ r0 = cur[0] - predBlock[0];
+ r1 = cur[1] - predBlock[1];
+ r2 = cur[2] - predBlock[2];
+ r3 = cur[3] - predBlock[3];
+
+ r0 += r3; //ptr[0] + ptr[3];
+ r3 = r0 - (r3 << 1); //ptr[0] - ptr[3];
+ r1 += r2; //ptr[1] + ptr[2];
+ r2 = r1 - (r2 << 1); //ptr[1] - ptr[2];
+
+ ptr[0] = r0 + r1;
+ ptr[2] = r0 - r1;
+ ptr[1] = (r3 << 1) + r2;
+ ptr[3] = r3 - (r2 << 1);
+
+ ptr += 16;
+ predBlock += predpitch;
+ cur += curpitch;
+ j--;
+ }
+ /* vertical */
+ ptr = dataBlock;
+ j = 4;
+ while (j > 0)
+ {
+ r0 = ptr[0] + ptr[48];
+ r3 = ptr[0] - ptr[48];
+ r1 = ptr[16] + ptr[32];
+ r2 = ptr[16] - ptr[32];
+
+ ptr[0] = r0 + r1;
+ ptr[32] = r0 - r1;
+ ptr[16] = (r3 << 1) + r2;
+ ptr[48] = r3 - (r2 << 1);
+
+ ptr++;
+ j--;
+ }
+
+ return ;
+}
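+
+/* For reference, the butterflies in trans() implement the standard AVC 4x4
+   forward core transform W = Cf * X * Cf^T, applied first to the rows and then
+   to the columns of the residual X, with
+
+       Cf = |  1   1   1   1 |
+            |  2   1  -1  -2 |
+            |  1  -1  -1   1 |
+            |  1  -2   2  -1 |
+*/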
+
+
+/* transform the residual, quantize, inverse quantize, inverse transform and write the output */
+int dct_luma(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org, int *coef_cost)
+{
+ AVCCommonObj *video = encvid->common;
+ int org_pitch = encvid->currInput->pitch;
+ int pitch = video->currPic->pitch;
+ int16 *coef = video->block;
+ uint8 *pred = video->pred_block; // size 16 for a 4x4 block
+ int pred_pitch = video->pred_pitch;
+ int r0, r1, r2, r3, j, k, idx;
+ int *level, *run;
+ int Qq, Rq, q_bits, qp_const, quant;
+ int data, lev, zero_run;
+ int numcoeff;
+
+ coef += ((blkidx & 0x3) << 2) + ((blkidx >> 2) << 6); /* point to the 4x4 block */
+
+ /* first take a 4x4 transform */
+ /* horizontal */
+ j = 4;
+ while (j > 0)
+ {
+ /* calculate the residue first */
+        r0 = org[0] - pred[0]; /* OPTIMIZABLE */
+ r1 = org[1] - pred[1];
+ r2 = org[2] - pred[2];
+ r3 = org[3] - pred[3];
+
+ r0 += r3; //ptr[0] + ptr[3];
+ r3 = r0 - (r3 << 1); //ptr[0] - ptr[3];
+ r1 += r2; //ptr[1] + ptr[2];
+ r2 = r1 - (r2 << 1); //ptr[1] - ptr[2];
+
+ coef[0] = r0 + r1;
+ coef[2] = r0 - r1;
+ coef[1] = (r3 << 1) + r2;
+ coef[3] = r3 - (r2 << 1);
+
+ coef += 16;
+ org += org_pitch;
+ pred += pred_pitch;
+ j--;
+ }
+ /* vertical */
+ coef -= 64;
+ pred -= (pred_pitch << 2);
+ j = 4;
+ while (j > 0) /* OPTIMIZABLE */
+ {
+ r0 = coef[0] + coef[48];
+ r3 = coef[0] - coef[48];
+ r1 = coef[16] + coef[32];
+ r2 = coef[16] - coef[32];
+
+ coef[0] = r0 + r1;
+ coef[32] = r0 - r1;
+ coef[16] = (r3 << 1) + r2;
+ coef[48] = r3 - (r2 << 1);
+
+ coef++;
+ j--;
+ }
+
+ coef -= 4;
+
+ /* quant */
+ level = encvid->level[ras2dec[blkidx]];
+ run = encvid->run[ras2dec[blkidx]];
+
+ Rq = video->QPy_mod_6;
+ Qq = video->QPy_div_6;
+ qp_const = encvid->qp_const;
+ q_bits = 15 + Qq;
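+
+    /* The loop below follows the usual AVC reference quantizer:
+       |level| = (|coef| * quant_coef[QP%6][k] + qp_const) >> (15 + QP/6),
+       where qp_const is a rounding offset set up elsewhere (typically about
+       2^q_bits/3 for intra and 2^q_bits/6 for inter in the reference code). */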
+
+ zero_run = 0;
+ numcoeff = 0;
+ for (k = 0; k < 16; k++)
+ {
+ idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */
+ data = coef[idx];
+ quant = quant_coef[Rq][k];
+ if (data > 0)
+ {
+ lev = data * quant + qp_const;
+ }
+ else
+ {
+ lev = -data * quant + qp_const;
+ }
+ lev >>= q_bits;
+ if (lev)
+ {
+ *coef_cost += ((lev > 1) ? MAX_VALUE : COEFF_COST[DISABLE_THRESHOLDING][zero_run]);
+
+ /* dequant */
+ quant = dequant_coefres[Rq][k];
+ if (data > 0)
+ {
+ level[numcoeff] = lev;
+ coef[idx] = (lev * quant) << Qq;
+ }
+ else
+ {
+ level[numcoeff] = -lev;
+ coef[idx] = (-lev * quant) << Qq;
+ }
+ run[numcoeff++] = zero_run;
+ zero_run = 0;
+ }
+ else
+ {
+ zero_run++;
+ coef[idx] = 0;
+ }
+ }
+
+ if (video->currMB->mb_intra) // only do inverse transform with intra block
+ {
+ if (numcoeff) /* then do inverse transform */
+ {
+ for (j = 4; j > 0; j--) /* horizontal */
+ {
+ r0 = coef[0] + coef[2];
+ r1 = coef[0] - coef[2];
+ r2 = (coef[1] >> 1) - coef[3];
+ r3 = coef[1] + (coef[3] >> 1);
+
+ coef[0] = r0 + r3;
+ coef[1] = r1 + r2;
+ coef[2] = r1 - r2;
+ coef[3] = r0 - r3;
+
+ coef += 16;
+ }
+
+ coef -= 64;
+ for (j = 4; j > 0; j--) /* vertical, has to be done after horizontal */
+ {
+ r0 = coef[0] + coef[32];
+ r1 = coef[0] - coef[32];
+ r2 = (coef[16] >> 1) - coef[48];
+ r3 = coef[16] + (coef[48] >> 1);
+ r0 += r3;
+ r3 = (r0 - (r3 << 1)); /* r0-r3 */
+ r1 += r2;
+ r2 = (r1 - (r2 << 1)); /* r1-r2 */
+ r0 += 32;
+ r1 += 32;
+ r2 += 32;
+ r3 += 32;
+
+ r0 = pred[0] + (r0 >> 6);
+ if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
+ r1 = *(pred += pred_pitch) + (r1 >> 6);
+ if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
+ r2 = *(pred += pred_pitch) + (r2 >> 6);
+ if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
+ r3 = pred[pred_pitch] + (r3 >> 6);
+ if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
+
+ *cur = r0;
+ *(cur += pitch) = r1;
+ *(cur += pitch) = r2;
+ cur[pitch] = r3;
+ cur -= (pitch << 1);
+ cur++;
+ pred -= (pred_pitch << 1);
+ pred++;
+ coef++;
+ }
+ }
+ else // copy from pred to cur
+ {
+ *((uint32*)cur) = *((uint32*)pred);
+ *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));
+ *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));
+ *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));
+ }
+ }
+
+ return numcoeff;
+}
+
+
+void MBInterIdct(AVCCommonObj *video, uint8 *curL, AVCMacroblock *currMB, int picPitch)
+{
+ int16 *coef, *coef8 = video->block;
+ uint8 *cur; // the same as curL
+ int b8, b4;
+ int r0, r1, r2, r3, j, blkidx;
+
+ for (b8 = 0; b8 < 4; b8++)
+ {
+ cur = curL;
+ coef = coef8;
+
+ if (currMB->CBP&(1 << b8))
+ {
+ for (b4 = 0; b4 < 4; b4++)
+ {
+ blkidx = blkIdx2blkXY[b8][b4];
+ /* do IDCT */
+ if (currMB->nz_coeff[blkidx])
+ {
+ for (j = 4; j > 0; j--) /* horizontal */
+ {
+ r0 = coef[0] + coef[2];
+ r1 = coef[0] - coef[2];
+ r2 = (coef[1] >> 1) - coef[3];
+ r3 = coef[1] + (coef[3] >> 1);
+
+ coef[0] = r0 + r3;
+ coef[1] = r1 + r2;
+ coef[2] = r1 - r2;
+ coef[3] = r0 - r3;
+
+ coef += 16;
+ }
+
+ coef -= 64;
+ for (j = 4; j > 0; j--) /* vertical, has to be done after horizontal */
+ {
+ r0 = coef[0] + coef[32];
+ r1 = coef[0] - coef[32];
+ r2 = (coef[16] >> 1) - coef[48];
+ r3 = coef[16] + (coef[48] >> 1);
+ r0 += r3;
+ r3 = (r0 - (r3 << 1)); /* r0-r3 */
+ r1 += r2;
+ r2 = (r1 - (r2 << 1)); /* r1-r2 */
+ r0 += 32;
+ r1 += 32;
+ r2 += 32;
+ r3 += 32;
+
+ r0 = cur[0] + (r0 >> 6);
+ if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
+ *cur = r0;
+ r1 = *(cur += picPitch) + (r1 >> 6);
+ if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
+ *cur = r1;
+ r2 = *(cur += picPitch) + (r2 >> 6);
+ if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
+ *cur = r2;
+ r3 = cur[picPitch] + (r3 >> 6);
+ if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
+ cur[picPitch] = r3;
+
+ cur -= (picPitch << 1);
+ cur++;
+ coef++;
+ }
+ cur -= 4;
+ coef -= 4;
+ }
+ if (b4&1)
+ {
+ cur += ((picPitch << 2) - 4);
+ coef += 60;
+ }
+ else
+ {
+ cur += 4;
+ coef += 4;
+ }
+ }
+ }
+
+ if (b8&1)
+ {
+ curL += ((picPitch << 3) - 8);
+ coef8 += 120;
+ }
+ else
+ {
+ curL += 8;
+ coef8 += 8;
+ }
+ }
+
+ return ;
+}
+
+/* perform dct, quant, iquant, idct for the entire MB */
+void dct_luma_16x16(AVCEncObject *encvid, uint8 *curL, uint8 *orgL)
+{
+ AVCCommonObj *video = encvid->common;
+ int pitch = video->currPic->pitch;
+ int org_pitch = encvid->currInput->pitch;
+ AVCMacroblock *currMB = video->currMB;
+ int16 *coef = video->block;
+ uint8 *pred = encvid->pred_i16[currMB->i16Mode];
+ int blk_x, blk_y, j, k, idx, b8, b4;
+ int r0, r1, r2, r3, m0, m1, m2 , m3;
+ int data, lev;
+ int *level, *run, zero_run, ncoeff;
+ int Rq, Qq, quant, q_bits, qp_const;
+ int offset_cur[4], offset_pred[4], offset;
+
+ /* horizontal */
+ for (j = 16; j > 0; j--)
+ {
+ for (blk_x = 4; blk_x > 0; blk_x--)
+ {
+ /* calculate the residue first */
+ r0 = *orgL++ - *pred++;
+ r1 = *orgL++ - *pred++;
+ r2 = *orgL++ - *pred++;
+ r3 = *orgL++ - *pred++;
+
+ r0 += r3; //ptr[0] + ptr[3];
+ r3 = r0 - (r3 << 1); //ptr[0] - ptr[3];
+ r1 += r2; //ptr[1] + ptr[2];
+ r2 = r1 - (r2 << 1); //ptr[1] - ptr[2];
+
+ *coef++ = r0 + r1;
+ *coef++ = (r3 << 1) + r2;
+ *coef++ = r0 - r1;
+ *coef++ = r3 - (r2 << 1);
+ }
+ orgL += (org_pitch - 16);
+ }
+ pred -= 256;
+ coef -= 256;
+ /* vertical */
+ for (blk_y = 4; blk_y > 0; blk_y--)
+ {
+ for (j = 16; j > 0; j--)
+ {
+ r0 = coef[0] + coef[48];
+ r3 = coef[0] - coef[48];
+ r1 = coef[16] + coef[32];
+ r2 = coef[16] - coef[32];
+
+ coef[0] = r0 + r1;
+ coef[32] = r0 - r1;
+ coef[16] = (r3 << 1) + r2;
+ coef[48] = r3 - (r2 << 1);
+
+ coef++;
+ }
+ coef += 48;
+ }
+
+ /* then perform DC transform */
+ coef -= 256;
+ for (j = 4; j > 0; j--)
+ {
+ r0 = coef[0] + coef[12];
+ r3 = coef[0] - coef[12];
+ r1 = coef[4] + coef[8];
+ r2 = coef[4] - coef[8];
+
+ coef[0] = r0 + r1;
+ coef[8] = r0 - r1;
+ coef[4] = r3 + r2;
+ coef[12] = r3 - r2;
+ coef += 64;
+ }
+ coef -= 256;
+ for (j = 4; j > 0; j--)
+ {
+ r0 = coef[0] + coef[192];
+ r3 = coef[0] - coef[192];
+ r1 = coef[64] + coef[128];
+ r2 = coef[64] - coef[128];
+
+ coef[0] = (r0 + r1) >> 1;
+ coef[128] = (r0 - r1) >> 1;
+ coef[64] = (r3 + r2) >> 1;
+ coef[192] = (r3 - r2) >> 1;
+ coef += 4;
+ }
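+
+    /* the two passes above apply a 4x4 Hadamard transform to the 16 luma DC
+       coefficients; the >>1 in the second pass provides the /2 scaling used
+       by the forward DC transform */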
+
+ coef -= 16;
+ // then quantize DC
+ level = encvid->leveldc;
+ run = encvid->rundc;
+
+ Rq = video->QPy_mod_6;
+ Qq = video->QPy_div_6;
+ quant = quant_coef[Rq][0];
+ q_bits = 15 + Qq;
+ qp_const = encvid->qp_const;
+
+ zero_run = 0;
+ ncoeff = 0;
+ for (k = 0; k < 16; k++) /* in zigzag scan order */
+ {
+ idx = ZIGZAG2RASTERDC[k];
+ data = coef[idx];
+ if (data > 0) // quant
+ {
+ lev = data * quant + (qp_const << 1);
+ }
+ else
+ {
+ lev = -data * quant + (qp_const << 1);
+ }
+ lev >>= (q_bits + 1);
+ if (lev) // dequant
+ {
+ if (data > 0)
+ {
+ level[ncoeff] = lev;
+ coef[idx] = lev;
+ }
+ else
+ {
+ level[ncoeff] = -lev;
+ coef[idx] = -lev;
+ }
+ run[ncoeff++] = zero_run;
+ zero_run = 0;
+ }
+ else
+ {
+ zero_run++;
+ coef[idx] = 0;
+ }
+ }
+
+ /* inverse transform DC */
+ encvid->numcoefdc = ncoeff;
+ if (ncoeff)
+ {
+ quant = dequant_coefres[Rq][0];
+
+ for (j = 0; j < 4; j++)
+ {
+ m0 = coef[0] + coef[4];
+ m1 = coef[0] - coef[4];
+ m2 = coef[8] + coef[12];
+ m3 = coef[8] - coef[12];
+
+
+ coef[0] = m0 + m2;
+ coef[4] = m0 - m2;
+ coef[8] = m1 - m3;
+ coef[12] = m1 + m3;
+ coef += 64;
+ }
+
+ coef -= 256;
+
+ if (Qq >= 2) /* this way should be faster than JM */
+ { /* they use (((m4*scale)<<(QPy/6))+2)>>2 for both cases. */
+ Qq -= 2;
+ for (j = 0; j < 4; j++)
+ {
+ m0 = coef[0] + coef[64];
+ m1 = coef[0] - coef[64];
+ m2 = coef[128] + coef[192];
+ m3 = coef[128] - coef[192];
+
+ coef[0] = ((m0 + m2) * quant) << Qq;
+ coef[64] = ((m0 - m2) * quant) << Qq;
+ coef[128] = ((m1 - m3) * quant) << Qq;
+ coef[192] = ((m1 + m3) * quant) << Qq;
+ coef += 4;
+ }
+ Qq += 2; /* restore the value */
+ }
+ else
+ {
+ Qq = 2 - Qq;
+ offset = 1 << (Qq - 1);
+
+ for (j = 0; j < 4; j++)
+ {
+ m0 = coef[0] + coef[64];
+ m1 = coef[0] - coef[64];
+ m2 = coef[128] + coef[192];
+ m3 = coef[128] - coef[192];
+
+ coef[0] = (((m0 + m2) * quant + offset) >> Qq);
+ coef[64] = (((m0 - m2) * quant + offset) >> Qq);
+ coef[128] = (((m1 - m3) * quant + offset) >> Qq);
+ coef[192] = (((m1 + m3) * quant + offset) >> Qq);
+ coef += 4;
+ }
+ Qq = 2 - Qq; /* restore the value */
+ }
+ coef -= 16; /* back to the origin */
+ }
+
+ /* now zigzag scan ac coefs, quant, iquant and itrans */
+ run = encvid->run[0];
+ level = encvid->level[0];
+
+ /* offset btw 4x4 block */
+ offset_cur[0] = 0;
+ offset_cur[1] = (pitch << 2) - 8;
+
+ /* offset btw 8x8 block */
+ offset_cur[2] = 8 - (pitch << 3);
+ offset_cur[3] = -8;
+
+ /* similarly for pred */
+ offset_pred[0] = 0;
+ offset_pred[1] = 56;
+ offset_pred[2] = -120;
+ offset_pred[3] = -8;
+
+ currMB->CBP = 0;
+
+ for (b8 = 0; b8 < 4; b8++)
+ {
+ for (b4 = 0; b4 < 4; b4++)
+ {
+
+ zero_run = 0;
+ ncoeff = 0;
+
+ for (k = 1; k < 16; k++)
+ {
+ idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */
+ data = coef[idx];
+ quant = quant_coef[Rq][k];
+ if (data > 0)
+ {
+ lev = data * quant + qp_const;
+ }
+ else
+ {
+ lev = -data * quant + qp_const;
+ }
+ lev >>= q_bits;
+ if (lev)
+ { /* dequant */
+ quant = dequant_coefres[Rq][k];
+ if (data > 0)
+ {
+ level[ncoeff] = lev;
+ coef[idx] = (lev * quant) << Qq;
+ }
+ else
+ {
+ level[ncoeff] = -lev;
+ coef[idx] = (-lev * quant) << Qq;
+ }
+ run[ncoeff++] = zero_run;
+ zero_run = 0;
+ }
+ else
+ {
+ zero_run++;
+ coef[idx] = 0;
+ }
+ }
+
+ currMB->nz_coeff[blkIdx2blkXY[b8][b4]] = ncoeff; /* in raster scan !!! */
+ if (ncoeff)
+ {
+ currMB->CBP |= (1 << b8);
+
+ // do inverse transform here
+ for (j = 4; j > 0; j--)
+ {
+ r0 = coef[0] + coef[2];
+ r1 = coef[0] - coef[2];
+ r2 = (coef[1] >> 1) - coef[3];
+ r3 = coef[1] + (coef[3] >> 1);
+
+ coef[0] = r0 + r3;
+ coef[1] = r1 + r2;
+ coef[2] = r1 - r2;
+ coef[3] = r0 - r3;
+
+ coef += 16;
+ }
+ coef -= 64;
+ for (j = 4; j > 0; j--)
+ {
+ r0 = coef[0] + coef[32];
+ r1 = coef[0] - coef[32];
+ r2 = (coef[16] >> 1) - coef[48];
+ r3 = coef[16] + (coef[48] >> 1);
+
+ r0 += r3;
+ r3 = (r0 - (r3 << 1)); /* r0-r3 */
+ r1 += r2;
+ r2 = (r1 - (r2 << 1)); /* r1-r2 */
+ r0 += 32;
+ r1 += 32;
+ r2 += 32;
+ r3 += 32;
+ r0 = pred[0] + (r0 >> 6);
+ if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
+ r1 = pred[16] + (r1 >> 6);
+ if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
+ r2 = pred[32] + (r2 >> 6);
+ if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
+ r3 = pred[48] + (r3 >> 6);
+ if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
+ *curL = r0;
+ *(curL += pitch) = r1;
+ *(curL += pitch) = r2;
+ curL[pitch] = r3;
+ curL -= (pitch << 1);
+ curL++;
+ pred++;
+ coef++;
+ }
+ }
+ else // do DC-only inverse
+ {
+ m0 = coef[0] + 32;
+
+ for (j = 4; j > 0; j--)
+ {
+ r0 = pred[0] + (m0 >> 6);
+ if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
+ r1 = pred[16] + (m0 >> 6);
+ if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
+ r2 = pred[32] + (m0 >> 6);
+ if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
+ r3 = pred[48] + (m0 >> 6);
+ if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
+ *curL = r0;
+ *(curL += pitch) = r1;
+ *(curL += pitch) = r2;
+ curL[pitch] = r3;
+ curL -= (pitch << 1);
+ curL++;
+ pred++;
+ }
+ coef += 4;
+ }
+
+ run += 16; // follow coding order
+ level += 16;
+ curL += offset_cur[b4&1];
+ pred += offset_pred[b4&1];
+ coef += offset_pred[b4&1];
+ }
+
+ curL += offset_cur[2 + (b8&1)];
+ pred += offset_pred[2 + (b8&1)];
+ coef += offset_pred[2 + (b8&1)];
+ }
+
+ return ;
+}
+
+
+void dct_chroma(AVCEncObject *encvid, uint8 *curC, uint8 *orgC, int cr)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCMacroblock *currMB = video->currMB;
+ int org_pitch = (encvid->currInput->pitch) >> 1;
+ int pitch = (video->currPic->pitch) >> 1;
+ int pred_pitch = 16;
+ int16 *coef = video->block + 256;
+ uint8 *pred = video->pred_block;
+ int j, blk_x, blk_y, k, idx, b4;
+ int r0, r1, r2, r3, m0;
+ int Qq, Rq, qp_const, q_bits, quant;
+ int *level, *run, zero_run, ncoeff;
+ int data, lev;
+ int offset_cur[2], offset_pred[2], offset_coef[2];
+ uint8 nz_temp[4];
+ int coeff_cost;
+
+ if (cr)
+ {
+ coef += 8;
+ pred += 8;
+ }
+
+ if (currMB->mb_intra == 0) // inter mode
+ {
+ pred = curC;
+ pred_pitch = pitch;
+ }
+
+ /* do 4x4 transform */
+ /* horizontal */
+ for (j = 8; j > 0; j--)
+ {
+ for (blk_x = 2; blk_x > 0; blk_x--)
+ {
+ /* calculate the residue first */
+ r0 = *orgC++ - *pred++;
+ r1 = *orgC++ - *pred++;
+ r2 = *orgC++ - *pred++;
+ r3 = *orgC++ - *pred++;
+
+ r0 += r3; //ptr[0] + ptr[3];
+ r3 = r0 - (r3 << 1); //ptr[0] - ptr[3];
+ r1 += r2; //ptr[1] + ptr[2];
+ r2 = r1 - (r2 << 1); //ptr[1] - ptr[2];
+
+ *coef++ = r0 + r1;
+ *coef++ = (r3 << 1) + r2;
+ *coef++ = r0 - r1;
+ *coef++ = r3 - (r2 << 1);
+
+ }
+ coef += 8; // coef pitch is 16
+ pred += (pred_pitch - 8); // pred_pitch is 16
+ orgC += (org_pitch - 8);
+ }
+ pred -= (pred_pitch << 3);
+ coef -= 128;
+ /* vertical */
+ for (blk_y = 2; blk_y > 0; blk_y--)
+ {
+ for (j = 8; j > 0; j--)
+ {
+ r0 = coef[0] + coef[48];
+ r3 = coef[0] - coef[48];
+ r1 = coef[16] + coef[32];
+ r2 = coef[16] - coef[32];
+
+ coef[0] = r0 + r1;
+ coef[32] = r0 - r1;
+ coef[16] = (r3 << 1) + r2;
+ coef[48] = r3 - (r2 << 1);
+
+ coef++;
+ }
+ coef += 56;
+ }
+ /* then perform DC transform */
+ coef -= 128;
+
+ /* 2x2 transform of DC components*/
+ r0 = coef[0];
+ r1 = coef[4];
+ r2 = coef[64];
+ r3 = coef[68];
+
+ coef[0] = r0 + r1 + r2 + r3;
+ coef[4] = r0 - r1 + r2 - r3;
+ coef[64] = r0 + r1 - r2 - r3;
+ coef[68] = r0 - r1 - r2 + r3;
+
+ Qq = video->QPc_div_6;
+ Rq = video->QPc_mod_6;
+ quant = quant_coef[Rq][0];
+ q_bits = 15 + Qq;
+ qp_const = encvid->qp_const_c;
+
+ zero_run = 0;
+ ncoeff = 0;
+ run = encvid->runcdc + (cr << 2);
+ level = encvid->levelcdc + (cr << 2);
+
+ /* in zigzag scan order */
+ for (k = 0; k < 4; k++)
+ {
+ idx = ((k >> 1) << 6) + ((k & 1) << 2);
+ data = coef[idx];
+ if (data > 0)
+ {
+ lev = data * quant + (qp_const << 1);
+ }
+ else
+ {
+ lev = -data * quant + (qp_const << 1);
+ }
+ lev >>= (q_bits + 1);
+ if (lev)
+ {
+ if (data > 0)
+ {
+ level[ncoeff] = lev;
+ coef[idx] = lev;
+ }
+ else
+ {
+ level[ncoeff] = -lev;
+ coef[idx] = -lev;
+ }
+ run[ncoeff++] = zero_run;
+ zero_run = 0;
+ }
+ else
+ {
+ zero_run++;
+ coef[idx] = 0;
+ }
+ }
+
+ encvid->numcoefcdc[cr] = ncoeff;
+
+ if (ncoeff)
+ {
+ currMB->CBP |= (1 << 4); // DC present
+ // do inverse transform
+ quant = dequant_coefres[Rq][0];
+
+ r0 = coef[0] + coef[4];
+ r1 = coef[0] - coef[4];
+ r2 = coef[64] + coef[68];
+ r3 = coef[64] - coef[68];
+
+ r0 += r2;
+ r2 = r0 - (r2 << 1);
+ r1 += r3;
+ r3 = r1 - (r3 << 1);
+
+ if (Qq >= 1)
+ {
+ Qq -= 1;
+ coef[0] = (r0 * quant) << Qq;
+ coef[4] = (r1 * quant) << Qq;
+ coef[64] = (r2 * quant) << Qq;
+ coef[68] = (r3 * quant) << Qq;
+ Qq++;
+ }
+ else
+ {
+ coef[0] = (r0 * quant) >> 1;
+ coef[4] = (r1 * quant) >> 1;
+ coef[64] = (r2 * quant) >> 1;
+ coef[68] = (r3 * quant) >> 1;
+ }
+ }
+
+ /* now do AC zigzag scan, quant, iquant and itrans */
+ if (cr)
+ {
+ run = encvid->run[20];
+ level = encvid->level[20];
+ }
+ else
+ {
+ run = encvid->run[16];
+ level = encvid->level[16];
+ }
+
+ /* offsets between 4x4 blocks */
+ offset_cur[0] = 0;
+ offset_cur[1] = (pitch << 2) - 8;
+ offset_pred[0] = 0;
+ offset_pred[1] = (pred_pitch << 2) - 8;
+ offset_coef[0] = 0;
+ offset_coef[1] = 56;
+
+ coeff_cost = 0;
+
+ for (b4 = 0; b4 < 4; b4++)
+ {
+ zero_run = 0;
+ ncoeff = 0;
+ for (k = 1; k < 16; k++) /* in zigzag scan order */
+ {
+ idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */
+ data = coef[idx];
+ quant = quant_coef[Rq][k];
+ if (data > 0)
+ {
+ lev = data * quant + qp_const;
+ }
+ else
+ {
+ lev = -data * quant + qp_const;
+ }
+ lev >>= q_bits;
+ if (lev)
+ {
+ /* for RD performance*/
+ if (lev > 1)
+ coeff_cost += MAX_VALUE; // set high cost, shall not be discarded
+ else
+ coeff_cost += COEFF_COST[DISABLE_THRESHOLDING][zero_run];
+
+ /* dequant */
+ quant = dequant_coefres[Rq][k];
+ if (data > 0)
+ {
+ level[ncoeff] = lev;
+ coef[idx] = (lev * quant) << Qq;
+ }
+ else
+ {
+ level[ncoeff] = -lev;
+ coef[idx] = (-lev * quant) << Qq;
+ }
+ run[ncoeff++] = zero_run;
+ zero_run = 0;
+ }
+ else
+ {
+ zero_run++;
+ coef[idx] = 0;
+ }
+ }
+
+ nz_temp[b4] = ncoeff; // raster scan
+
+ // just advance the pointers for now, do IDCT later
+ coef += 4;
+ run += 16;
+ level += 16;
+ coef += offset_coef[b4&1];
+ }
+
+ /* rewind the pointers */
+ coef -= 128;
+
+ if (coeff_cost < _CHROMA_COEFF_COST_)
+ {
+ /* It is not worth encoding the AC coefficients of any block,
+ so drop them and keep DC only. */
+ /* We could also reset level and run, but setting nz to zero should be enough. */
+ currMB->nz_coeff[16+(cr<<1)] = 0;
+ currMB->nz_coeff[17+(cr<<1)] = 0;
+ currMB->nz_coeff[20+(cr<<1)] = 0;
+ currMB->nz_coeff[21+(cr<<1)] = 0;
+
+ for (b4 = 0; b4 < 4; b4++)
+ {
+ // do DC-only inverse
+ m0 = coef[0] + 32;
+
+ for (j = 4; j > 0; j--)
+ {
+ r0 = pred[0] + (m0 >> 6);
+ if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
+ r1 = *(pred += pred_pitch) + (m0 >> 6);
+ if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
+ r2 = pred[pred_pitch] + (m0 >> 6);
+ if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
+ r3 = pred[pred_pitch<<1] + (m0 >> 6);
+ if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
+ *curC = r0;
+ *(curC += pitch) = r1;
+ *(curC += pitch) = r2;
+ curC[pitch] = r3;
+ curC -= (pitch << 1);
+ curC++;
+ pred += (1 - pred_pitch);
+ }
+ coef += 4;
+ curC += offset_cur[b4&1];
+ pred += offset_pred[b4&1];
+ coef += offset_coef[b4&1];
+ }
+ }
+ else // not dropping anything, continue with the IDCT
+ {
+ for (b4 = 0; b4 < 4; b4++)
+ {
+ ncoeff = nz_temp[b4] ; // in raster scan
+ currMB->nz_coeff[16+(b4&1)+(cr<<1)+((b4>>1)<<2)] = ncoeff; // in raster scan
+
+ if (ncoeff) // do a check on the nonzero-coeff
+ {
+ currMB->CBP |= (2 << 4);
+
+ // do inverse transform here
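+ // The two passes below are the usual 4x4 inverse integer transform:
+ //   e0 = c0 + c2, e1 = c0 - c2, e2 = (c1>>1) - c3, e3 = c1 + (c3>>1)
+ //   out = { e0+e3, e1+e2, e1-e2, e0-e3 }
+ // applied to rows first and then to columns, followed by (x + 32) >> 6
+ // rounding before adding the prediction and clipping to [0, 255].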
+ for (j = 4; j > 0; j--)
+ {
+ r0 = coef[0] + coef[2];
+ r1 = coef[0] - coef[2];
+ r2 = (coef[1] >> 1) - coef[3];
+ r3 = coef[1] + (coef[3] >> 1);
+
+ coef[0] = r0 + r3;
+ coef[1] = r1 + r2;
+ coef[2] = r1 - r2;
+ coef[3] = r0 - r3;
+
+ coef += 16;
+ }
+ coef -= 64;
+ for (j = 4; j > 0; j--)
+ {
+ r0 = coef[0] + coef[32];
+ r1 = coef[0] - coef[32];
+ r2 = (coef[16] >> 1) - coef[48];
+ r3 = coef[16] + (coef[48] >> 1);
+
+ r0 += r3;
+ r3 = (r0 - (r3 << 1)); /* r0-r3 */
+ r1 += r2;
+ r2 = (r1 - (r2 << 1)); /* r1-r2 */
+ r0 += 32;
+ r1 += 32;
+ r2 += 32;
+ r3 += 32;
+ r0 = pred[0] + (r0 >> 6);
+ if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
+ r1 = *(pred += pred_pitch) + (r1 >> 6);
+ if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
+ r2 = pred[pred_pitch] + (r2 >> 6);
+ if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
+ r3 = pred[pred_pitch<<1] + (r3 >> 6);
+ if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
+ *curC = r0;
+ *(curC += pitch) = r1;
+ *(curC += pitch) = r2;
+ curC[pitch] = r3;
+ curC -= (pitch << 1);
+ curC++;
+ pred += (1 - pred_pitch);
+ coef++;
+ }
+ }
+ else
+ {
+ // do DC-only inverse
+ m0 = coef[0] + 32;
+
+ for (j = 4; j > 0; j--)
+ {
+ r0 = pred[0] + (m0 >> 6);
+ if ((uint)r0 > 0xFF) r0 = 0xFF & (~(r0 >> 31)); /* clip */
+ r1 = *(pred += pred_pitch) + (m0 >> 6);
+ if ((uint)r1 > 0xFF) r1 = 0xFF & (~(r1 >> 31)); /* clip */
+ r2 = pred[pred_pitch] + (m0 >> 6);
+ if ((uint)r2 > 0xFF) r2 = 0xFF & (~(r2 >> 31)); /* clip */
+ r3 = pred[pred_pitch<<1] + (m0 >> 6);
+ if ((uint)r3 > 0xFF) r3 = 0xFF & (~(r3 >> 31)); /* clip */
+ *curC = r0;
+ *(curC += pitch) = r1;
+ *(curC += pitch) = r2;
+ curC[pitch] = r3;
+ curC -= (pitch << 1);
+ curC++;
+ pred += (1 - pred_pitch);
+ }
+ coef += 4;
+ }
+ curC += offset_cur[b4&1];
+ pred += offset_pred[b4&1];
+ coef += offset_coef[b4&1];
+ }
+ }
+
+ return ;
+}
+
+
+/* only DC transform */
+int TransQuantIntra16DC(AVCEncObject *encvid)
+{
+ AVCCommonObj *video = encvid->common;
+ int16 *block = video->block;
+ int *level = encvid->leveldc;
+ int *run = encvid->rundc;
+ int16 *ptr = block;
+ int r0, r1, r2, r3, j;
+ int Qq = video->QPy_div_6;
+ int Rq = video->QPy_mod_6;
+ int q_bits, qp_const, quant;
+ int data, lev, zero_run;
+ int k, ncoeff, idx;
+
+ /* DC transform */
+ /* horizontal */
+ j = 4;
+ while (j)
+ {
+ r0 = ptr[0] + ptr[12];
+ r3 = ptr[0] - ptr[12];
+ r1 = ptr[4] + ptr[8];
+ r2 = ptr[4] - ptr[8];
+
+ ptr[0] = r0 + r1;
+ ptr[8] = r0 - r1;
+ ptr[4] = r3 + r2;
+ ptr[12] = r3 - r2;
+ ptr += 64;
+ j--;
+ }
+ /* vertical */
+ ptr = block;
+ j = 4;
+ while (j)
+ {
+ r0 = ptr[0] + ptr[192];
+ r3 = ptr[0] - ptr[192];
+ r1 = ptr[64] + ptr[128];
+ r2 = ptr[64] - ptr[128];
+
+ ptr[0] = (r0 + r1) >> 1;
+ ptr[128] = (r0 - r1) >> 1;
+ ptr[64] = (r3 + r2) >> 1;
+ ptr[192] = (r3 - r2) >> 1;
+ ptr += 4;
+ j--;
+ }
+
+ quant = quant_coef[Rq][0];
+ q_bits = 15 + Qq;
+ qp_const = (1 << q_bits) / 3; // intra
+
+ zero_run = 0;
+ ncoeff = 0;
+
+ for (k = 0; k < 16; k++) /* in zigzag scan order */
+ {
+ idx = ZIGZAG2RASTERDC[k];
+ data = block[idx];
+ if (data > 0)
+ {
+ lev = data * quant + (qp_const << 1);
+ }
+ else
+ {
+ lev = -data * quant + (qp_const << 1);
+ }
+ lev >>= (q_bits + 1);
+ if (lev)
+ {
+ if (data > 0)
+ {
+ level[ncoeff] = lev;
+ block[idx] = lev;
+ }
+ else
+ {
+ level[ncoeff] = -lev;
+ block[idx] = -lev;
+ }
+ run[ncoeff++] = zero_run;
+ zero_run = 0;
+ }
+ else
+ {
+ zero_run++;
+ block[idx] = 0;
+ }
+ }
+ return ncoeff;
+}
+
+int TransQuantChromaDC(AVCEncObject *encvid, int16 *block, int slice_type, int cr)
+{
+ AVCCommonObj *video = encvid->common;
+ int *level, *run;
+ int r0, r1, r2, r3;
+ int Qq, Rq, q_bits, qp_const, quant;
+ int data, lev, zero_run;
+ int k, ncoeff, idx;
+
+ level = encvid->levelcdc + (cr << 2); /* cb or cr */
+ run = encvid->runcdc + (cr << 2);
+
+ /* 2x2 transform of DC components*/
+ r0 = block[0];
+ r1 = block[4];
+ r2 = block[64];
+ r3 = block[68];
+
+ block[0] = r0 + r1 + r2 + r3;
+ block[4] = r0 - r1 + r2 - r3;
+ block[64] = r0 + r1 - r2 - r3;
+ block[68] = r0 - r1 - r2 + r3;
+
+ Qq = video->QPc_div_6;
+ Rq = video->QPc_mod_6;
+ quant = quant_coef[Rq][0];
+ q_bits = 15 + Qq;
+ if (slice_type == AVC_I_SLICE)
+ {
+ qp_const = (1 << q_bits) / 3;
+ }
+ else
+ {
+ qp_const = (1 << q_bits) / 6;
+ }
+
+ zero_run = 0;
+ ncoeff = 0;
+
+ for (k = 0; k < 4; k++) /* in zigzag scan order */
+ {
+ idx = ((k >> 1) << 6) + ((k & 1) << 2);
+ data = block[idx];
+ if (data > 0)
+ {
+ lev = data * quant + (qp_const << 1);
+ }
+ else
+ {
+ lev = -data * quant + (qp_const << 1);
+ }
+ lev >>= (q_bits + 1);
+ if (lev)
+ {
+ if (data > 0)
+ {
+ level[ncoeff] = lev;
+ block[idx] = lev;
+ }
+ else
+ {
+ level[ncoeff] = -lev;
+ block[idx] = -lev;
+ }
+ run[ncoeff++] = zero_run;
+ zero_run = 0;
+ }
+ else
+ {
+ zero_run++;
+ block[idx] = 0;
+ }
+ }
+ return ncoeff;
+}
+
+
diff --git a/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp b/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp
new file mode 100644
index 0000000..38a2a15
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp
@@ -0,0 +1,622 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+/* 3/29/01 fast half-pel search based on neighboring guess */
+/* value ranging from 0 to 4, high complexity (more accurate) to
+ low complexity (less accurate) */
+#define HP_DISTANCE_TH 5 // 2 /* half-pel distance threshold */
+
+#define PREF_16_VEC 129 /* 1MV bias versus 4MVs*/
+
+const static int distance_tab[9][9] = /* [hp_guess][k] */
+{
+ {0, 1, 1, 1, 1, 1, 1, 1, 1},
+ {1, 0, 1, 2, 3, 4, 3, 2, 1},
+ {1, 0, 0, 0, 1, 2, 3, 2, 1},
+ {1, 2, 1, 0, 1, 2, 3, 4, 3},
+ {1, 2, 1, 0, 0, 0, 1, 2, 3},
+ {1, 4, 3, 2, 1, 0, 1, 2, 3},
+ {1, 2, 3, 2, 1, 0, 0, 0, 1},
+ {1, 2, 3, 4, 3, 2, 1, 0, 1},
+ {1, 0, 1, 2, 3, 2, 1, 0, 0}
+};
+
+#define CLIP_RESULT(x) if((uint)x > 0xFF){ \
+ x = 0xFF & (~(x>>31));}
+
+#define CLIP_UPPER16(x) if((uint)x >= 0x20000000){ \
+ x = 0xFF0000 & (~(x>>31));} \
+ else { \
+ x = (x>>5)&0xFF0000; \
+ }
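+/* Both macros clip without branching on the sign: the unsigned compare catches
+   negative inputs (which wrap to large unsigned values) as well as overflows,
+   and ~(x>>31) is 0 for negative x and all-ones otherwise, so the AND yields 0
+   on underflow and the saturation value (0xFF, or 0xFF0000 for the upper lane)
+   on overflow. A plain-C equivalent of CLIP_RESULT, for illustration only:
+   x = (x < 0) ? 0 : ((x > 255) ? 255 : x); */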
+
+/*=====================================================================
+ Function: AVCFindHalfPelMB
+ Date: 10/31/2007
+ Purpose: Find half pel resolution MV surrounding the full-pel MV
+=====================================================================*/
+
+int AVCFindHalfPelMB(AVCEncObject *encvid, uint8 *cur, AVCMV *mot, uint8 *ncand,
+ int xpos, int ypos, int hp_guess, int cmvx, int cmvy)
+{
+ AVCPictureData *currPic = encvid->common->currPic;
+ int lx = currPic->pitch;
+ int d, dmin, satd_min;
+ uint8* cand;
+ int lambda_motion = encvid->lambda_motion;
+ uint8 *mvbits = encvid->mvbits;
+ int mvcost;
+ /* list of candidate to go through for half-pel search*/
+ uint8 *subpel_pred = (uint8*) encvid->subpel_pred; // all 16 sub-pel positions
+ uint8 **hpel_cand = (uint8**) encvid->hpel_cand; /* half-pel position */
+
+ int xh[9] = {0, 0, 2, 2, 2, 0, -2, -2, -2};
+ int yh[9] = {0, -2, -2, 0, 2, 2, 2, 0, -2};
+ int xq[8] = {0, 1, 1, 1, 0, -1, -1, -1};
+ int yq[8] = { -1, -1, 0, 1, 1, 1, 0, -1};
+ int h, hmin, q, qmin;
+
+ OSCL_UNUSED_ARG(xpos);
+ OSCL_UNUSED_ARG(ypos);
+ OSCL_UNUSED_ARG(hp_guess);
+
+ GenerateHalfPelPred(subpel_pred, ncand, lx);
+
+ cur = encvid->currYMB; // pre-load current original MB
+
+ cand = hpel_cand[0];
+
+ // find cost for the current full-pel position
+ dmin = SATD_MB(cand, cur, 65535); // get Hadamard transform SAD
+ mvcost = MV_COST_S(lambda_motion, mot->x, mot->y, cmvx, cmvy);
+ satd_min = dmin;
+ dmin += mvcost;
+ hmin = 0;
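+ /* The searches below minimize SATD + mvcost, where MV_COST_S presumably weighs
+    the bits needed to code the candidate MV relative to the predictor (cmvx, cmvy)
+    by lambda_motion. satd_min tracks the distortion-only part of the winning
+    candidate, and that is what this function returns. */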
+
+ /* find half-pel */
+ for (h = 1; h < 9; h++)
+ {
+ d = SATD_MB(hpel_cand[h], cur, dmin);
+ mvcost = MV_COST_S(lambda_motion, mot->x + xh[h], mot->y + yh[h], cmvx, cmvy);
+ d += mvcost;
+
+ if (d < dmin)
+ {
+ dmin = d;
+ hmin = h;
+ satd_min = d - mvcost;
+ }
+ }
+
+ mot->sad = dmin;
+ mot->x += xh[hmin];
+ mot->y += yh[hmin];
+ encvid->best_hpel_pos = hmin;
+
+ /*** search for quarter-pel ****/
+ GenerateQuartPelPred(encvid->bilin_base[hmin], &(encvid->qpel_cand[0][0]), hmin);
+
+ encvid->best_qpel_pos = qmin = -1;
+
+ for (q = 0; q < 8; q++)
+ {
+ d = SATD_MB(encvid->qpel_cand[q], cur, dmin);
+ mvcost = MV_COST_S(lambda_motion, mot->x + xq[q], mot->y + yq[q], cmvx, cmvy);
+ d += mvcost;
+ if (d < dmin)
+ {
+ dmin = d;
+ qmin = q;
+ satd_min = d - mvcost;
+ }
+ }
+
+ if (qmin != -1)
+ {
+ mot->sad = dmin;
+ mot->x += xq[qmin];
+ mot->y += yq[qmin];
+ encvid->best_qpel_pos = qmin;
+ }
+
+ return satd_min;
+}
+
+
+
+/** This function generates sub-pel prediction around the full-pel candidate.
+Each sub-pel position array is 20 pixels wide (for word alignment) and 17 pixels tall. */
+/** The sub-pel positions are labeled in a spiral manner from the center. */
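+/** A note on the interpolation passes below: they all apply the 6-tap filter
+    (1, -5, 20, 20, -5, 1). Half-pel samples computed directly from integer pels
+    are rounded as (sum + 16) >> 5; the middle position, which is filtered from
+    the 16-bit horizontal intermediates in tmp_horz, is rounded as
+    (sum + 512) >> 10. Results are clipped to [0, 255] with CLIP_RESULT. */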
+
+void GenerateHalfPelPred(uint8* subpel_pred, uint8 *ncand, int lx)
+{
+ /* let's do straightforward way first */
+ uint8 *ref;
+ uint8 *dst;
+ uint8 tmp8;
+ int32 tmp32;
+ int16 tmp_horz[18*22], *dst_16, *src_16;
+ register int a = 0, b = 0, c = 0, d = 0, e = 0, f = 0; // temp register
+ int msk;
+ int i, j;
+
+ /* first copy full-pel to the first array */
+ /* to be optimized later based on byte-offset load */
+ ref = ncand - 3 - lx - (lx << 1); /* move back (-3,-3) */
+ dst = subpel_pred;
+
+ dst -= 4; /* offset */
+ for (j = 0; j < 22; j++) /* 24x22 */
+ {
+ i = 6;
+ while (i > 0)
+ {
+ tmp32 = *ref++;
+ tmp8 = *ref++;
+ tmp32 |= (tmp8 << 8);
+ tmp8 = *ref++;
+ tmp32 |= (tmp8 << 16);
+ tmp8 = *ref++;
+ tmp32 |= (tmp8 << 24);
+ *((uint32*)(dst += 4)) = tmp32;
+ i--;
+ }
+ ref += (lx - 24);
+ }
+
+ /* from the first array, we do horizontal interp */
+ ref = subpel_pred + 2;
+ dst_16 = tmp_horz; /* 17 x 22 */
+
+ for (j = 4; j > 0; j--)
+ {
+ for (i = 16; i > 0; i -= 4)
+ {
+ a = ref[-2];
+ b = ref[-1];
+ c = ref[0];
+ d = ref[1];
+ e = ref[2];
+ f = ref[3];
+ *dst_16++ = a + f - 5 * (b + e) + 20 * (c + d);
+ a = ref[4];
+ *dst_16++ = b + a - 5 * (c + f) + 20 * (d + e);
+ b = ref[5];
+ *dst_16++ = c + b - 5 * (d + a) + 20 * (e + f);
+ c = ref[6];
+ *dst_16++ = d + c - 5 * (e + b) + 20 * (f + a);
+
+ ref += 4;
+ }
+ /* do the 17th column here */
+ d = ref[3];
+ *dst_16 = e + d - 5 * (f + c) + 20 * (a + b);
+ dst_16 += 2; /* stride for tmp_horz is 18 */
+ ref += 8; /* stride for ref is 24 */
+ if (j == 3) // move 18 lines down
+ {
+ dst_16 += 324;//18*18;
+ ref += 432;//18*24;
+ }
+ }
+
+ ref -= 480;//20*24;
+ dst_16 -= 360;//20*18;
+ dst = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE; /* go to the 14th array 17x18*/
+
+ for (j = 18; j > 0; j--)
+ {
+ for (i = 16; i > 0; i -= 4)
+ {
+ a = ref[-2];
+ b = ref[-1];
+ c = ref[0];
+ d = ref[1];
+ e = ref[2];
+ f = ref[3];
+ tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
+ *dst_16++ = tmp32;
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *dst++ = tmp32;
+
+ a = ref[4];
+ tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
+ *dst_16++ = tmp32;
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *dst++ = tmp32;
+
+ b = ref[5];
+ tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
+ *dst_16++ = tmp32;
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *dst++ = tmp32;
+
+ c = ref[6];
+ tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
+ *dst_16++ = tmp32;
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *dst++ = tmp32;
+
+ ref += 4;
+ }
+ /* do the 17th column here */
+ d = ref[3];
+ tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
+ *dst_16 = tmp32;
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *dst = tmp32;
+
+ dst += 8; /* stride for dst is 24 */
+ dst_16 += 2; /* stride for tmp_horz is 18 */
+ ref += 8; /* stride for ref is 24 */
+ }
+
+
+ /* Do middle point filtering*/
+ src_16 = tmp_horz; /* 17 x 22 */
+ dst = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE; /* 12th array 17x17*/
+ dst -= 24; // offset
+ for (i = 0; i < 17; i++)
+ {
+ for (j = 16; j > 0; j -= 4)
+ {
+ a = *src_16;
+ b = *(src_16 += 18);
+ c = *(src_16 += 18);
+ d = *(src_16 += 18);
+ e = *(src_16 += 18);
+ f = *(src_16 += 18);
+
+ tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
+ tmp32 = (tmp32 + 512) >> 10;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32;
+
+ a = *(src_16 += 18);
+ tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
+ tmp32 = (tmp32 + 512) >> 10;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32;
+
+ b = *(src_16 += 18);
+ tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
+ tmp32 = (tmp32 + 512) >> 10;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32;
+
+ c = *(src_16 += 18);
+ tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
+ tmp32 = (tmp32 + 512) >> 10;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32;
+
+ src_16 -= (18 << 2);
+ }
+
+ d = src_16[90]; // 18*5
+ tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
+ tmp32 = (tmp32 + 512) >> 10;
+ CLIP_RESULT(tmp32)
+ dst[24] = tmp32;
+
+ src_16 -= ((18 << 4) - 1);
+ dst -= ((24 << 4) - 1);
+ }
+
+ /* do vertical interpolation */
+ ref = subpel_pred + 2;
+ dst = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE; /* 10th array 18x17 */
+ dst -= 24; // offset
+
+ for (i = 2; i > 0; i--)
+ {
+ for (j = 16; j > 0; j -= 4)
+ {
+ a = *ref;
+ b = *(ref += 24);
+ c = *(ref += 24);
+ d = *(ref += 24);
+ e = *(ref += 24);
+ f = *(ref += 24);
+
+ tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32; // 10th
+
+ a = *(ref += 24);
+ tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32; // 10th
+
+ b = *(ref += 24);
+ tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32; // 10th
+
+ c = *(ref += 24);
+ tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32; // 10th
+
+ ref -= (24 << 2);
+ }
+
+ d = ref[120]; // 24*5
+ tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ dst[24] = tmp32; // 10th
+
+ dst -= ((24 << 4) - 1);
+ ref -= ((24 << 4) - 1);
+ }
+
+ // Note that using SIMD here doesn't help much; the cycle count stays almost the same.
+ // One could simply reuse the code above and change the loop from for (i = 2; ...) to for (i = 18; ...).
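+ // Sketch of what the packed loop below does: each 32-bit load holds four
+ // pixels; masking with 0xFF00FF splits them into two 16-bit lanes (bytes 0/2
+ // in one register, bytes 1/3 in the other), so one multiply-accumulate runs
+ // the 6-tap vertical filter on two columns at once. Adding 0x100010 puts the
+ // rounding constant 16 into both lanes, and CLIP_UPPER16 clips/shifts each
+ // lane before the results are packed back into bytes and stored.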
+ for (i = 16; i > 0; i -= 4)
+ {
+ msk = 0;
+ for (j = 17; j > 0; j--)
+ {
+ a = *((uint32*)ref); /* load 4 bytes */
+ b = (a >> 8) & 0xFF00FF; /* second and fourth byte */
+ a &= 0xFF00FF;
+
+ c = *((uint32*)(ref + 120));
+ d = (c >> 8) & 0xFF00FF;
+ c &= 0xFF00FF;
+
+ a += c;
+ b += d;
+
+ e = *((uint32*)(ref + 72)); /* e, f */
+ f = (e >> 8) & 0xFF00FF;
+ e &= 0xFF00FF;
+
+ c = *((uint32*)(ref + 48)); /* c, d */
+ d = (c >> 8) & 0xFF00FF;
+ c &= 0xFF00FF;
+
+ c += e;
+ d += f;
+
+ a += 20 * c;
+ b += 20 * d;
+ a += 0x100010;
+ b += 0x100010;
+
+ e = *((uint32*)(ref += 24)); /* e, f */
+ f = (e >> 8) & 0xFF00FF;
+ e &= 0xFF00FF;
+
+ c = *((uint32*)(ref + 72)); /* c, d */
+ d = (c >> 8) & 0xFF00FF;
+ c &= 0xFF00FF;
+
+ c += e;
+ d += f;
+
+ a -= 5 * c;
+ b -= 5 * d;
+
+ c = a << 16;
+ d = b << 16;
+ CLIP_UPPER16(a)
+ CLIP_UPPER16(c)
+ CLIP_UPPER16(b)
+ CLIP_UPPER16(d)
+
+ a |= (c >> 16);
+ b |= (d >> 16);
+ // a>>=5;
+ // b>>=5;
+ /* clip */
+ // msk |= b; msk|=a;
+ // a &= 0xFF00FF;
+ // b &= 0xFF00FF;
+ a |= (b << 8); /* pack it back */
+
+ *((uint16*)(dst += 24)) = a & 0xFFFF; //dst is not word-aligned.
+ *((uint16*)(dst + 2)) = a >> 16;
+
+ }
+ dst -= 404; // 24*17-4
+ ref -= 404;
+ /* if(msk & 0xFF00FF00) // need clipping
+ {
+ VertInterpWClip(dst,ref); // re-do 4 column with clip
+ }*/
+ }
+
+ return ;
+}
+
+void VertInterpWClip(uint8 *dst, uint8 *ref)
+{
+ int i, j;
+ int a, b, c, d, e, f;
+ int32 tmp32;
+
+ dst -= 4;
+ ref -= 4;
+
+ for (i = 4; i > 0; i--)
+ {
+ for (j = 16; j > 0; j -= 4)
+ {
+ a = *ref;
+ b = *(ref += 24);
+ c = *(ref += 24);
+ d = *(ref += 24);
+ e = *(ref += 24);
+ f = *(ref += 24);
+
+ tmp32 = a + f - 5 * (b + e) + 20 * (c + d);
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32; // 10th
+
+ a = *(ref += 24);
+ tmp32 = b + a - 5 * (c + f) + 20 * (d + e);
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32; // 10th
+
+ b = *(ref += 24);
+ tmp32 = c + b - 5 * (d + a) + 20 * (e + f);
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32; // 10th
+
+ c = *(ref += 24);
+ tmp32 = d + c - 5 * (e + b) + 20 * (f + a);
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ *(dst += 24) = tmp32; // 10th
+
+ ref -= (24 << 2);
+ }
+
+ d = ref[120]; // 24*5
+ tmp32 = e + d - 5 * (f + c) + 20 * (a + b);
+ tmp32 = (tmp32 + 16) >> 5;
+ CLIP_RESULT(tmp32)
+ dst[24] = tmp32; // 10th
+
+ dst -= ((24 << 4) - 1);
+ ref -= ((24 << 4) - 1);
+ }
+
+ return ;
+}
+
+
+void GenerateQuartPelPred(uint8 **bilin_base, uint8 *qpel_cand, int hpel_pos)
+{
+ // for even value of hpel_pos, start with pattern 1, otherwise, start with pattern 2
+ int i, j;
+
+ uint8 *c1 = qpel_cand;
+ uint8 *tl = bilin_base[0];
+ uint8 *tr = bilin_base[1];
+ uint8 *bl = bilin_base[2];
+ uint8 *br = bilin_base[3];
+ int a, b, c, d;
+ int offset = 1 - (384 * 7);
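+ // The eight quarter-pel candidate buffers appear to be laid out 384 bytes
+ // (24 * 16) apart, so after writing one pixel of candidates c1..c8 the code
+ // rewinds by 7*384 and advances one pixel (offset = 1 - 384*7). Each candidate
+ // is a rounded bilinear average, (a + b + 1) >> 1, of two neighboring half-pel
+ // or integer-pel samples taken from bilin_base[].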
+
+ if (!(hpel_pos&1)) // diamond pattern
+ {
+ j = 16;
+ while (j--)
+ {
+ i = 16;
+ while (i--)
+ {
+ d = tr[24];
+ a = *tr++;
+ b = bl[1];
+ c = *br++;
+
+ *c1 = (c + a + 1) >> 1;
+ *(c1 += 384) = (b + a + 1) >> 1; /* c2 */
+ *(c1 += 384) = (b + c + 1) >> 1; /* c3 */
+ *(c1 += 384) = (b + d + 1) >> 1; /* c4 */
+
+ b = *bl++;
+
+ *(c1 += 384) = (c + d + 1) >> 1; /* c5 */
+ *(c1 += 384) = (b + d + 1) >> 1; /* c6 */
+ *(c1 += 384) = (b + c + 1) >> 1; /* c7 */
+ *(c1 += 384) = (b + a + 1) >> 1; /* c8 */
+
+ c1 += offset;
+ }
+ // advance to the next line, pitch is 24
+ tl += 8;
+ tr += 8;
+ bl += 8;
+ br += 8;
+ c1 += 8;
+ }
+ }
+ else // star pattern
+ {
+ j = 16;
+ while (j--)
+ {
+ i = 16;
+ while (i--)
+ {
+ a = *br++;
+ b = *tr++;
+ c = tl[1];
+ *c1 = (a + b + 1) >> 1;
+ b = bl[1];
+ *(c1 += 384) = (a + c + 1) >> 1; /* c2 */
+ c = tl[25];
+ *(c1 += 384) = (a + b + 1) >> 1; /* c3 */
+ b = tr[23];
+ *(c1 += 384) = (a + c + 1) >> 1; /* c4 */
+ c = tl[24];
+ *(c1 += 384) = (a + b + 1) >> 1; /* c5 */
+ b = *bl++;
+ *(c1 += 384) = (a + c + 1) >> 1; /* c6 */
+ c = *tl++;
+ *(c1 += 384) = (a + b + 1) >> 1; /* c7 */
+ *(c1 += 384) = (a + c + 1) >> 1; /* c8 */
+
+ c1 += offset;
+ }
+ // advance to the next line, pitch is 24
+ tl += 8;
+ tr += 8;
+ bl += 8;
+ br += 8;
+ c1 += 8;
+ }
+ }
+
+ return ;
+}
+
+
+/* assuming cand always has a pitch of 24 */
+int SATD_MB(uint8 *cand, uint8 *cur, int dmin)
+{
+ int cost;
+
+
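+ // Pack the current best cost into the upper 16 bits and the candidate pitch
+ // (24) into the lower 16 bits; AVCSAD_Macroblock_C presumably unpacks this and
+ // uses the cost for early termination of the SAD/SATD accumulation.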
+ dmin = (dmin << 16) | 24;
+ cost = AVCSAD_Macroblock_C(cand, cur, dmin, NULL);
+
+ return cost;
+}
+
+
+
+
+
diff --git a/media/libstagefright/codecs/avc/enc/src/header.cpp b/media/libstagefright/codecs/avc/enc/src/header.cpp
new file mode 100644
index 0000000..9acff9e
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/header.cpp
@@ -0,0 +1,917 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+#include "avcenc_api.h"
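+/* Reminder on notation: as their names suggest, ue_v() and se_v() write the
+   unsigned and signed Exp-Golomb codes that the spec calls ue(v) and se(v).
+   For example ue(0)='1', ue(1)='010', ue(2)='011'; the signed mapping
+   interleaves, so se(1)='010' and se(-1)='011'. BitstreamWriteBits() and
+   BitstreamWrite1Bit() write fixed-length fields. */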
+
+/** see subclause 7.4.2.1 */
+/* No need to check the valid ranges here; that is already done in SetEncodeParam().
+If we ever have to send another SPS, the ranges should be verified first before
+users call PVAVCEncodeSPS(). */
+AVCEnc_Status EncodeSPS(AVCEncObject *encvid, AVCEncBitstream *stream)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCSeqParamSet *seqParam = video->currSeqParams;
+ AVCVUIParams *vui = &(seqParam->vui_parameters);
+ int i;
+ AVCEnc_Status status = AVCENC_SUCCESS;
+
+ //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,"EncodeSPS",-1,-1);
+
+ status = BitstreamWriteBits(stream, 8, seqParam->profile_idc);
+ status = BitstreamWrite1Bit(stream, seqParam->constrained_set0_flag);
+ status = BitstreamWrite1Bit(stream, seqParam->constrained_set1_flag);
+ status = BitstreamWrite1Bit(stream, seqParam->constrained_set2_flag);
+ status = BitstreamWrite1Bit(stream, seqParam->constrained_set3_flag);
+ status = BitstreamWriteBits(stream, 4, 0); /* reserved_zero_4bits */
+ if (status != AVCENC_SUCCESS) /* we can check after each write also */
+ {
+ return status;
+ }
+
+ status = BitstreamWriteBits(stream, 8, seqParam->level_idc);
+ status = ue_v(stream, seqParam->seq_parameter_set_id);
+ status = ue_v(stream, seqParam->log2_max_frame_num_minus4);
+ status = ue_v(stream, seqParam->pic_order_cnt_type);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ if (seqParam->pic_order_cnt_type == 0)
+ {
+ status = ue_v(stream, seqParam->log2_max_pic_order_cnt_lsb_minus4);
+ }
+ else if (seqParam->pic_order_cnt_type == 1)
+ {
+ status = BitstreamWrite1Bit(stream, seqParam->delta_pic_order_always_zero_flag);
+ status = se_v(stream, seqParam->offset_for_non_ref_pic); /* upto 32 bits */
+ status = se_v(stream, seqParam->offset_for_top_to_bottom_field); /* upto 32 bits */
+ status = ue_v(stream, seqParam->num_ref_frames_in_pic_order_cnt_cycle);
+
+ for (i = 0; i < (int)(seqParam->num_ref_frames_in_pic_order_cnt_cycle); i++)
+ {
+ status = se_v(stream, seqParam->offset_for_ref_frame[i]); /* upto 32 bits */
+ }
+ }
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ status = ue_v(stream, seqParam->num_ref_frames);
+ status = BitstreamWrite1Bit(stream, seqParam->gaps_in_frame_num_value_allowed_flag);
+ status = ue_v(stream, seqParam->pic_width_in_mbs_minus1);
+ status = ue_v(stream, seqParam->pic_height_in_map_units_minus1);
+ status = BitstreamWrite1Bit(stream, seqParam->frame_mbs_only_flag);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+ /* if frame_mbs_only_flag is 0, then write mb_adaptive_frame_field_flag here */
+
+ status = BitstreamWrite1Bit(stream, seqParam->direct_8x8_inference_flag);
+ status = BitstreamWrite1Bit(stream, seqParam->frame_cropping_flag);
+ if (seqParam->frame_cropping_flag)
+ {
+ status = ue_v(stream, seqParam->frame_crop_left_offset);
+ status = ue_v(stream, seqParam->frame_crop_right_offset);
+ status = ue_v(stream, seqParam->frame_crop_top_offset);
+ status = ue_v(stream, seqParam->frame_crop_bottom_offset);
+ }
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ status = BitstreamWrite1Bit(stream, seqParam->vui_parameters_present_flag);
+ if (seqParam->vui_parameters_present_flag)
+ {
+ /* not supported */
+ //return AVCENC_SPS_FAIL;
+ EncodeVUI(stream, vui);
+ }
+
+ return status;
+}
+
+
+void EncodeVUI(AVCEncBitstream* stream, AVCVUIParams* vui)
+{
+ int temp;
+
+ temp = vui->aspect_ratio_info_present_flag;
+ BitstreamWrite1Bit(stream, temp);
+ if (temp)
+ {
+ BitstreamWriteBits(stream, 8, vui->aspect_ratio_idc);
+ if (vui->aspect_ratio_idc == 255)
+ {
+ BitstreamWriteBits(stream, 16, vui->sar_width);
+ BitstreamWriteBits(stream, 16, vui->sar_height);
+ }
+ }
+ temp = vui->overscan_info_present_flag;
+ BitstreamWrite1Bit(stream, temp);
+ if (temp)
+ {
+ BitstreamWrite1Bit(stream, vui->overscan_appropriate_flag);
+ }
+ temp = vui->video_signal_type_present_flag;
+ BitstreamWrite1Bit(stream, temp);
+ if (temp)
+ {
+ BitstreamWriteBits(stream, 3, vui->video_format);
+ BitstreamWrite1Bit(stream, vui->video_full_range_flag);
+ temp = vui->colour_description_present_flag;
+ BitstreamWrite1Bit(stream, temp);
+ if (temp)
+ {
+ BitstreamWriteBits(stream, 8, vui->colour_primaries);
+ BitstreamWriteBits(stream, 8, vui->transfer_characteristics);
+ BitstreamWriteBits(stream, 8, vui->matrix_coefficients);
+ }
+ }
+ temp = vui->chroma_location_info_present_flag;
+ BitstreamWrite1Bit(stream, temp);
+ if (temp)
+ {
+ ue_v(stream, vui->chroma_sample_loc_type_top_field);
+ ue_v(stream, vui->chroma_sample_loc_type_bottom_field);
+ }
+
+ temp = vui->timing_info_present_flag;
+ BitstreamWrite1Bit(stream, temp);
+ if (temp)
+ {
+ BitstreamWriteBits(stream, 32, vui->num_units_in_tick);
+ BitstreamWriteBits(stream, 32, vui->time_scale);
+ BitstreamWrite1Bit(stream, vui->fixed_frame_rate_flag);
+ }
+
+ temp = vui->nal_hrd_parameters_present_flag;
+ BitstreamWrite1Bit(stream, temp);
+ if (temp)
+ {
+ EncodeHRD(stream, &(vui->nal_hrd_parameters));
+ }
+ temp = vui->vcl_hrd_parameters_present_flag;
+ BitstreamWrite1Bit(stream, temp);
+ if (temp)
+ {
+ EncodeHRD(stream, &(vui->vcl_hrd_parameters));
+ }
+ if (vui->nal_hrd_parameters_present_flag || vui->vcl_hrd_parameters_present_flag)
+ {
+ BitstreamWrite1Bit(stream, vui->low_delay_hrd_flag);
+ }
+ BitstreamWrite1Bit(stream, vui->pic_struct_present_flag);
+ temp = vui->bitstream_restriction_flag;
+ BitstreamWrite1Bit(stream, temp);
+ if (temp)
+ {
+ BitstreamWrite1Bit(stream, vui->motion_vectors_over_pic_boundaries_flag);
+ ue_v(stream, vui->max_bytes_per_pic_denom);
+ ue_v(stream, vui->max_bits_per_mb_denom);
+ ue_v(stream, vui->log2_max_mv_length_horizontal);
+ ue_v(stream, vui->log2_max_mv_length_vertical);
+ ue_v(stream, vui->max_dec_frame_reordering);
+ ue_v(stream, vui->max_dec_frame_buffering);
+ }
+
+ return ;
+}
+
+
+void EncodeHRD(AVCEncBitstream* stream, AVCHRDParams* hrd)
+{
+ int i;
+
+ ue_v(stream, hrd->cpb_cnt_minus1);
+ BitstreamWriteBits(stream, 4, hrd->bit_rate_scale);
+ BitstreamWriteBits(stream, 4, hrd->cpb_size_scale);
+ for (i = 0; i <= (int)hrd->cpb_cnt_minus1; i++)
+ {
+ ue_v(stream, hrd->bit_rate_value_minus1[i]);
+ ue_v(stream, hrd->cpb_size_value_minus1[i]);
+ ue_v(stream, hrd->cbr_flag[i]);
+ }
+ BitstreamWriteBits(stream, 5, hrd->initial_cpb_removal_delay_length_minus1);
+ BitstreamWriteBits(stream, 5, hrd->cpb_removal_delay_length_minus1);
+ BitstreamWriteBits(stream, 5, hrd->dpb_output_delay_length_minus1);
+ BitstreamWriteBits(stream, 5, hrd->time_offset_length);
+
+ return ;
+}
+
+
+
+/** see subclause 7.4.2.2 */
+/* No need to check the valid ranges here; that is already done in SetEncodeParam().
+If we ever have to send another PPS, the ranges should be verified first before
+it is re-encoded. */
+AVCEnc_Status EncodePPS(AVCEncObject *encvid, AVCEncBitstream *stream)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ AVCPicParamSet *picParam = video->currPicParams;
+ int i, iGroup, numBits;
+ uint temp;
+
+ status = ue_v(stream, picParam->pic_parameter_set_id);
+ status = ue_v(stream, picParam->seq_parameter_set_id);
+ status = BitstreamWrite1Bit(stream, picParam->entropy_coding_mode_flag);
+ status = BitstreamWrite1Bit(stream, picParam->pic_order_present_flag);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ status = ue_v(stream, picParam->num_slice_groups_minus1);
+ if (picParam->num_slice_groups_minus1 > 0)
+ {
+ status = ue_v(stream, picParam->slice_group_map_type);
+ if (picParam->slice_group_map_type == 0)
+ {
+ for (iGroup = 0; iGroup <= (int)picParam->num_slice_groups_minus1; iGroup++)
+ {
+ status = ue_v(stream, picParam->run_length_minus1[iGroup]);
+ }
+ }
+ else if (picParam->slice_group_map_type == 2)
+ {
+ for (iGroup = 0; iGroup < (int)picParam->num_slice_groups_minus1; iGroup++)
+ {
+ status = ue_v(stream, picParam->top_left[iGroup]);
+ status = ue_v(stream, picParam->bottom_right[iGroup]);
+ }
+ }
+ else if (picParam->slice_group_map_type == 3 ||
+ picParam->slice_group_map_type == 4 ||
+ picParam->slice_group_map_type == 5)
+ {
+ status = BitstreamWrite1Bit(stream, picParam->slice_group_change_direction_flag);
+ status = ue_v(stream, picParam->slice_group_change_rate_minus1);
+ }
+ else /*if(picParam->slice_group_map_type == 6)*/
+ {
+ status = ue_v(stream, picParam->pic_size_in_map_units_minus1);
+
+ numBits = 0;/* ceil(log2(num_slice_groups_minus1+1)) bits */
+ i = picParam->num_slice_groups_minus1;
+ while (i > 0)
+ {
+ numBits++;
+ i >>= 1;
+ }
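+ /* e.g. num_slice_groups_minus1 = 6 (7 slice groups) gives numBits = 3,
+    i.e. ceil(log2(7)) bits per slice_group_id entry. */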
+
+ for (i = 0; i <= (int)picParam->pic_size_in_map_units_minus1; i++)
+ {
+ status = BitstreamWriteBits(stream, numBits, picParam->slice_group_id[i]);
+ }
+ }
+ }
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ status = ue_v(stream, picParam->num_ref_idx_l0_active_minus1);
+ status = ue_v(stream, picParam->num_ref_idx_l1_active_minus1);
+ status = BitstreamWrite1Bit(stream, picParam->weighted_pred_flag);
+ status = BitstreamWriteBits(stream, 2, picParam->weighted_bipred_idc);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ status = se_v(stream, picParam->pic_init_qp_minus26);
+ status = se_v(stream, picParam->pic_init_qs_minus26);
+ status = se_v(stream, picParam->chroma_qp_index_offset);
+
+ temp = picParam->deblocking_filter_control_present_flag << 2;
+ temp |= (picParam->constrained_intra_pred_flag << 1);
+ temp |= picParam->redundant_pic_cnt_present_flag;
+
+ status = BitstreamWriteBits(stream, 3, temp);
+
+ return status;
+}
+
+/** see subclause 7.4.3 */
+AVCEnc_Status EncodeSliceHeader(AVCEncObject *encvid, AVCEncBitstream *stream)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCSliceHeader *sliceHdr = video->sliceHdr;
+ AVCPicParamSet *currPPS = video->currPicParams;
+ AVCSeqParamSet *currSPS = video->currSeqParams;
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ int slice_type, temp, i;
+ int num_bits;
+
+ num_bits = (stream->write_pos << 3) - stream->bit_left;
+
+ status = ue_v(stream, sliceHdr->first_mb_in_slice);
+
+ slice_type = video->slice_type;
+
+ if (video->mbNum == 0) /* first mb in frame */
+ {
+ status = ue_v(stream, sliceHdr->slice_type);
+ }
+ else
+ {
+ status = ue_v(stream, slice_type);
+ }
+
+ status = ue_v(stream, sliceHdr->pic_parameter_set_id);
+
+ status = BitstreamWriteBits(stream, currSPS->log2_max_frame_num_minus4 + 4, sliceHdr->frame_num);
+
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+ /* if frame_mbs_only_flag is 0, encode field_pic_flag, bottom_field_flag here */
+
+ if (video->nal_unit_type == AVC_NALTYPE_IDR)
+ {
+ status = ue_v(stream, sliceHdr->idr_pic_id);
+ }
+
+ if (currSPS->pic_order_cnt_type == 0)
+ {
+ status = BitstreamWriteBits(stream, currSPS->log2_max_pic_order_cnt_lsb_minus4 + 4,
+ sliceHdr->pic_order_cnt_lsb);
+
+ if (currPPS->pic_order_present_flag && !sliceHdr->field_pic_flag)
+ {
+ status = se_v(stream, sliceHdr->delta_pic_order_cnt_bottom); /* 32 bits */
+ }
+ }
+ if (currSPS->pic_order_cnt_type == 1 && !currSPS->delta_pic_order_always_zero_flag)
+ {
+ status = se_v(stream, sliceHdr->delta_pic_order_cnt[0]); /* 32 bits */
+ if (currPPS->pic_order_present_flag && !sliceHdr->field_pic_flag)
+ {
+ status = se_v(stream, sliceHdr->delta_pic_order_cnt[1]); /* 32 bits */
+ }
+ }
+
+ if (currPPS->redundant_pic_cnt_present_flag)
+ {
+ status = ue_v(stream, sliceHdr->redundant_pic_cnt);
+ }
+
+ if (slice_type == AVC_B_SLICE)
+ {
+ status = BitstreamWrite1Bit(stream, sliceHdr->direct_spatial_mv_pred_flag);
+ }
+
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ if (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE || slice_type == AVC_B_SLICE)
+ {
+ status = BitstreamWrite1Bit(stream, sliceHdr->num_ref_idx_active_override_flag);
+ if (sliceHdr->num_ref_idx_active_override_flag)
+ {
+ /* we shouldn't enter this part at all */
+ status = ue_v(stream, sliceHdr->num_ref_idx_l0_active_minus1);
+ if (slice_type == AVC_B_SLICE)
+ {
+ status = ue_v(stream, sliceHdr->num_ref_idx_l1_active_minus1);
+ }
+ }
+ }
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ /* ref_pic_list_reordering() */
+ status = ref_pic_list_reordering(video, stream, sliceHdr, slice_type);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ if ((currPPS->weighted_pred_flag && (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE)) ||
+ (currPPS->weighted_bipred_idc == 1 && slice_type == AVC_B_SLICE))
+ {
+ // pred_weight_table(); // not supported !!
+ return AVCENC_PRED_WEIGHT_TAB_FAIL;
+ }
+
+ if (video->nal_ref_idc != 0)
+ {
+ status = dec_ref_pic_marking(video, stream, sliceHdr);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ if (currPPS->entropy_coding_mode_flag && slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)
+ {
+ return AVCENC_CABAC_FAIL;
+ /* ue_v(stream,&(sliceHdr->cabac_init_idc));
+ if(sliceHdr->cabac_init_idc > 2){
+ // not supported !!!!
+ }*/
+ }
+
+ status = se_v(stream, sliceHdr->slice_qp_delta);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ if (slice_type == AVC_SP_SLICE || slice_type == AVC_SI_SLICE)
+ {
+ if (slice_type == AVC_SP_SLICE)
+ {
+ status = BitstreamWrite1Bit(stream, sliceHdr->sp_for_switch_flag);
+ /* if sp_for_switch_flag is 0, P macroblocks in the SP slice are decoded using
+ the SP decoding process for non-switching pictures in 8.6.1 */
+ /* otherwise, P macroblocks in the SP slice are decoded using the SP and SI decoding
+ process for switching pictures in 8.6.2 */
+ }
+ status = se_v(stream, sliceHdr->slice_qs_delta);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ if (currPPS->deblocking_filter_control_present_flag)
+ {
+
+ status = ue_v(stream, sliceHdr->disable_deblocking_filter_idc);
+
+ if (sliceHdr->disable_deblocking_filter_idc != 1)
+ {
+ status = se_v(stream, sliceHdr->slice_alpha_c0_offset_div2);
+
+ status = se_v(stream, sliceHdr->slice_beta_offset_div_2);
+ }
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ if (currPPS->num_slice_groups_minus1 > 0 && currPPS->slice_group_map_type >= 3
+ && currPPS->slice_group_map_type <= 5)
+ {
+ /* Ceil(Log2(PicSizeInMapUnits/(float)SliceGroupChangeRate + 1)) */
+ temp = video->PicSizeInMapUnits / video->SliceGroupChangeRate;
+ if (video->PicSizeInMapUnits % video->SliceGroupChangeRate)
+ {
+ temp++;
+ }
+ i = 0;
+ while (temp > 1)
+ {
+ temp >>= 1;
+ i++;
+ }
+
+ BitstreamWriteBits(stream, i, sliceHdr->slice_group_change_cycle);
+ }
+
+
+ encvid->rateCtrl->NumberofHeaderBits += (stream->write_pos << 3) - stream->bit_left - num_bits;
+
+ return AVCENC_SUCCESS;
+}
+
+/** see subclause 7.4.3.1 */
+AVCEnc_Status ref_pic_list_reordering(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type)
+{
+ (void)(video);
+ int i;
+ AVCEnc_Status status = AVCENC_SUCCESS;
+
+ if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)
+ {
+ status = BitstreamWrite1Bit(stream, sliceHdr->ref_pic_list_reordering_flag_l0);
+ if (sliceHdr->ref_pic_list_reordering_flag_l0)
+ {
+ i = 0;
+ do
+ {
+ status = ue_v(stream, sliceHdr->reordering_of_pic_nums_idc_l0[i]);
+ if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 ||
+ sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1)
+ {
+ status = ue_v(stream, sliceHdr->abs_diff_pic_num_minus1_l0[i]);
+ /* this check should be in InitSlice(), if we ever use it */
+ /*if(sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 &&
+ sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum/2 -1)
+ {
+ return AVCENC_REF_PIC_REORDER_FAIL; // out of range
+ }
+ if(sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1 &&
+ sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum/2 -2)
+ {
+ return AVCENC_REF_PIC_REORDER_FAIL; // out of range
+ }*/
+ }
+ else if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 2)
+ {
+ status = ue_v(stream, sliceHdr->long_term_pic_num_l0[i]);
+ }
+ i++;
+ }
+ while (sliceHdr->reordering_of_pic_nums_idc_l0[i] != 3
+ && i <= (int)sliceHdr->num_ref_idx_l0_active_minus1 + 1) ;
+ }
+ }
+ if (slice_type == AVC_B_SLICE)
+ {
+ status = BitstreamWrite1Bit(stream, sliceHdr->ref_pic_list_reordering_flag_l1);
+ if (sliceHdr->ref_pic_list_reordering_flag_l1)
+ {
+ i = 0;
+ do
+ {
+ status = ue_v(stream, sliceHdr->reordering_of_pic_nums_idc_l1[i]);
+ if (sliceHdr->reordering_of_pic_nums_idc_l1[i] == 0 ||
+ sliceHdr->reordering_of_pic_nums_idc_l1[i] == 1)
+ {
+ status = ue_v(stream, sliceHdr->abs_diff_pic_num_minus1_l1[i]);
+ /* This check should be in InitSlice() if we ever use it
+ if(sliceHdr->reordering_of_pic_nums_idc_l1[i] == 0 &&
+ sliceHdr->abs_diff_pic_num_minus1_l1[i] > video->MaxPicNum/2 -1)
+ {
+ return AVCENC_REF_PIC_REORDER_FAIL; // out of range
+ }
+ if(sliceHdr->reordering_of_pic_nums_idc_l1[i] == 1 &&
+ sliceHdr->abs_diff_pic_num_minus1_l1[i] > video->MaxPicNum/2 -2)
+ {
+ return AVCENC_REF_PIC_REORDER_FAIL; // out of range
+ }*/
+ }
+ else if (sliceHdr->reordering_of_pic_nums_idc_l1[i] == 2)
+ {
+ status = ue_v(stream, sliceHdr->long_term_pic_num_l1[i]);
+ }
+ i++;
+ }
+ while (sliceHdr->reordering_of_pic_nums_idc_l1[i] != 3
+ && i <= (int)sliceHdr->num_ref_idx_l1_active_minus1 + 1) ;
+ }
+ }
+
+ return status;
+}
+
+/** see subclause 7.4.3.3 */
+AVCEnc_Status dec_ref_pic_marking(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr)
+{
+ int i;
+ AVCEnc_Status status = AVCENC_SUCCESS;
+
+ if (video->nal_unit_type == AVC_NALTYPE_IDR)
+ {
+ status = BitstreamWrite1Bit(stream, sliceHdr->no_output_of_prior_pics_flag);
+ status = BitstreamWrite1Bit(stream, sliceHdr->long_term_reference_flag);
+ if (sliceHdr->long_term_reference_flag == 0) /* used for short-term */
+ {
+ video->MaxLongTermFrameIdx = -1; /* no long-term frame index */
+ }
+ else /* used for long-term */
+ {
+ video->MaxLongTermFrameIdx = 0;
+ video->LongTermFrameIdx = 0;
+ }
+ }
+ else
+ {
+ status = BitstreamWrite1Bit(stream, sliceHdr->adaptive_ref_pic_marking_mode_flag); /* default to zero */
+ if (sliceHdr->adaptive_ref_pic_marking_mode_flag)
+ {
+ i = 0;
+ do
+ {
+ status = ue_v(stream, sliceHdr->memory_management_control_operation[i]);
+ if (sliceHdr->memory_management_control_operation[i] == 1 ||
+ sliceHdr->memory_management_control_operation[i] == 3)
+ {
+ status = ue_v(stream, sliceHdr->difference_of_pic_nums_minus1[i]);
+ }
+ if (sliceHdr->memory_management_control_operation[i] == 2)
+ {
+ status = ue_v(stream, sliceHdr->long_term_pic_num[i]);
+ }
+ if (sliceHdr->memory_management_control_operation[i] == 3 ||
+ sliceHdr->memory_management_control_operation[i] == 6)
+ {
+ status = ue_v(stream, sliceHdr->long_term_frame_idx[i]);
+ }
+ if (sliceHdr->memory_management_control_operation[i] == 4)
+ {
+ status = ue_v(stream, sliceHdr->max_long_term_frame_idx_plus1[i]);
+ }
+ i++;
+ }
+ while (sliceHdr->memory_management_control_operation[i] != 0 && i < MAX_DEC_REF_PIC_MARKING);
+ if (i >= MAX_DEC_REF_PIC_MARKING && sliceHdr->memory_management_control_operation[i] != 0)
+ {
+ return AVCENC_DEC_REF_PIC_MARK_FAIL; /* ran out of room for the marking operations */
+ }
+ }
+ }
+
+ return status;
+}
+
+/* see subclause 8.2.1 Decoding process for picture order count.
+See also PostPOC() for initialization of some variables. */
+AVCEnc_Status InitPOC(AVCEncObject *encvid)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCSeqParamSet *currSPS = video->currSeqParams;
+ AVCSliceHeader *sliceHdr = video->sliceHdr;
+ AVCFrameIO *currInput = encvid->currInput;
+ int i;
+
+ switch (currSPS->pic_order_cnt_type)
+ {
+ case 0: /* POC MODE 0 , subclause 8.2.1.1 */
+ /* encoding part */
+ if (video->nal_unit_type == AVC_NALTYPE_IDR)
+ {
+ encvid->dispOrdPOCRef = currInput->disp_order;
+ }
+ while (currInput->disp_order < encvid->dispOrdPOCRef)
+ {
+ encvid->dispOrdPOCRef -= video->MaxPicOrderCntLsb;
+ }
+ sliceHdr->pic_order_cnt_lsb = currInput->disp_order - encvid->dispOrdPOCRef;
+ while (sliceHdr->pic_order_cnt_lsb >= video->MaxPicOrderCntLsb)
+ {
+ sliceHdr->pic_order_cnt_lsb -= video->MaxPicOrderCntLsb;
+ }
+ /* decoding part */
+ /* Calculate the MSBs of current picture */
+ if (video->nal_unit_type == AVC_NALTYPE_IDR)
+ {
+ video->prevPicOrderCntMsb = 0;
+ video->prevPicOrderCntLsb = 0;
+ }
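+ /* Example of the lsb/msb wraparound handling below: with MaxPicOrderCntLsb = 16,
+    prevPicOrderCntLsb = 14 and a current pic_order_cnt_lsb of 2, the lsb has
+    wrapped around, so PicOrderCntMsb is advanced by 16; the symmetric case
+    steps it back by 16. */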
+ if (sliceHdr->pic_order_cnt_lsb < video->prevPicOrderCntLsb &&
+ (video->prevPicOrderCntLsb - sliceHdr->pic_order_cnt_lsb) >= (video->MaxPicOrderCntLsb / 2))
+ video->PicOrderCntMsb = video->prevPicOrderCntMsb + video->MaxPicOrderCntLsb;
+ else if (sliceHdr->pic_order_cnt_lsb > video->prevPicOrderCntLsb &&
+ (sliceHdr->pic_order_cnt_lsb - video->prevPicOrderCntLsb) > (video->MaxPicOrderCntLsb / 2))
+ video->PicOrderCntMsb = video->prevPicOrderCntMsb - video->MaxPicOrderCntLsb;
+ else
+ video->PicOrderCntMsb = video->prevPicOrderCntMsb;
+
+ /* JVT-I010 page 81 is different from JM7.3 */
+ if (!sliceHdr->field_pic_flag || !sliceHdr->bottom_field_flag)
+ {
+ video->PicOrderCnt = video->TopFieldOrderCnt = video->PicOrderCntMsb + sliceHdr->pic_order_cnt_lsb;
+ }
+
+ if (!sliceHdr->field_pic_flag)
+ {
+ video->BottomFieldOrderCnt = video->TopFieldOrderCnt + sliceHdr->delta_pic_order_cnt_bottom;
+ }
+ else if (sliceHdr->bottom_field_flag)
+ {
+ video->PicOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCntMsb + sliceHdr->pic_order_cnt_lsb;
+ }
+
+ if (!sliceHdr->field_pic_flag)
+ {
+ video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);
+ }
+
+ if (video->currPicParams->pic_order_present_flag && !sliceHdr->field_pic_flag)
+ {
+ sliceHdr->delta_pic_order_cnt_bottom = 0; /* defaulted to zero */
+ }
+
+ break;
+ case 1: /* POC MODE 1, subclause 8.2.1.2 */
+ /* calculate FrameNumOffset */
+ if (video->nal_unit_type == AVC_NALTYPE_IDR)
+ {
+ encvid->dispOrdPOCRef = currInput->disp_order; /* reset the reference point */
+ video->prevFrameNumOffset = 0;
+ video->FrameNumOffset = 0;
+ }
+ else if (video->prevFrameNum > sliceHdr->frame_num)
+ {
+ video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;
+ }
+ else
+ {
+ video->FrameNumOffset = video->prevFrameNumOffset;
+ }
+ /* calculate absFrameNum */
+ if (currSPS->num_ref_frames_in_pic_order_cnt_cycle)
+ {
+ video->absFrameNum = video->FrameNumOffset + sliceHdr->frame_num;
+ }
+ else
+ {
+ video->absFrameNum = 0;
+ }
+
+ if (video->absFrameNum > 0 && video->nal_ref_idc == 0)
+ {
+ video->absFrameNum--;
+ }
+
+ /* derive picOrderCntCycleCnt and frameNumInPicOrderCntCycle */
+ if (video->absFrameNum > 0)
+ {
+ video->picOrderCntCycleCnt = (video->absFrameNum - 1) / currSPS->num_ref_frames_in_pic_order_cnt_cycle;
+ video->frameNumInPicOrderCntCycle = (video->absFrameNum - 1) % currSPS->num_ref_frames_in_pic_order_cnt_cycle;
+ }
+ /* derive expectedDeltaPerPicOrderCntCycle, this value can be computed up front. */
+ video->expectedDeltaPerPicOrderCntCycle = 0;
+ for (i = 0; i < (int)currSPS->num_ref_frames_in_pic_order_cnt_cycle; i++)
+ {
+ video->expectedDeltaPerPicOrderCntCycle += currSPS->offset_for_ref_frame[i];
+ }
+ /* derive expectedPicOrderCnt */
+ if (video->absFrameNum)
+ {
+ video->expectedPicOrderCnt = video->picOrderCntCycleCnt * video->expectedDeltaPerPicOrderCntCycle;
+ for (i = 0; i <= video->frameNumInPicOrderCntCycle; i++)
+ {
+ video->expectedPicOrderCnt += currSPS->offset_for_ref_frame[i];
+ }
+ }
+ else
+ {
+ video->expectedPicOrderCnt = 0;
+ }
+
+ if (video->nal_ref_idc == 0)
+ {
+ video->expectedPicOrderCnt += currSPS->offset_for_non_ref_pic;
+ }
+ /* derive TopFieldOrderCnt and BottomFieldOrderCnt */
+ /* encoding part */
+ if (!currSPS->delta_pic_order_always_zero_flag)
+ {
+ sliceHdr->delta_pic_order_cnt[0] = currInput->disp_order - encvid->dispOrdPOCRef - video->expectedPicOrderCnt;
+
+ if (video->currPicParams->pic_order_present_flag && !sliceHdr->field_pic_flag)
+ {
+ sliceHdr->delta_pic_order_cnt[1] = sliceHdr->delta_pic_order_cnt[0]; /* should be calculated from currInput->bottom_field->disp_order */
+ }
+ else
+ {
+ sliceHdr->delta_pic_order_cnt[1] = 0;
+ }
+ }
+ else
+ {
+ sliceHdr->delta_pic_order_cnt[0] = sliceHdr->delta_pic_order_cnt[1] = 0;
+ }
+
+ if (sliceHdr->field_pic_flag == 0)
+ {
+ video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];
+ video->BottomFieldOrderCnt = video->TopFieldOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[1];
+
+ video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);
+ }
+ else if (sliceHdr->bottom_field_flag == 0)
+ {
+ video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];
+ video->PicOrderCnt = video->TopFieldOrderCnt;
+ }
+ else
+ {
+ video->BottomFieldOrderCnt = video->expectedPicOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[0];
+ video->PicOrderCnt = video->BottomFieldOrderCnt;
+ }
+ break;
+
+
+ case 2: /* POC MODE 2, subclause 8.2.1.3 */
+ /* decoding order must be the same as display order */
+ /* we don't check for that. The decoder will just output in decoding order. */
+ /* Check for 2 consecutive non-reference frame */
+ if (video->nal_ref_idc == 0)
+ {
+ if (encvid->dispOrdPOCRef == 1)
+ {
+ return AVCENC_CONSECUTIVE_NONREF;
+ }
+ encvid->dispOrdPOCRef = 1; /* acts as a flag for non-ref */
+ }
+ else
+ {
+ encvid->dispOrdPOCRef = 0;
+ }
+
+
+ if (video->nal_unit_type == AVC_NALTYPE_IDR)
+ {
+ video->FrameNumOffset = 0;
+ }
+ else if (video->prevFrameNum > sliceHdr->frame_num)
+ {
+ video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;
+ }
+ else
+ {
+ video->FrameNumOffset = video->prevFrameNumOffset;
+ }
+ /* derive tempPicOrderCnt, we just use PicOrderCnt */
+ if (video->nal_unit_type == AVC_NALTYPE_IDR)
+ {
+ video->PicOrderCnt = 0;
+ }
+ else if (video->nal_ref_idc == 0)
+ {
+ video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num) - 1;
+ }
+ else
+ {
+ video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num);
+ }
+ /* derive TopFieldOrderCnt and BottomFieldOrderCnt */
+ if (sliceHdr->field_pic_flag == 0)
+ {
+ video->TopFieldOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCnt;
+ }
+ else if (sliceHdr->bottom_field_flag)
+ {
+ video->BottomFieldOrderCnt = video->PicOrderCnt;
+ }
+ else
+ {
+ video->TopFieldOrderCnt = video->PicOrderCnt;
+ }
+ break;
+ default:
+ return AVCENC_POC_FAIL;
+ }
+
+ return AVCENC_SUCCESS;
+}
+
+/** see subclause 8.2.1 */
+AVCEnc_Status PostPOC(AVCCommonObj *video)
+{
+ AVCSliceHeader *sliceHdr = video->sliceHdr;
+ AVCSeqParamSet *currSPS = video->currSeqParams;
+
+ video->prevFrameNum = sliceHdr->frame_num;
+
+ switch (currSPS->pic_order_cnt_type)
+ {
+ case 0: /* subclause 8.2.1.1 */
+ if (video->mem_mgr_ctrl_eq_5)
+ {
+ video->prevPicOrderCntMsb = 0;
+ video->prevPicOrderCntLsb = video->TopFieldOrderCnt;
+ }
+ else
+ {
+ video->prevPicOrderCntMsb = video->PicOrderCntMsb;
+ video->prevPicOrderCntLsb = sliceHdr->pic_order_cnt_lsb;
+ }
+ break;
+ case 1: /* subclause 8.2.1.2 and 8.2.1.3 */
+ case 2:
+ if (video->mem_mgr_ctrl_eq_5)
+ {
+ video->prevFrameNumOffset = 0;
+ }
+ else
+ {
+ video->prevFrameNumOffset = video->FrameNumOffset;
+ }
+ break;
+ }
+
+ return AVCENC_SUCCESS;
+}
+
diff --git a/media/libstagefright/codecs/avc/enc/src/init.cpp b/media/libstagefright/codecs/avc/enc/src/init.cpp
new file mode 100644
index 0000000..c258b57
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/init.cpp
@@ -0,0 +1,899 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+#include "avcenc_api.h"
+
+#define LOG2_MAX_FRAME_NUM_MINUS4 12 /* 12 default */
+#define SLICE_GROUP_CHANGE_CYCLE 1 /* default */
+
+/* initialized variables to be used in SPS*/
+AVCEnc_Status SetEncodeParam(AVCHandle* avcHandle, AVCEncParams* encParam,
+ void* extSPS, void* extPPS)
+{
+ AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
+ AVCCommonObj *video = encvid->common;
+ AVCSeqParamSet *seqParam = video->currSeqParams;
+ AVCPicParamSet *picParam = video->currPicParams;
+ AVCSliceHeader *sliceHdr = video->sliceHdr;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ AVCEnc_Status status;
+ void *userData = avcHandle->userData;
+ int ii, maxFrameNum;
+
+ AVCSeqParamSet* extS = NULL;
+ AVCPicParamSet* extP = NULL;
+
+ if (extSPS) extS = (AVCSeqParamSet*) extSPS;
+ if (extPPS) extP = (AVCPicParamSet*) extPPS;
+
+ /* This part sets the default values of the encoding options this
+ library supports in the seqParam, picParam and sliceHdr structures, and
+ also copies the values from encParam into those 3 structures.
+
+ Some parameters will be assigned later when we encode the SPS or PPS, such as
+ seq_parameter_set_id or pic_parameter_set_id. Also, some of the slice parameters
+ have to be re-assigned on a per-slice basis, such as frame_num, slice_type,
+ first_mb_in_slice, pic_order_cnt_lsb, slice_qp_delta and slice_group_change_cycle. */
+
+ /* profile_idc, constrained_setx_flag and level_idc is set by VerifyProfile(),
+ and VerifyLevel() functions later. */
+
+ encvid->fullsearch_enable = encParam->fullsearch;
+
+ encvid->outOfBandParamSet = ((encParam->out_of_band_param_set == AVC_ON) ? TRUE : FALSE);
+
+ /* parameters derived from the encParam that are used in the SPS */
+ if (extS)
+ {
+ video->MaxPicOrderCntLsb = 1 << (extS->log2_max_pic_order_cnt_lsb_minus4 + 4);
+ video->PicWidthInMbs = extS->pic_width_in_mbs_minus1 + 1;
+ video->PicHeightInMapUnits = extS->pic_height_in_map_units_minus1 + 1 ;
+ video->FrameHeightInMbs = (2 - extS->frame_mbs_only_flag) * video->PicHeightInMapUnits ;
+ }
+ else
+ {
+ video->MaxPicOrderCntLsb = 1 << (encParam->log2_max_poc_lsb_minus_4 + 4);
+ video->PicWidthInMbs = (encParam->width + 15) >> 4; /* round it to multiple of 16 */
+ video->FrameHeightInMbs = (encParam->height + 15) >> 4; /* round it to multiple of 16 */
+ video->PicHeightInMapUnits = video->FrameHeightInMbs;
+ }
+
+ video->PicWidthInSamplesL = video->PicWidthInMbs * 16 ;
+ if (video->PicWidthInSamplesL + 32 > 0xFFFF)
+ {
+ return AVCENC_NOT_SUPPORTED; // we use 2-bytes for pitch
+ }
+
+ video->PicWidthInSamplesC = video->PicWidthInMbs * 8 ;
+ video->PicHeightInMbs = video->FrameHeightInMbs;
+ video->PicSizeInMapUnits = video->PicWidthInMbs * video->PicHeightInMapUnits ;
+ video->PicHeightInSamplesL = video->PicHeightInMbs * 16;
+ video->PicHeightInSamplesC = video->PicHeightInMbs * 8;
+ video->PicSizeInMbs = video->PicWidthInMbs * video->PicHeightInMbs;
+
+ if (!extS && !extP)
+ {
+ maxFrameNum = (encParam->idr_period == -1) ? (1 << 16) : encParam->idr_period;
+ ii = 0;
+ while (maxFrameNum > 0)
+ {
+ ii++;
+ maxFrameNum >>= 1;
+ }
+ if (ii < 4) ii = 4;
+ else if (ii > 16) ii = 16;
+
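+ /* ii is now the bit length of the IDR period, clamped to [4,16]. For example,
+    idr_period = 300 gives ii = 9, so the assignments below yield
+    log2_max_frame_num_minus4 = 5 and MaxFrameNum = 512, presumably chosen so
+    that frame_num does not wrap within one IDR period. */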
+ seqParam->log2_max_frame_num_minus4 = ii - 4;//LOG2_MAX_FRAME_NUM_MINUS4; /* default */
+
+ video->MaxFrameNum = 1 << ii; //(LOG2_MAX_FRAME_NUM_MINUS4 + 4); /* default */
+ video->MaxPicNum = video->MaxFrameNum;
+
+ /************* set the SPS *******************/
+ seqParam->seq_parameter_set_id = 0; /* start with zero */
+ /* POC */
+ seqParam->pic_order_cnt_type = encParam->poc_type; /* POC type */
+ if (encParam->poc_type == 0)
+ {
+ if (/*encParam->log2_max_poc_lsb_minus_4<0 || (no need, it's unsigned)*/
+ encParam->log2_max_poc_lsb_minus_4 > 12)
+ {
+ return AVCENC_INVALID_POC_LSB;
+ }
+ seqParam->log2_max_pic_order_cnt_lsb_minus4 = encParam->log2_max_poc_lsb_minus_4;
+ }
+ else if (encParam->poc_type == 1)
+ {
+ seqParam->delta_pic_order_always_zero_flag = encParam->delta_poc_zero_flag;
+ seqParam->offset_for_non_ref_pic = encParam->offset_poc_non_ref;
+ seqParam->offset_for_top_to_bottom_field = encParam->offset_top_bottom;
+ seqParam->num_ref_frames_in_pic_order_cnt_cycle = encParam->num_ref_in_cycle;
+ if (encParam->offset_poc_ref == NULL)
+ {
+ return AVCENC_ENCPARAM_MEM_FAIL;
+ }
+ for (ii = 0; ii < encParam->num_ref_frame; ii++)
+ {
+ seqParam->offset_for_ref_frame[ii] = encParam->offset_poc_ref[ii];
+ }
+ }
+ /* number of reference frame */
+ if (encParam->num_ref_frame > 16 || encParam->num_ref_frame < 0)
+ {
+ return AVCENC_INVALID_NUM_REF;
+ }
+ seqParam->num_ref_frames = encParam->num_ref_frame; /* num reference frame range 0...16*/
+ seqParam->gaps_in_frame_num_value_allowed_flag = FALSE;
+ seqParam->pic_width_in_mbs_minus1 = video->PicWidthInMbs - 1;
+ seqParam->pic_height_in_map_units_minus1 = video->PicHeightInMapUnits - 1;
+ seqParam->frame_mbs_only_flag = TRUE;
+ seqParam->mb_adaptive_frame_field_flag = FALSE;
+ seqParam->direct_8x8_inference_flag = FALSE; /* default */
+ seqParam->frame_cropping_flag = FALSE;
+ seqParam->frame_crop_bottom_offset = 0;
+ seqParam->frame_crop_left_offset = 0;
+ seqParam->frame_crop_right_offset = 0;
+ seqParam->frame_crop_top_offset = 0;
+ seqParam->vui_parameters_present_flag = FALSE; /* default */
+ }
+ else if (extS) // use external SPS and PPS
+ {
+ seqParam->seq_parameter_set_id = extS->seq_parameter_set_id;
+ seqParam->log2_max_frame_num_minus4 = extS->log2_max_frame_num_minus4;
+ video->MaxFrameNum = 1 << (extS->log2_max_frame_num_minus4 + 4);
+ video->MaxPicNum = video->MaxFrameNum;
+ if (encParam->idr_period > (int)(video->MaxFrameNum) || (encParam->idr_period == -1))
+ {
+ encParam->idr_period = (int)video->MaxFrameNum;
+ }
+
+ seqParam->pic_order_cnt_type = extS->pic_order_cnt_type;
+ if (seqParam->pic_order_cnt_type == 0)
+ {
+ if (/*extS->log2_max_pic_order_cnt_lsb_minus4<0 || (no need it's unsigned)*/
+ extS->log2_max_pic_order_cnt_lsb_minus4 > 12)
+ {
+ return AVCENC_INVALID_POC_LSB;
+ }
+ seqParam->log2_max_pic_order_cnt_lsb_minus4 = extS->log2_max_pic_order_cnt_lsb_minus4;
+ }
+ else if (seqParam->pic_order_cnt_type == 1)
+ {
+ seqParam->delta_pic_order_always_zero_flag = extS->delta_pic_order_always_zero_flag;
+ seqParam->offset_for_non_ref_pic = extS->offset_for_non_ref_pic;
+ seqParam->offset_for_top_to_bottom_field = extS->offset_for_top_to_bottom_field;
+ seqParam->num_ref_frames_in_pic_order_cnt_cycle = extS->num_ref_frames_in_pic_order_cnt_cycle;
+ if (extS->offset_for_ref_frame == NULL)
+ {
+ return AVCENC_ENCPARAM_MEM_FAIL;
+ }
+ for (ii = 0; ii < (int) extS->num_ref_frames; ii++)
+ {
+ seqParam->offset_for_ref_frame[ii] = extS->offset_for_ref_frame[ii];
+ }
+ }
+ /* number of reference frame */
+ if (extS->num_ref_frames > 16 /*|| extS->num_ref_frames<0 (no need, it's unsigned)*/)
+ {
+ return AVCENC_INVALID_NUM_REF;
+ }
+ seqParam->num_ref_frames = extS->num_ref_frames; /* num reference frame range 0...16*/
+ seqParam->gaps_in_frame_num_value_allowed_flag = extS->gaps_in_frame_num_value_allowed_flag;
+ seqParam->pic_width_in_mbs_minus1 = extS->pic_width_in_mbs_minus1;
+ seqParam->pic_height_in_map_units_minus1 = extS->pic_height_in_map_units_minus1;
+ seqParam->frame_mbs_only_flag = extS->frame_mbs_only_flag;
+ if (extS->frame_mbs_only_flag != TRUE)
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+ seqParam->mb_adaptive_frame_field_flag = extS->mb_adaptive_frame_field_flag;
+ if (extS->mb_adaptive_frame_field_flag != FALSE)
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+
+ seqParam->direct_8x8_inference_flag = extS->direct_8x8_inference_flag;
+ seqParam->frame_cropping_flag = extS->frame_cropping_flag ;
+ if (extS->frame_cropping_flag != FALSE)
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+
+ seqParam->frame_crop_bottom_offset = 0;
+ seqParam->frame_crop_left_offset = 0;
+ seqParam->frame_crop_right_offset = 0;
+ seqParam->frame_crop_top_offset = 0;
+ seqParam->vui_parameters_present_flag = extS->vui_parameters_present_flag;
+ if (extS->vui_parameters_present_flag)
+ {
+ memcpy(&(seqParam->vui_parameters), &(extS->vui_parameters), sizeof(AVCVUIParams));
+ }
+ }
+ else
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+
+ /***************** now PPS ******************************/
+ if (!extP && !extS)
+ {
+ picParam->pic_parameter_set_id = (uint)(-1); /* set to -1 so the first increment yields zero */
+ picParam->seq_parameter_set_id = (uint)(-1); /* set to -1 so the first increment yields zero */
+ picParam->entropy_coding_mode_flag = 0; /* default to CAVLC */
+ picParam->pic_order_present_flag = 0; /* default for now, will need it for B-slice */
+ /* FMO */
+ if (encParam->num_slice_group < 1 || encParam->num_slice_group > MAX_NUM_SLICE_GROUP)
+ {
+ return AVCENC_INVALID_NUM_SLICEGROUP;
+ }
+ picParam->num_slice_groups_minus1 = encParam->num_slice_group - 1;
+
+ if (picParam->num_slice_groups_minus1 > 0)
+ {
+ picParam->slice_group_map_type = encParam->fmo_type;
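+ /* slice_group_map_type values follow the AVC spec: 0 = interleaved, 1 = dispersed,
+ 2 = foreground + left-over, 3 = box-out, 4 = raster scan, 5 = wipe, 6 = explicit
+ assignment of each map unit. */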
+ switch (encParam->fmo_type)
+ {
+ case 0:
+ for (ii = 0; ii <= (int)picParam->num_slice_groups_minus1; ii++)
+ {
+ picParam->run_length_minus1[ii] = encParam->run_length_minus1[ii];
+ }
+ break;
+ case 2:
+ for (ii = 0; ii < (int)picParam->num_slice_groups_minus1; ii++)
+ {
+ picParam->top_left[ii] = encParam->top_left[ii];
+ picParam->bottom_right[ii] = encParam->bottom_right[ii];
+ }
+ break;
+ case 3:
+ case 4:
+ case 5:
+ if (encParam->change_dir_flag == AVC_ON)
+ {
+ picParam->slice_group_change_direction_flag = TRUE;
+ }
+ else
+ {
+ picParam->slice_group_change_direction_flag = FALSE;
+ }
+ if (/*encParam->change_rate_minus1 < 0 || (no need it's unsigned) */
+ encParam->change_rate_minus1 > video->PicSizeInMapUnits - 1)
+ {
+ return AVCENC_INVALID_CHANGE_RATE;
+ }
+ picParam->slice_group_change_rate_minus1 = encParam->change_rate_minus1;
+ video->SliceGroupChangeRate = picParam->slice_group_change_rate_minus1 + 1;
+ break;
+ case 6:
+ picParam->pic_size_in_map_units_minus1 = video->PicSizeInMapUnits - 1;
+
+ /* allocate picParam->slice_group_id */
+ picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits, DEFAULT_ATTR);
+ if (picParam->slice_group_id == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ if (encParam->slice_group == NULL)
+ {
+ return AVCENC_ENCPARAM_MEM_FAIL;
+ }
+ for (ii = 0; ii < (int)video->PicSizeInMapUnits; ii++)
+ {
+ picParam->slice_group_id[ii] = encParam->slice_group[ii];
+ }
+ break;
+ default:
+ return AVCENC_INVALID_FMO_TYPE;
+ }
+ }
+ picParam->num_ref_idx_l0_active_minus1 = encParam->num_ref_frame - 1; /* assume frame only */
+ picParam->num_ref_idx_l1_active_minus1 = 0; /* default value */
+ picParam->weighted_pred_flag = 0; /* no weighted prediction supported */
+ picParam->weighted_bipred_idc = 0; /* range 0,1,2 */
+ if (/*picParam->weighted_bipred_idc < 0 || (no need, it's unsigned) */
+ picParam->weighted_bipred_idc > 2)
+ {
+ return AVCENC_WEIGHTED_BIPRED_FAIL;
+ }
+ picParam->pic_init_qp_minus26 = 0; /* default, will be changed at slice level anyway */
+ if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25)
+ {
+ return AVCENC_INIT_QP_FAIL; /* out of range */
+ }
+ picParam->pic_init_qs_minus26 = 0;
+ if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25)
+ {
+ return AVCENC_INIT_QS_FAIL; /* out of range */
+ }
+
+ picParam->chroma_qp_index_offset = 0; /* default to zero for now */
+ if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12)
+ {
+ return AVCENC_CHROMA_QP_FAIL; /* out of range */
+ }
+ /* deblocking */
+ picParam->deblocking_filter_control_present_flag = (encParam->db_filter == AVC_ON) ? TRUE : FALSE ;
+ /* constrained intra prediction */
+ picParam->constrained_intra_pred_flag = (encParam->constrained_intra_pred == AVC_ON) ? TRUE : FALSE;
+ picParam->redundant_pic_cnt_present_flag = 0; /* default */
+ }
+ else if (extP)// external PPS
+ {
+ picParam->pic_parameter_set_id = extP->pic_parameter_set_id - 1; /* to be increased by one */
+ picParam->seq_parameter_set_id = extP->seq_parameter_set_id;
+ picParam->entropy_coding_mode_flag = extP->entropy_coding_mode_flag;
+ if (extP->entropy_coding_mode_flag != 0) /* default to CAVLC */
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+ picParam->pic_order_present_flag = extP->pic_order_present_flag; /* default for now, will need it for B-slice */
+ if (extP->pic_order_present_flag != 0)
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+ /* FMO */
+ if (/*(extP->num_slice_groups_minus1<0) || (no need it's unsigned) */
+ (extP->num_slice_groups_minus1 > MAX_NUM_SLICE_GROUP - 1))
+ {
+ return AVCENC_INVALID_NUM_SLICEGROUP;
+ }
+ picParam->num_slice_groups_minus1 = extP->num_slice_groups_minus1;
+
+ if (picParam->num_slice_groups_minus1 > 0)
+ {
+ picParam->slice_group_map_type = extP->slice_group_map_type;
+ switch (extP->slice_group_map_type)
+ {
+ case 0:
+ for (ii = 0; ii <= (int)extP->num_slice_groups_minus1; ii++)
+ {
+ picParam->run_length_minus1[ii] = extP->run_length_minus1[ii];
+ }
+ break;
+ case 2:
+ for (ii = 0; ii < (int)picParam->num_slice_groups_minus1; ii++)
+ {
+ picParam->top_left[ii] = extP->top_left[ii];
+ picParam->bottom_right[ii] = extP->bottom_right[ii];
+ }
+ break;
+ case 3:
+ case 4:
+ case 5:
+ picParam->slice_group_change_direction_flag = extP->slice_group_change_direction_flag;
+ if (/*extP->slice_group_change_rate_minus1 < 0 || (no need, it's unsigned) */
+ extP->slice_group_change_rate_minus1 > video->PicSizeInMapUnits - 1)
+ {
+ return AVCENC_INVALID_CHANGE_RATE;
+ }
+ picParam->slice_group_change_rate_minus1 = extP->slice_group_change_rate_minus1;
+ video->SliceGroupChangeRate = picParam->slice_group_change_rate_minus1 + 1;
+ break;
+ case 6:
+ if (extP->pic_size_in_map_units_minus1 != video->PicSizeInMapUnits - 1)
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+
+ picParam->pic_size_in_map_units_minus1 = extP->pic_size_in_map_units_minus1;
+
+ /* allocate picParam->slice_group_id */
+ picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits, DEFAULT_ATTR);
+ if (picParam->slice_group_id == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ if (extP->slice_group_id == NULL)
+ {
+ return AVCENC_ENCPARAM_MEM_FAIL;
+ }
+ for (ii = 0; ii < (int)video->PicSizeInMapUnits; ii++)
+ {
+ picParam->slice_group_id[ii] = extP->slice_group_id[ii];
+ }
+ break;
+ default:
+ return AVCENC_INVALID_FMO_TYPE;
+ }
+ }
+ picParam->num_ref_idx_l0_active_minus1 = extP->num_ref_idx_l0_active_minus1;
+ picParam->num_ref_idx_l1_active_minus1 = extP->num_ref_idx_l1_active_minus1; /* default value */
+ if (picParam->num_ref_idx_l1_active_minus1 != 0)
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+
+ if (extP->weighted_pred_flag)
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+
+ picParam->weighted_pred_flag = 0; /* no weighted prediction supported */
+ picParam->weighted_bipred_idc = extP->weighted_bipred_idc; /* range 0,1,2 */
+ if (/*picParam->weighted_bipred_idc < 0 || (no need, it's unsigned) */
+ picParam->weighted_bipred_idc > 2)
+ {
+ return AVCENC_WEIGHTED_BIPRED_FAIL;
+ }
+ picParam->pic_init_qp_minus26 = extP->pic_init_qp_minus26; /* default, will be changed at slice level anyway */
+ if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25)
+ {
+ return AVCENC_INIT_QP_FAIL; /* out of range */
+ }
+ picParam->pic_init_qs_minus26 = extP->pic_init_qs_minus26;
+ if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25)
+ {
+ return AVCENC_INIT_QS_FAIL; /* out of range */
+ }
+
+ picParam->chroma_qp_index_offset = extP->chroma_qp_index_offset; /* default to zero for now */
+ if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12)
+ {
+ return AVCENC_CHROMA_QP_FAIL; /* out of range */
+ }
+ /* deblocking */
+ picParam->deblocking_filter_control_present_flag = extP->deblocking_filter_control_present_flag;
+ /* constrained intra prediction */
+ picParam->constrained_intra_pred_flag = extP->constrained_intra_pred_flag;
+ if (extP->redundant_pic_cnt_present_flag != 0)
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+ picParam->redundant_pic_cnt_present_flag = extP->redundant_pic_cnt_present_flag; /* default */
+ }
+ else
+ {
+ return AVCENC_NOT_SUPPORTED;
+ }
+
+ /****************** now set up some SliceHeader parameters ***********/
+ if (picParam->deblocking_filter_control_present_flag == TRUE)
+ {
+ /* these values only present when db_filter is ON */
+ if (encParam->disable_db_idc > 2)
+ {
+ return AVCENC_INVALID_DEBLOCK_IDC; /* out of range */
+ }
+ sliceHdr->disable_deblocking_filter_idc = encParam->disable_db_idc;
+
+ if (encParam->alpha_offset < -6 || encParam->alpha_offset > 6)
+ {
+ return AVCENC_INVALID_ALPHA_OFFSET;
+ }
+ sliceHdr->slice_alpha_c0_offset_div2 = encParam->alpha_offset;
+
+ if (encParam->beta_offset < -6 || encParam->beta_offset > 6)
+ {
+ return AVCENC_INVALID_BETA_OFFSET;
+ }
+ sliceHdr->slice_beta_offset_div_2 = encParam->beta_offset;
+ }
+ if (encvid->outOfBandParamSet == TRUE)
+ {
+ sliceHdr->idr_pic_id = 0;
+ }
+ else
+ {
+ sliceHdr->idr_pic_id = (uint)(-1); /* set to -1 here; PVAVCEncodeNAL assigns the actual value */
+ }
+ sliceHdr->field_pic_flag = FALSE;
+ sliceHdr->bottom_field_flag = FALSE; /* won't be used anyway */
+ video->MbaffFrameFlag = (seqParam->mb_adaptive_frame_field_flag && !sliceHdr->field_pic_flag);
+
+ /* the rest will be set in InitSlice() */
+
+ /* now the rate control and performance related parameters */
+ rateCtrl->scdEnable = (encParam->auto_scd == AVC_ON) ? TRUE : FALSE;
+ rateCtrl->idrPeriod = encParam->idr_period + 1;
+ rateCtrl->intraMBRate = encParam->intramb_refresh;
+ rateCtrl->dpEnable = (encParam->data_par == AVC_ON) ? TRUE : FALSE;
+
+ rateCtrl->subPelEnable = (encParam->sub_pel == AVC_ON) ? TRUE : FALSE;
+ rateCtrl->mvRange = encParam->search_range;
+
+ rateCtrl->subMBEnable = (encParam->submb_pred == AVC_ON) ? TRUE : FALSE;
+ rateCtrl->rdOptEnable = (encParam->rdopt_mode == AVC_ON) ? TRUE : FALSE;
+ rateCtrl->bidirPred = (encParam->bidir_pred == AVC_ON) ? TRUE : FALSE;
+
+ rateCtrl->rcEnable = (encParam->rate_control == AVC_ON) ? TRUE : FALSE;
+ rateCtrl->initQP = encParam->initQP;
+ rateCtrl->initQP = AVC_CLIP3(0, 51, rateCtrl->initQP);
+
+ rateCtrl->bitRate = encParam->bitrate;
+ rateCtrl->cpbSize = encParam->CPB_size;
+ rateCtrl->initDelayOffset = (rateCtrl->bitRate * encParam->init_CBP_removal_delay / 1000);
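+ /* init_CBP_removal_delay is presumably expressed in milliseconds, so this product
+ gives the initial CPB fullness in bits. */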
+
+ if (encParam->frame_rate == 0)
+ {
+ return AVCENC_INVALID_FRAMERATE;
+ }
+
+ rateCtrl->frame_rate = (OsclFloat)(encParam->frame_rate * 1.0 / 1000);
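+ /* encParam->frame_rate appears to be given in frames per second scaled by 1000
+ (e.g. 30000 for 30 fps). */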
+// rateCtrl->srcInterval = encParam->src_interval;
+ rateCtrl->first_frame = 1; /* set this flag for the first time */
+
+ /* constrained_setx_flag will be set inside VerifyProfile(), called below. */
+ if (!extS && !extP)
+ {
+ seqParam->profile_idc = encParam->profile;
+ seqParam->constrained_set0_flag = FALSE;
+ seqParam->constrained_set1_flag = FALSE;
+ seqParam->constrained_set2_flag = FALSE;
+ seqParam->constrained_set3_flag = FALSE;
+ seqParam->level_idc = encParam->level;
+ }
+ else
+ {
+ seqParam->profile_idc = extS->profile_idc;
+ seqParam->constrained_set0_flag = extS->constrained_set0_flag;
+ seqParam->constrained_set1_flag = extS->constrained_set1_flag;
+ seqParam->constrained_set2_flag = extS->constrained_set2_flag;
+ seqParam->constrained_set3_flag = extS->constrained_set3_flag;
+ seqParam->level_idc = extS->level_idc;
+ }
+
+
+ status = VerifyProfile(encvid, seqParam, picParam);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ status = VerifyLevel(encvid, seqParam, picParam);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+
+ return AVCENC_SUCCESS;
+}
+
+/* verify the profile setting */
+AVCEnc_Status VerifyProfile(AVCEncObject *encvid, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam)
+{
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ AVCEnc_Status status = AVCENC_SUCCESS;
+
+ if (seqParam->profile_idc == 0) /* find profile for this setting */
+ {
+ /* find the right profile for it */
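+ /* the checks below go from the least to the most constrained tool set (Extended,
+ Main, Baseline), each overwriting profile_idc, so the most constrained profile
+ whose restrictions are met ends up selected while the corresponding
+ constrained_setx flags accumulate. */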
+ if (seqParam->direct_8x8_inference_flag == TRUE &&
+ picParam->entropy_coding_mode_flag == FALSE &&
+ picParam->num_slice_groups_minus1 <= 7 /*&&
+ picParam->num_slice_groups_minus1>=0 (no need, it's unsigned) */)
+ {
+ seqParam->profile_idc = AVC_EXTENDED;
+ seqParam->constrained_set2_flag = TRUE;
+ }
+
+ if (rateCtrl->dpEnable == FALSE &&
+ picParam->num_slice_groups_minus1 == 0 &&
+ picParam->redundant_pic_cnt_present_flag == FALSE)
+ {
+ seqParam->profile_idc = AVC_MAIN;
+ seqParam->constrained_set1_flag = TRUE;
+ }
+
+ if (rateCtrl->bidirPred == FALSE &&
+ rateCtrl->dpEnable == FALSE &&
+ seqParam->frame_mbs_only_flag == TRUE &&
+ picParam->weighted_pred_flag == FALSE &&
+ picParam->weighted_bipred_idc == 0 &&
+ picParam->entropy_coding_mode_flag == FALSE &&
+ picParam->num_slice_groups_minus1 <= 7 /*&&
+ picParam->num_slice_groups_minus1>=0 (no need, it's unsigned)*/)
+ {
+ seqParam->profile_idc = AVC_BASELINE;
+ seqParam->constrained_set0_flag = TRUE;
+ }
+
+ if (seqParam->profile_idc == 0) /* still zero */
+ {
+ return AVCENC_PROFILE_NOT_SUPPORTED;
+ }
+ }
+
+ /* check the list of supported profile by this library */
+ switch (seqParam->profile_idc)
+ {
+ case AVC_BASELINE:
+ if (rateCtrl->bidirPred == TRUE ||
+ rateCtrl->dpEnable == TRUE ||
+ seqParam->frame_mbs_only_flag != TRUE ||
+ picParam->weighted_pred_flag == TRUE ||
+ picParam->weighted_bipred_idc != 0 ||
+ picParam->entropy_coding_mode_flag == TRUE ||
+ picParam->num_slice_groups_minus1 > 7 /*||
+ picParam->num_slice_groups_minus1<0 (no need, it's unsigned) */)
+ {
+ status = AVCENC_TOOLS_NOT_SUPPORTED;
+ }
+ break;
+
+ case AVC_MAIN:
+ case AVC_EXTENDED:
+ status = AVCENC_PROFILE_NOT_SUPPORTED;
+ }
+
+ return status;
+}
+
+/* verify the level setting */
+AVCEnc_Status VerifyLevel(AVCEncObject *encvid, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam)
+{
+ (void)(picParam);
+
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ AVCCommonObj *video = encvid->common;
+ int mb_per_sec, ii;
+ int lev_idx;
+ int dpb_size;
+
+ mb_per_sec = (int)(video->PicSizeInMbs * rateCtrl->frame_rate + 0.5);
+ dpb_size = (seqParam->num_ref_frames * video->PicSizeInMbs * 3) >> 6;
+
+ if (seqParam->level_idc == 0) /* find level for this setting */
+ {
+ for (ii = 0; ii < MAX_LEVEL_IDX; ii++)
+ {
+ if (mb_per_sec <= MaxMBPS[ii] &&
+ video->PicSizeInMbs <= (uint)MaxFS[ii] &&
+ rateCtrl->bitRate <= (int32)MaxBR[ii]*1000 &&
+ rateCtrl->cpbSize <= (int32)MaxCPB[ii]*1000 &&
+ rateCtrl->mvRange <= MaxVmvR[ii] &&
+ dpb_size <= MaxDPBX2[ii]*512)
+ {
+ seqParam->level_idc = mapIdx2Lev[ii];
+ break;
+ }
+ }
+ if (seqParam->level_idc == 0)
+ {
+ return AVCENC_LEVEL_NOT_SUPPORTED;
+ }
+ }
+
+ /* check if this level is supported by this library */
+ lev_idx = mapLev2Idx[seqParam->level_idc];
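+ /* Level 1b is signalled via constraint_set3_flag in the SPS rather than a distinct
+ level_idc value, hence the special case below. */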
+ if (seqParam->level_idc == AVC_LEVEL1_B)
+ {
+ seqParam->constrained_set3_flag = 1;
+ }
+
+
+ if (lev_idx == 255) /* not defined */
+ {
+ return AVCENC_LEVEL_NOT_SUPPORTED;
+ }
+
+ /* check if the encoding setting complies with the level */
+ if (mb_per_sec > MaxMBPS[lev_idx] ||
+ video->PicSizeInMbs > (uint)MaxFS[lev_idx] ||
+ rateCtrl->bitRate > (int32)MaxBR[lev_idx]*1000 ||
+ rateCtrl->cpbSize > (int32)MaxCPB[lev_idx]*1000 ||
+ rateCtrl->mvRange > MaxVmvR[lev_idx])
+ {
+ return AVCENC_LEVEL_FAIL;
+ }
+
+ return AVCENC_SUCCESS;
+}
+
+/* initialize variables at the beginning of each frame */
+/* determine the picture type */
+/* encode POC */
+/* more could be done here, e.g. motion estimation + scene-change detection and generating a new SPS and PPS */
+AVCEnc_Status InitFrame(AVCEncObject *encvid)
+{
+ AVCStatus ret;
+ AVCEnc_Status status;
+ AVCCommonObj *video = encvid->common;
+ AVCSliceHeader *sliceHdr = video->sliceHdr;
+
+ /* look for the next frame in coding order and for an available picture
+ in the DPB. Note: video->currFS->PicOrderCnt, currFS->FrameNum and currPic->PicNum
+ are set to incorrect values here (they are only correct for the decoder); they are
+ assigned properly later, after motion estimation. */
+ if (video->nal_unit_type == AVC_NALTYPE_IDR)
+ {
+ // call init DPB in here.
+ ret = AVCConfigureSequence(encvid->avcHandle, video, TRUE);
+ if (ret != AVC_SUCCESS)
+ {
+ return AVCENC_FAIL;
+ }
+ }
+
+ /* flexible macroblock ordering (every frame)*/
+ /* populate video->mapUnitToSliceGroupMap and video->MbToSliceGroupMap */
+ /* It changes once per each PPS. */
+ FMOInit(video);
+
+ ret = DPBInitBuffer(encvid->avcHandle, video); // get new buffer
+
+ if (ret != AVC_SUCCESS)
+ {
+ return (AVCEnc_Status)ret; // AVCENC_PICTURE_READY, FAIL
+ }
+
+ DPBInitPic(video, 0); /* 0 is dummy */
+
+ /************* determine picture type IDR or non-IDR ***********/
+ video->currPicType = AVC_FRAME;
+ video->slice_data_partitioning = FALSE;
+ encvid->currInput->is_reference = 1; /* default to all frames */
+ video->nal_ref_idc = 1; /* need to set this for InitPOC */
+ video->currPic->isReference = TRUE;
+
+ /************* set frame_num ********************/
+ if (video->nal_unit_type == AVC_NALTYPE_IDR)
+ {
+ video->prevFrameNum = video->MaxFrameNum;
+ video->PrevRefFrameNum = 0;
+ sliceHdr->frame_num = 0;
+ }
+ /* otherwise, it is set relative to the previous reference frame access unit's frame_num
+ in decoding order; see the end of PVAVCDecodeSlice(). */
+ /* There are also restrictions on frame_num, see page 59 of JVT-I1010.doc: */
+ /* basically, frame_num cannot be repeated unless it belongs to opposite fields or non-reference fields. */
+ else
+ {
+ sliceHdr->frame_num = (video->PrevRefFrameNum + 1) % video->MaxFrameNum;
+ }
+ video->CurrPicNum = sliceHdr->frame_num; /* for field_pic_flag = 0 */
+ //video->CurrPicNum = 2*sliceHdr->frame_num + 1; /* for field_pic_flag = 1 */
+
+ /* assign pic_order_cnt, video->PicOrderCnt */
+ status = InitPOC(encvid);
+ if (status != AVCENC_SUCCESS) /* unrecoverable failure */
+ {
+ return status;
+ }
+
+ /* Initialize refListIdx for this picture */
+ RefListInit(video);
+
+ /************* motion estimation and scene analysis ************/
+ // TODO: move this to MB-based MV search for comparison
+ // use sub-optimal QP for mv search
+ AVCMotionEstimation(encvid); /* AVCENC_SUCCESS or AVCENC_NEW_IDR */
+
+ /* after this point, the picture type will be fixed to either IDR or non-IDR */
+ video->currFS->PicOrderCnt = video->PicOrderCnt;
+ video->currFS->FrameNum = video->sliceHdr->frame_num;
+ video->currPic->PicNum = video->CurrPicNum;
+ video->mbNum = 0; /* start from zero MB */
+ encvid->currSliceGroup = 0; /* start from slice group #0 */
+ encvid->numIntraMB = 0; /* reset this counter */
+
+ if (video->nal_unit_type == AVC_NALTYPE_IDR)
+ {
+ RCInitGOP(encvid);
+
+ /* calculate picture QP */
+ RCInitFrameQP(encvid);
+
+ return AVCENC_NEW_IDR;
+ }
+
+ /* calculate picture QP */
+ RCInitFrameQP(encvid); /* get QP after MV search */
+
+ return AVCENC_SUCCESS;
+}
+
+/* initialize variables for this slice */
+AVCEnc_Status InitSlice(AVCEncObject *encvid)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCSliceHeader *sliceHdr = video->sliceHdr;
+ AVCPicParamSet *currPPS = video->currPicParams;
+ AVCSeqParamSet *currSPS = video->currSeqParams;
+ int slice_type = video->slice_type;
+
+ sliceHdr->first_mb_in_slice = video->mbNum;
+ if (video->mbNum) // not first slice of a frame
+ {
+ video->sliceHdr->slice_type = (AVCSliceType)slice_type;
+ }
+
+ /* sliceHdr->slice_type already set in InitFrame */
+
+ sliceHdr->pic_parameter_set_id = video->currPicParams->pic_parameter_set_id;
+
+ /* sliceHdr->frame_num already set in InitFrame */
+
+ if (!currSPS->frame_mbs_only_flag) /* we shouldn't need this check */
+ {
+ sliceHdr->field_pic_flag = sliceHdr->bottom_field_flag = FALSE;
+ return AVCENC_TOOLS_NOT_SUPPORTED;
+ }
+
+ /* sliceHdr->idr_pic_id already set in PVAVCEncodeNAL
+
+ sliceHdr->pic_order_cnt_lsb already set in InitFrame..InitPOC
+ sliceHdr->delta_pic_order_cnt_bottom already set in InitPOC
+
+ sliceHdr->delta_pic_order_cnt[0] already set in InitPOC
+ sliceHdr->delta_pic_order_cnt[1] already set in InitPOC
+ */
+
+ sliceHdr->redundant_pic_cnt = 0; /* default if(currPPS->redundant_pic_cnt_present_flag), range 0..127 */
+ sliceHdr->direct_spatial_mv_pred_flag = 0; // default if(slice_type == AVC_B_SLICE)
+
+ sliceHdr->num_ref_idx_active_override_flag = FALSE; /* default, if(slice_type== P,SP or B)*/
+ sliceHdr->num_ref_idx_l0_active_minus1 = 0; /* default, if (num_ref_idx_active_override_flag) */
+ sliceHdr->num_ref_idx_l1_active_minus1 = 0; /* default, if above and B_slice */
+ /* the above 2 values range from 0..15 for frame picture and 0..31 for field picture */
+
+ /* ref_pic_list_reordering(), currently we don't do anything */
+ sliceHdr->ref_pic_list_reordering_flag_l0 = FALSE; /* default */
+ sliceHdr->ref_pic_list_reordering_flag_l1 = FALSE; /* default */
+ /* if the above are TRUE, some other params must be set */
+
+ if ((currPPS->weighted_pred_flag && (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE)) ||
+ (currPPS->weighted_bipred_idc == 1 && slice_type == AVC_B_SLICE))
+ {
+ // pred_weight_table(); // not supported !!
+ return AVCENC_TOOLS_NOT_SUPPORTED;
+ }
+
+ /* dec_ref_pic_marking(), this will be done later*/
+ sliceHdr->no_output_of_prior_pics_flag = FALSE; /* default */
+ sliceHdr->long_term_reference_flag = FALSE; /* for IDR frame, do not make it long term */
+ sliceHdr->adaptive_ref_pic_marking_mode_flag = FALSE; /* default */
+ /* other params are not set here because they are not used */
+
+ sliceHdr->cabac_init_idc = 0; /* default, if entropy_coding_mode_flag && slice_type==I or SI, range 0..2 */
+ sliceHdr->slice_qp_delta = 0; /* default for now */
+ sliceHdr->sp_for_switch_flag = FALSE; /* default, if slice_type == SP */
+ sliceHdr->slice_qs_delta = 0; /* default, if slice_type == SP or SI */
+
+ /* derived variables from encParam */
+ /* deblocking filter */
+ video->FilterOffsetA = video->FilterOffsetB = 0;
+ if (currPPS->deblocking_filter_control_present_flag == TRUE)
+ {
+ video->FilterOffsetA = sliceHdr->slice_alpha_c0_offset_div2 << 1;
+ video->FilterOffsetB = sliceHdr->slice_beta_offset_div_2 << 1;
+ }
+
+ /* flexible macroblock ordering */
+ /* populate video->mapUnitToSliceGroupMap and video->MbToSliceGroupMap */
+ /* We already call it at the end of PVAVCEncInitialize(). It changes once per each PPS. */
+ if (video->currPicParams->num_slice_groups_minus1 > 0 && video->currPicParams->slice_group_map_type >= 3
+ && video->currPicParams->slice_group_map_type <= 5)
+ {
+ sliceHdr->slice_group_change_cycle = SLICE_GROUP_CHANGE_CYCLE; /* default; the proper way to derive this value is unclear */
+
+ video->MapUnitsInSliceGroup0 =
+ AVC_MIN(sliceHdr->slice_group_change_cycle * video->SliceGroupChangeRate, video->PicSizeInMapUnits);
+
+ FMOInit(video);
+ }
+
+ /* calculate SliceQPy first */
+ /* calculate QSy first */
+
+ sliceHdr->slice_qp_delta = video->QPy - 26 - currPPS->pic_init_qp_minus26;
+ //sliceHdr->slice_qs_delta = video->QSy - 26 - currPPS->pic_init_qs_minus26;
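+ /* from the standard, SliceQPy = 26 + pic_init_qp_minus26 + slice_qp_delta, so the
+ delta above simply encodes the picture QP chosen by rate control. */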
+
+ return AVCENC_SUCCESS;
+}
+
diff --git a/media/libstagefright/codecs/avc/enc/src/intra_est.cpp b/media/libstagefright/codecs/avc/enc/src/intra_est.cpp
new file mode 100644
index 0000000..17e5985
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/intra_est.cpp
@@ -0,0 +1,2199 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+
+#define TH_I4 0 /* threshold biasing toward I16 mode instead of I4 mode */
+#define TH_Intra 0 /* threshold biasing toward INTER mode instead of intra mode */
+
+#define FIXED_INTRAPRED_MODE AVC_I16
+#define FIXED_I16_MODE AVC_I16_DC
+#define FIXED_I4_MODE AVC_I4_Diagonal_Down_Left
+#define FIXED_INTRA_CHROMA_MODE AVC_IC_DC
+
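+/* CLIP_RESULT clips x to the range [0,255]: for negative x the arithmetic shift of
+the sign bit produces a zero mask, and for x > 255 the mask is 0xFF, i.e. 255. */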
+#define CLIP_RESULT(x) if((uint)x > 0xFF){ \
+ x = 0xFF & (~(x>>31));}
+
+
+bool IntraDecisionABE(AVCEncObject *encvid, int min_cost, uint8 *curL, int picPitch)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCFrameIO *currInput = encvid->currInput;
+ int orgPitch = currInput->pitch;
+ int x_pos = (video->mb_x) << 4;
+ int y_pos = (video->mb_y) << 4;
+ uint8 *orgY = currInput->YCbCr[0] + y_pos * orgPitch + x_pos;
+ int j;
+ uint8 *topL, *leftL, *orgY_2, *orgY_3;
+ int temp, SBE, offset;
+ OsclFloat ABE;
+ bool intra = true;
+
+ if (((x_pos >> 4) != (int)video->PicWidthInMbs - 1) &&
+ ((y_pos >> 4) != (int)video->PicHeightInMbs - 1) &&
+ video->intraAvailA &&
+ video->intraAvailB)
+ {
+ SBE = 0;
+ /* top neighbor */
+ topL = curL - picPitch;
+ /* left neighbor */
+ leftL = curL - 1;
+ orgY_2 = orgY - orgPitch;
+
+ for (j = 0; j < 16; j++)
+ {
+ temp = *topL++ - orgY[j];
+ SBE += ((temp >= 0) ? temp : -temp);
+ temp = *(leftL += picPitch) - *(orgY_2 += orgPitch);
+ SBE += ((temp >= 0) ? temp : -temp);
+ }
+
+ /* calculate chroma */
+ offset = (y_pos >> 2) * picPitch + (x_pos >> 1);
+ topL = video->currPic->Scb + offset;
+ orgY_2 = currInput->YCbCr[1] + offset + (y_pos >> 2) * (orgPitch - picPitch);
+
+ leftL = topL - 1;
+ topL -= (picPitch >> 1);
+ orgY_3 = orgY_2 - (orgPitch >> 1);
+ for (j = 0; j < 8; j++)
+ {
+ temp = *topL++ - orgY_2[j];
+ SBE += ((temp >= 0) ? temp : -temp);
+ temp = *(leftL += (picPitch >> 1)) - *(orgY_3 += (orgPitch >> 1));
+ SBE += ((temp >= 0) ? temp : -temp);
+ }
+
+ topL = video->currPic->Scr + offset;
+ orgY_2 = currInput->YCbCr[2] + offset + (y_pos >> 2) * (orgPitch - picPitch);
+
+ leftL = topL - 1;
+ topL -= (picPitch >> 1);
+ orgY_3 = orgY_2 - (orgPitch >> 1);
+ for (j = 0; j < 8; j++)
+ {
+ temp = *topL++ - orgY_2[j];
+ SBE += ((temp >= 0) ? temp : -temp);
+ temp = *(leftL += (picPitch >> 1)) - *(orgY_3 += (orgPitch >> 1));
+ SBE += ((temp >= 0) ? temp : -temp);
+ }
+
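+ /* 64 boundary sample pairs were accumulated above (16 top + 16 left luma,
+ 8 + 8 for Cb and 8 + 8 for Cr), and a macroblock contains 384 samples
+ (256 luma + 128 chroma), so both quantities below are per-sample averages.
+ When the boundary error is large relative to the best inter cost, intra
+ prediction (which extrapolates from those neighbours) is unlikely to win,
+ and the intra search is skipped. */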
+ /* compare mincost/384 and SBE/64 */
+ ABE = SBE / 64.0;
+ if (ABE*0.8 >= min_cost / 384.0)
+ {
+ intra = false;
+ }
+ }
+
+ return intra;
+}
+
+/* perform searching for MB mode */
+/* assuming that this is done inside the encoding loop,
+no need to call InitNeighborAvailability */
+
+void MBIntraSearch(AVCEncObject *encvid, int mbnum, uint8 *curL, int picPitch)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCFrameIO *currInput = encvid->currInput;
+ AVCMacroblock *currMB = video->currMB;
+ int min_cost;
+ uint8 *orgY;
+ int x_pos = (video->mb_x) << 4;
+ int y_pos = (video->mb_y) << 4;
+ uint32 *saved_inter;
+ int j;
+ int orgPitch = currInput->pitch;
+ bool intra = true;
+
+ currMB->CBP = 0;
+
+ /* first do motion vector and variable block size search */
+ min_cost = encvid->min_cost[mbnum];
+
+ /* now perform intra prediction search */
+ /* a check of encvid->intraSearch[video->mbNum] could be added here to skip the
+ intra search when it is not worth doing. */
+ if (video->slice_type == AVC_P_SLICE)
+ {
+ /* Decide whether intra search is necessary or not */
+ /* This one, we do it in the encoding loop so the neighboring pixel are the
+ actual reconstructed pixels. */
+ intra = IntraDecisionABE(encvid, min_cost, curL, picPitch);
+ }
+
+ if (intra == true || video->slice_type == AVC_I_SLICE)
+ {
+ orgY = currInput->YCbCr[0] + y_pos * orgPitch + x_pos;
+
+ /* i16 mode search */
+ /* generate all the predictions */
+ intrapred_luma_16x16(encvid);
+
+ /* evaluate them one by one */
+ find_cost_16x16(encvid, orgY, &min_cost);
+
+ if (video->slice_type == AVC_P_SLICE)
+ {
+ /* save current inter prediction */
+ saved_inter = encvid->subpel_pred; /* reuse existing buffer */
+ j = 16;
+ curL -= 4;
+ picPitch -= 16;
+ while (j--)
+ {
+ *saved_inter++ = *((uint32*)(curL += 4));
+ *saved_inter++ = *((uint32*)(curL += 4));
+ *saved_inter++ = *((uint32*)(curL += 4));
+ *saved_inter++ = *((uint32*)(curL += 4));
+ curL += picPitch;
+ }
+
+ }
+
+ /* i4 mode search */
+ mb_intra4x4_search(encvid, &min_cost);
+
+ encvid->min_cost[mbnum] = min_cost; /* update min_cost */
+ }
+
+
+ if (currMB->mb_intra)
+ {
+ chroma_intra_search(encvid);
+
+ /* need to set this in order for the MBInterPrediction to work!! */
+ memset(currMB->mvL0, 0, sizeof(int32)*16);
+ currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] =
+ currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = -1;
+ }
+ else if (video->slice_type == AVC_P_SLICE && intra == true)
+ {
+ /* restore current inter prediction */
+ saved_inter = encvid->subpel_pred; /* reuse existing buffer */
+ j = 16;
+ curL -= ((picPitch + 16) << 4);
+ while (j--)
+ {
+ *((uint32*)(curL += 4)) = *saved_inter++;
+ *((uint32*)(curL += 4)) = *saved_inter++;
+ *((uint32*)(curL += 4)) = *saved_inter++;
+ *((uint32*)(curL += 4)) = *saved_inter++;
+ curL += picPitch;
+ }
+ }
+
+ return ;
+}
+
+/* generate all the prediction values */
+void intrapred_luma_16x16(AVCEncObject *encvid)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCPictureData *currPic = video->currPic;
+
+ int x_pos = (video->mb_x) << 4;
+ int y_pos = (video->mb_y) << 4;
+ int pitch = currPic->pitch;
+
+ int offset = y_pos * pitch + x_pos;
+
+ uint8 *pred, *top, *left;
+ uint8 *curL = currPic->Sl + offset; /* point to reconstructed frame */
+ uint32 word1, word2, word3, word4;
+ uint32 sum = 0;
+
+ int a_16, b, c, factor_c;
+ uint8 *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;
+ int H = 0, V = 0, tmp, value;
+ int i;
+
+ if (video->intraAvailB)
+ {
+ //get vertical prediction mode
+ top = curL - pitch;
+
+ pred = encvid->pred_i16[AVC_I16_Vertical] - 16;
+
+ word1 = *((uint32*)(top)); /* read 4 bytes from top */
+ word2 = *((uint32*)(top + 4)); /* read 4 bytes from top */
+ word3 = *((uint32*)(top + 8)); /* read 4 bytes from top */
+ word4 = *((uint32*)(top + 12)); /* read 4 bytes from top */
+
+ for (i = 0; i < 16; i++)
+ {
+ *((uint32*)(pred += 16)) = word1;
+ *((uint32*)(pred + 4)) = word2;
+ *((uint32*)(pred + 8)) = word3;
+ *((uint32*)(pred + 12)) = word4;
+
+ }
+
+ sum = word1 & 0xFF00FF;
+ word1 = (word1 >> 8) & 0xFF00FF;
+ sum += word1;
+ word1 = (word2 & 0xFF00FF);
+ sum += word1;
+ word2 = (word2 >> 8) & 0xFF00FF;
+ sum += word2;
+ word1 = (word3 & 0xFF00FF);
+ sum += word1;
+ word3 = (word3 >> 8) & 0xFF00FF;
+ sum += word3;
+ word1 = (word4 & 0xFF00FF);
+ sum += word1;
+ word4 = (word4 >> 8) & 0xFF00FF;
+ sum += word4;
+
+ sum += (sum >> 16);
+ sum &= 0xFFFF;
+
+ if (!video->intraAvailA)
+ {
+ sum = (sum + 8) >> 4;
+ }
+ }
+
+ if (video->intraAvailA)
+ {
+ // get horizontal mode
+ left = curL - 1 - pitch;
+
+ pred = encvid->pred_i16[AVC_I16_Horizontal] - 16;
+
+ for (i = 0; i < 16; i++)
+ {
+ word1 = *(left += pitch);
+ sum += word1;
+
+ word1 = (word1 << 8) | word1;
+ word1 = (word1 << 16) | word1; /* make it 4 */
+
+ *(uint32*)(pred += 16) = word1;
+ *(uint32*)(pred + 4) = word1;
+ *(uint32*)(pred + 8) = word1;
+ *(uint32*)(pred + 12) = word1;
+ }
+
+ if (!video->intraAvailB)
+ {
+ sum = (sum + 8) >> 4;
+ }
+ else
+ {
+ sum = (sum + 16) >> 5;
+ }
+ }
+
+ // get DC mode
+ if (!video->intraAvailA && !video->intraAvailB)
+ {
+ sum = 0x80808080;
+ }
+ else
+ {
+ sum = (sum << 8) | sum;
+ sum = (sum << 16) | sum;
+ }
+
+ pred = encvid->pred_i16[AVC_I16_DC] - 16;
+ for (i = 0; i < 16; i++)
+ {
+ *((uint32*)(pred += 16)) = sum;
+ *((uint32*)(pred + 4)) = sum;
+ *((uint32*)(pred + 8)) = sum;
+ *((uint32*)(pred + 12)) = sum;
+ }
+
+ // get plane mode
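+ // Plane prediction per the AVC spec: pred[x,y] = Clip((a + b*(x-7) + c*(y-7) + 16) >> 5),
+ // with a = 16*(p[15,-1] + p[-1,15]), b = (5*H + 32) >> 6 and c = (5*V + 32) >> 6.
+ // The loop below evaluates this incrementally: b is added per pixel and c per row.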
+ if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
+ {
+ pred = encvid->pred_i16[AVC_I16_Plane] - 16;
+
+ comp_ref_x0 = curL - pitch + 8;
+ comp_ref_x1 = curL - pitch + 6;
+ comp_ref_y0 = curL - 1 + (pitch << 3);
+ comp_ref_y1 = curL - 1 + 6 * pitch;
+
+ for (i = 1; i < 8; i++)
+ {
+ H += i * (*comp_ref_x0++ - *comp_ref_x1--);
+ V += i * (*comp_ref_y0 - *comp_ref_y1);
+ comp_ref_y0 += pitch;
+ comp_ref_y1 -= pitch;
+ }
+
+ H += i * (*comp_ref_x0++ - curL[-pitch-1]);
+ V += i * (*comp_ref_y0 - *comp_ref_y1);
+
+
+ a_16 = ((*(curL - pitch + 15) + *(curL - 1 + 15 * pitch)) << 4) + 16;
+ b = (5 * H + 32) >> 6;
+ c = (5 * V + 32) >> 6;
+
+ tmp = 0;
+ for (i = 0; i < 16; i++)
+ {
+ factor_c = a_16 + c * (tmp++ - 7);
+ factor_c -= 7 * b;
+
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = value;
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 8);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 16);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 24);
+ *((uint32*)(pred += 16)) = word1;
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = value;
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 8);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 16);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 24);
+ *((uint32*)(pred + 4)) = word1;
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = value;
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 8);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 16);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 24);
+ *((uint32*)(pred + 8)) = word1;
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = value;
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 8);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 16);
+ value = factor_c >> 5;
+ CLIP_RESULT(value)
+ word1 = (word1) | (value << 24);
+ *((uint32*)(pred + 12)) = word1;
+ }
+ }
+
+ return ;
+}
+
+
+/* evaluate each prediction mode of I16 */
+void find_cost_16x16(AVCEncObject *encvid, uint8 *orgY, int *min_cost)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCMacroblock *currMB = video->currMB;
+ int cost;
+ int org_pitch = encvid->currInput->pitch;
+
+ /* evaluate vertical mode */
+ if (video->intraAvailB)
+ {
+ cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Vertical], *min_cost);
+ if (cost < *min_cost)
+ {
+ *min_cost = cost;
+ currMB->mbMode = AVC_I16;
+ currMB->mb_intra = 1;
+ currMB->i16Mode = AVC_I16_Vertical;
+ }
+ }
+
+
+ /* evaluate horizontal mode */
+ if (video->intraAvailA)
+ {
+ cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Horizontal], *min_cost);
+ if (cost < *min_cost)
+ {
+ *min_cost = cost;
+ currMB->mbMode = AVC_I16;
+ currMB->mb_intra = 1;
+ currMB->i16Mode = AVC_I16_Horizontal;
+ }
+ }
+
+ /* evaluate DC mode */
+ cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_DC], *min_cost);
+ if (cost < *min_cost)
+ {
+ *min_cost = cost;
+ currMB->mbMode = AVC_I16;
+ currMB->mb_intra = 1;
+ currMB->i16Mode = AVC_I16_DC;
+ }
+
+ /* evaluate plane mode */
+ if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
+ {
+ cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Plane], *min_cost);
+ if (cost < *min_cost)
+ {
+ *min_cost = cost;
+ currMB->mbMode = AVC_I16;
+ currMB->mb_intra = 1;
+ currMB->i16Mode = AVC_I16_Plane;
+ }
+ }
+
+ return ;
+}
+
+
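+/* SATD cost of an I16 prediction candidate: a 4x4 Hadamard butterfly is applied to
+the residual horizontally and then vertically, the absolute coefficients are summed
+while skipping each 4x4 block's DC term, and a second-level 4x4 Hadamard of those
+DC terms is added (mirroring how I16 luma is actually coded). The final >>1
+compensates for the transform gain; early exits return as soon as the running cost
+exceeds min_cost. */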
+int cost_i16(uint8 *org, int org_pitch, uint8 *pred, int min_cost)
+{
+
+ int cost;
+ int j, k;
+ int16 res[256], *pres; // residue
+ int m0, m1, m2, m3;
+
+ // calculate SATD
+ org_pitch -= 16;
+ pres = res;
+ // horizontal transform
+ for (j = 0; j < 16; j++)
+ {
+ k = 4;
+ while (k > 0)
+ {
+ m0 = org[0] - pred[0];
+ m3 = org[3] - pred[3];
+ m0 += m3;
+ m3 = m0 - (m3 << 1);
+ m1 = org[1] - pred[1];
+ m2 = org[2] - pred[2];
+ m1 += m2;
+ m2 = m1 - (m2 << 1);
+ pres[0] = m0 + m1;
+ pres[2] = m0 - m1;
+ pres[1] = m2 + m3;
+ pres[3] = m3 - m2;
+
+ org += 4;
+ pres += 4;
+ pred += 4;
+ k--;
+ }
+ org += org_pitch;
+ }
+ /* vertical transform */
+ cost = 0;
+ for (j = 0; j < 4; j++)
+ {
+ pres = res + (j << 6);
+ k = 16;
+ while (k > 0)
+ {
+ m0 = pres[0];
+ m3 = pres[3<<4];
+ m0 += m3;
+ m3 = m0 - (m3 << 1);
+ m1 = pres[1<<4];
+ m2 = pres[2<<4];
+ m1 += m2;
+ m2 = m1 - (m2 << 1);
+ pres[0] = m0 = m0 + m1;
+
+ if (k&0x3) // only sum up non-DC values.
+ {
+ cost += ((m0 > 0) ? m0 : -m0);
+ }
+
+ m1 = m0 - (m1 << 1);
+ cost += ((m1 > 0) ? m1 : -m1);
+ m3 = m2 + m3;
+ cost += ((m3 > 0) ? m3 : -m3);
+ m2 = m3 - (m2 << 1);
+ cost += ((m2 > 0) ? m2 : -m2);
+
+ pres++;
+ k--;
+ }
+ if ((cost >> 1) > min_cost) /* early drop out */
+ {
+ return (cost >> 1);
+ }
+ }
+
+ /* Hadamard of the DC coefficient */
+ pres = res;
+ k = 4;
+ while (k > 0)
+ {
+ m0 = pres[0];
+ m3 = pres[3<<2];
+ m0 >>= 2;
+ m0 += (m3 >> 2);
+ m3 = m0 - (m3 >> 1);
+ m1 = pres[1<<2];
+ m2 = pres[2<<2];
+ m1 >>= 2;
+ m1 += (m2 >> 2);
+ m2 = m1 - (m2 >> 1);
+ pres[0] = (m0 + m1);
+ pres[2<<2] = (m0 - m1);
+ pres[1<<2] = (m2 + m3);
+ pres[3<<2] = (m3 - m2);
+ pres += (4 << 4);
+ k--;
+ }
+
+ pres = res;
+ k = 4;
+ while (k > 0)
+ {
+ m0 = pres[0];
+ m3 = pres[3<<6];
+ m0 += m3;
+ m3 = m0 - (m3 << 1);
+ m1 = pres[1<<6];
+ m2 = pres[2<<6];
+ m1 += m2;
+ m2 = m1 - (m2 << 1);
+ m0 = m0 + m1;
+ cost += ((m0 >= 0) ? m0 : -m0);
+ m1 = m0 - (m1 << 1);
+ cost += ((m1 >= 0) ? m1 : -m1);
+ m3 = m2 + m3;
+ cost += ((m3 >= 0) ? m3 : -m3);
+ m2 = m3 - (m2 << 1);
+ cost += ((m2 >= 0) ? m2 : -m2);
+ pres += 4;
+
+ if ((cost >> 1) > min_cost) /* early drop out */
+ {
+ return (cost >> 1);
+ }
+
+ k--;
+ }
+
+ return (cost >> 1);
+}
+
+
+void mb_intra4x4_search(AVCEncObject *encvid, int *min_cost)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCMacroblock *currMB = video->currMB;
+ AVCPictureData *currPic = video->currPic;
+ AVCFrameIO *currInput = encvid->currInput;
+ int pitch = currPic->pitch;
+ int org_pitch = currInput->pitch;
+ int offset;
+ uint8 *curL, *comp, *org4, *org8;
+ int y = video->mb_y << 4;
+ int x = video->mb_x << 4;
+
+ int b8, b4, cost4x4, blkidx;
+ int cost = 0;
+ int numcoef;
+ int dummy = 0;
+ int mb_intra = currMB->mb_intra; // save the original value
+
+ offset = y * pitch + x;
+
+ curL = currPic->Sl + offset;
+ org8 = currInput->YCbCr[0] + y * org_pitch + x;
+ video->pred_pitch = 4;
+
+ cost = (int)(6.0 * encvid->lambda_mode + 0.4999);
+ cost <<= 2;
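+ /* this charges a fixed bias of roughly 24*lambda against the I4 mode up front,
+ presumably to account for its larger mode-signalling overhead compared to I16. */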
+
+ currMB->mb_intra = 1; // temporarily set this to one to enable the IDCT
+ // operation inside dct_luma
+
+ for (b8 = 0; b8 < 4; b8++)
+ {
+ comp = curL;
+ org4 = org8;
+
+ for (b4 = 0; b4 < 4; b4++)
+ {
+ blkidx = blkIdx2blkXY[b8][b4];
+ cost4x4 = blk_intra4x4_search(encvid, blkidx, comp, org4);
+ cost += cost4x4;
+ if (cost > *min_cost)
+ {
+ currMB->mb_intra = mb_intra; // restore the value
+ return ;
+ }
+
+ /* do residue, Xfrm, Q, invQ, invXfrm, recon and save the DCT coefs.*/
+ video->pred_block = encvid->pred_i4[currMB->i4Mode[blkidx]];
+ numcoef = dct_luma(encvid, blkidx, comp, org4, &dummy);
+ currMB->nz_coeff[blkidx] = numcoef;
+ if (numcoef)
+ {
+ video->cbp4x4 |= (1 << blkidx);
+ currMB->CBP |= (1 << b8);
+ }
+
+ if (b4&1)
+ {
+ comp += ((pitch << 2) - 4);
+ org4 += ((org_pitch << 2) - 4);
+ }
+ else
+ {
+ comp += 4;
+ org4 += 4;
+ }
+ }
+
+ if (b8&1)
+ {
+ curL += ((pitch << 3) - 8);
+ org8 += ((org_pitch << 3) - 8);
+ }
+ else
+ {
+ curL += 8;
+ org8 += 8;
+ }
+ }
+
+ currMB->mb_intra = mb_intra; // restore the value
+
+ if (cost < *min_cost)
+ {
+ *min_cost = cost;
+ currMB->mbMode = AVC_I4;
+ currMB->mb_intra = 1;
+ }
+
+ return ;
+}
+
+
+/* search for i4 mode for a 4x4 block */
+int blk_intra4x4_search(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCNeighborAvailability availability;
+ AVCMacroblock *currMB = video->currMB;
+ bool top_left = FALSE;
+ int pitch = video->currPic->pitch;
+ uint8 mode_avail[AVCNumI4PredMode];
+ uint32 temp, DC;
+ uint8 *pred;
+ int org_pitch = encvid->currInput->pitch;
+ uint16 min_cost, cost;
+
+ int P_x, Q_x, R_x, P_y, Q_y, R_y, D, D0, D1;
+ int P0, Q0, R0, S0, P1, Q1, R1, P2, Q2;
+ uint8 P_A, P_B, P_C, P_D, P_E, P_F, P_G, P_H, P_I, P_J, P_K, P_L, P_X;
+ int r0, r1, r2, r3, r4, r5, r6, r7;
+ int x0, x1, x2, x3, x4, x5;
+ uint32 temp1, temp2;
+
+ int ipmode, mostProbableMode;
+ int fixedcost = 4 * encvid->lambda_mode;
+ int min_sad = 0x7FFF;
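+ /* a mode other than the most probable one costs extra bits to signal, so it is
+ penalized by fixedcost (4*lambda) in the mode loop at the end of this function,
+ while the most probable mode gets a zero mode cost. */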
+
+ availability.left = TRUE;
+ availability.top = TRUE;
+ if (blkidx <= 3) /* top row block (!block_y) */
+ { /* check availability up */
+ availability.top = video->intraAvailB ;
+ }
+ if (!(blkidx&0x3)) /* left column block (!block_x)*/
+ { /* check availability left */
+ availability.left = video->intraAvailA ;
+ }
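+ /* BlkTopRight[] encodes top-right availability per 4x4 block: entries of 2 or 3
+ mean it depends on the macroblock above (B) or above-right (C), respectively;
+ other entries are used directly as the availability flag. */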
+ availability.top_right = BlkTopRight[blkidx];
+
+ if (availability.top_right == 2)
+ {
+ availability.top_right = video->intraAvailB;
+ }
+ else if (availability.top_right == 3)
+ {
+ availability.top_right = video->intraAvailC;
+ }
+
+ if (availability.top == TRUE)
+ {
+ temp = *(uint32*)(cur - pitch);
+ P_A = temp & 0xFF;
+ P_B = (temp >> 8) & 0xFF;
+ P_C = (temp >> 16) & 0xFF;
+ P_D = (temp >> 24) & 0xFF;
+ }
+ else
+ {
+ P_A = P_B = P_C = P_D = 128;
+ }
+
+ if (availability.top_right == TRUE)
+ {
+ temp = *(uint32*)(cur - pitch + 4);
+ P_E = temp & 0xFF;
+ P_F = (temp >> 8) & 0xFF;
+ P_G = (temp >> 16) & 0xFF;
+ P_H = (temp >> 24) & 0xFF;
+ }
+ else
+ {
+ P_E = P_F = P_G = P_H = 128;
+ }
+
+ if (availability.left == TRUE)
+ {
+ cur--;
+ P_I = *cur;
+ P_J = *(cur += pitch);
+ P_K = *(cur += pitch);
+ P_L = *(cur + pitch);
+ cur -= (pitch << 1);
+ cur++;
+ }
+ else
+ {
+ P_I = P_J = P_K = P_L = 128;
+ }
+
+ /* check if top-left pixel is available */
+ if (((blkidx > 3) && (blkidx&0x3)) || ((blkidx > 3) && video->intraAvailA)
+ || ((blkidx&0x3) && video->intraAvailB)
+ || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
+ {
+ top_left = TRUE;
+ P_X = *(cur - pitch - 1);
+ }
+ else
+ {
+ P_X = 128;
+ }
+
+ //===== INTRA PREDICTION FOR 4x4 BLOCK =====
+ /* vertical */
+ mode_avail[AVC_I4_Vertical] = 0;
+ if (availability.top)
+ {
+ mode_avail[AVC_I4_Vertical] = 1;
+ pred = encvid->pred_i4[AVC_I4_Vertical];
+
+ temp = (P_D << 24) | (P_C << 16) | (P_B << 8) | P_A ;
+ *((uint32*)pred) = temp; /* write 4 at a time */
+ *((uint32*)(pred += 4)) = temp;
+ *((uint32*)(pred += 4)) = temp;
+ *((uint32*)(pred += 4)) = temp;
+ }
+ /* horizontal */
+ mode_avail[AVC_I4_Horizontal] = 0;
+ mode_avail[AVC_I4_Horizontal_Up] = 0;
+ if (availability.left)
+ {
+ mode_avail[AVC_I4_Horizontal] = 1;
+ pred = encvid->pred_i4[AVC_I4_Horizontal];
+
+ temp = P_I | (P_I << 8);
+ temp = temp | (temp << 16);
+ *((uint32*)pred) = temp;
+ temp = P_J | (P_J << 8);
+ temp = temp | (temp << 16);
+ *((uint32*)(pred += 4)) = temp;
+ temp = P_K | (P_K << 8);
+ temp = temp | (temp << 16);
+ *((uint32*)(pred += 4)) = temp;
+ temp = P_L | (P_L << 8);
+ temp = temp | (temp << 16);
+ *((uint32*)(pred += 4)) = temp;
+
+ mode_avail[AVC_I4_Horizontal_Up] = 1;
+ pred = encvid->pred_i4[AVC_I4_Horizontal_Up];
+
+ Q0 = (P_J + P_K + 1) >> 1;
+ Q1 = (P_J + (P_K << 1) + P_L + 2) >> 2;
+ P0 = ((P_I + P_J + 1) >> 1);
+ P1 = ((P_I + (P_J << 1) + P_K + 2) >> 2);
+
+ temp = P0 | (P1 << 8); // [P0 P1 Q0 Q1]
+ temp |= (Q0 << 16); // [Q0 Q1 R0 D0]
+ temp |= (Q1 << 24); // [R0 D0 D1 D1]
+ *((uint32*)pred) = temp; // [D1 D1 D1 D1]
+
+ D0 = (P_K + 3 * P_L + 2) >> 2;
+ R0 = (P_K + P_L + 1) >> 1;
+
+ temp = Q0 | (Q1 << 8);
+ temp |= (R0 << 16);
+ temp |= (D0 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ D1 = P_L;
+
+ temp = R0 | (D0 << 8);
+ temp |= (D1 << 16);
+ temp |= (D1 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = D1 | (D1 << 8);
+ temp |= (temp << 16);
+ *((uint32*)(pred += 4)) = temp;
+ }
+ /* DC */
+ mode_avail[AVC_I4_DC] = 1;
+ pred = encvid->pred_i4[AVC_I4_DC];
+ if (availability.left)
+ {
+ DC = P_I + P_J + P_K + P_L;
+
+ if (availability.top)
+ {
+ DC = (P_A + P_B + P_C + P_D + DC + 4) >> 3;
+ }
+ else
+ {
+ DC = (DC + 2) >> 2;
+
+ }
+ }
+ else if (availability.top)
+ {
+ DC = (P_A + P_B + P_C + P_D + 2) >> 2;
+
+ }
+ else
+ {
+ DC = 128;
+ }
+
+ temp = DC | (DC << 8);
+ temp = temp | (temp << 16);
+ *((uint32*)pred) = temp;
+ *((uint32*)(pred += 4)) = temp;
+ *((uint32*)(pred += 4)) = temp;
+ *((uint32*)(pred += 4)) = temp;
+
+ /* Down-left */
+ mode_avail[AVC_I4_Diagonal_Down_Left] = 0;
+
+ if (availability.top)
+ {
+ mode_avail[AVC_I4_Diagonal_Down_Left] = 1;
+
+ pred = encvid->pred_i4[AVC_I4_Diagonal_Down_Left];
+
+ r0 = P_A;
+ r1 = P_B;
+ r2 = P_C;
+ r3 = P_D;
+
+ r0 += (r1 << 1);
+ r0 += r2;
+ r0 += 2;
+ r0 >>= 2;
+ r1 += (r2 << 1);
+ r1 += r3;
+ r1 += 2;
+ r1 >>= 2;
+
+ if (availability.top_right)
+ {
+ r4 = P_E;
+ r5 = P_F;
+ r6 = P_G;
+ r7 = P_H;
+
+ r2 += (r3 << 1);
+ r2 += r4;
+ r2 += 2;
+ r2 >>= 2;
+ r3 += (r4 << 1);
+ r3 += r5;
+ r3 += 2;
+ r3 >>= 2;
+ r4 += (r5 << 1);
+ r4 += r6;
+ r4 += 2;
+ r4 >>= 2;
+ r5 += (r6 << 1);
+ r5 += r7;
+ r5 += 2;
+ r5 >>= 2;
+ r6 += (3 * r7);
+ r6 += 2;
+ r6 >>= 2;
+ temp = r0 | (r1 << 8);
+ temp |= (r2 << 16);
+ temp |= (r3 << 24);
+ *((uint32*)pred) = temp;
+
+ temp = (temp >> 8) | (r4 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = (temp >> 8) | (r5 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = (temp >> 8) | (r6 << 24);
+ *((uint32*)(pred += 4)) = temp;
+ }
+ else
+ {
+ r2 += (r3 * 3);
+ r2 += 2;
+ r2 >>= 2;
+ r3 = ((r3 << 2) + 2);
+ r3 >>= 2;
+
+ temp = r0 | (r1 << 8);
+ temp |= (r2 << 16);
+ temp |= (r3 << 24);
+ *((uint32*)pred) = temp;
+
+ temp = (temp >> 8) | (r3 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = (temp >> 8) | (r3 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = (temp >> 8) | (r3 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ }
+ }
+
+ /* Down Right */
+ mode_avail[AVC_I4_Diagonal_Down_Right] = 0;
+ /* Diagonal Vertical Right */
+ mode_avail[AVC_I4_Vertical_Right] = 0;
+ /* Horizontal Down */
+ mode_avail[AVC_I4_Horizontal_Down] = 0;
+
+ if (top_left == TRUE)
+ {
+ /* Down Right */
+ mode_avail[AVC_I4_Diagonal_Down_Right] = 1;
+ pred = encvid->pred_i4[AVC_I4_Diagonal_Down_Right];
+
+ Q_x = (P_A + 2 * P_B + P_C + 2) >> 2;
+ R_x = (P_B + 2 * P_C + P_D + 2) >> 2;
+ P_x = (P_X + 2 * P_A + P_B + 2) >> 2;
+ D = (P_A + 2 * P_X + P_I + 2) >> 2;
+ P_y = (P_X + 2 * P_I + P_J + 2) >> 2;
+ Q_y = (P_I + 2 * P_J + P_K + 2) >> 2;
+ R_y = (P_J + 2 * P_K + P_L + 2) >> 2;
+
+ /* we can pack these */
+ temp = D | (P_x << 8); //[D P_x Q_x R_x]
+ //[P_y D P_x Q_x]
+ temp |= (Q_x << 16); //[Q_y P_y D P_x]
+ temp |= (R_x << 24); //[R_y Q_y P_y D ]
+ *((uint32*)pred) = temp;
+
+ temp = P_y | (D << 8);
+ temp |= (P_x << 16);
+ temp |= (Q_x << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = Q_y | (P_y << 8);
+ temp |= (D << 16);
+ temp |= (P_x << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = R_y | (Q_y << 8);
+ temp |= (P_y << 16);
+ temp |= (D << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+
+ /* Diagonal Vertical Right */
+ mode_avail[AVC_I4_Vertical_Right] = 1;
+ pred = encvid->pred_i4[AVC_I4_Vertical_Right];
+
+ Q0 = P_A + P_B + 1;
+ R0 = P_B + P_C + 1;
+ S0 = P_C + P_D + 1;
+ P0 = P_X + P_A + 1;
+ D = (P_I + 2 * P_X + P_A + 2) >> 2;
+
+ P1 = (P0 + Q0) >> 2;
+ Q1 = (Q0 + R0) >> 2;
+ R1 = (R0 + S0) >> 2;
+
+ P0 >>= 1;
+ Q0 >>= 1;
+ R0 >>= 1;
+ S0 >>= 1;
+
+ P2 = (P_X + 2 * P_I + P_J + 2) >> 2;
+ Q2 = (P_I + 2 * P_J + P_K + 2) >> 2;
+
+ temp = P0 | (Q0 << 8); //[P0 Q0 R0 S0]
+ //[D P1 Q1 R1]
+ temp |= (R0 << 16); //[P2 P0 Q0 R0]
+ temp |= (S0 << 24); //[Q2 D P1 Q1]
+ *((uint32*)pred) = temp;
+
+ temp = D | (P1 << 8);
+ temp |= (Q1 << 16);
+ temp |= (R1 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = P2 | (P0 << 8);
+ temp |= (Q0 << 16);
+ temp |= (R0 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = Q2 | (D << 8);
+ temp |= (P1 << 16);
+ temp |= (Q1 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+
+ /* Horizontal Down */
+ mode_avail[AVC_I4_Horizontal_Down] = 1;
+ pred = encvid->pred_i4[AVC_I4_Horizontal_Down];
+
+
+ Q2 = (P_A + 2 * P_B + P_C + 2) >> 2;
+ P2 = (P_X + 2 * P_A + P_B + 2) >> 2;
+ D = (P_I + 2 * P_X + P_A + 2) >> 2;
+ P0 = P_X + P_I + 1;
+ Q0 = P_I + P_J + 1;
+ R0 = P_J + P_K + 1;
+ S0 = P_K + P_L + 1;
+
+ P1 = (P0 + Q0) >> 2;
+ Q1 = (Q0 + R0) >> 2;
+ R1 = (R0 + S0) >> 2;
+
+ P0 >>= 1;
+ Q0 >>= 1;
+ R0 >>= 1;
+ S0 >>= 1;
+
+
+ /* we can pack these */
+ temp = P0 | (D << 8); //[P0 D P2 Q2]
+ //[Q0 P1 P0 D ]
+ temp |= (P2 << 16); //[R0 Q1 Q0 P1]
+ temp |= (Q2 << 24); //[S0 R1 R0 Q1]
+ *((uint32*)pred) = temp;
+
+ temp = Q0 | (P1 << 8);
+ temp |= (P0 << 16);
+ temp |= (D << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = R0 | (Q1 << 8);
+ temp |= (Q0 << 16);
+ temp |= (P1 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ temp = S0 | (R1 << 8);
+ temp |= (R0 << 16);
+ temp |= (Q1 << 24);
+ *((uint32*)(pred += 4)) = temp;
+
+ }
+
+ /* vertical left */
+ mode_avail[AVC_I4_Vertical_Left] = 0;
+ if (availability.top)
+ {
+ mode_avail[AVC_I4_Vertical_Left] = 1;
+ pred = encvid->pred_i4[AVC_I4_Vertical_Left];
+
+ x0 = P_A + P_B + 1;
+ x1 = P_B + P_C + 1;
+ x2 = P_C + P_D + 1;
+ if (availability.top_right)
+ {
+ x3 = P_D + P_E + 1;
+ x4 = P_E + P_F + 1;
+ x5 = P_F + P_G + 1;
+ }
+ else
+ {
+ x3 = x4 = x5 = (P_D << 1) + 1;
+ }
+
+ temp1 = (x0 >> 1);
+ temp1 |= ((x1 >> 1) << 8);
+ temp1 |= ((x2 >> 1) << 16);
+ temp1 |= ((x3 >> 1) << 24);
+
+ *((uint32*)pred) = temp1;
+
+ temp2 = ((x0 + x1) >> 2);
+ temp2 |= (((x1 + x2) >> 2) << 8);
+ temp2 |= (((x2 + x3) >> 2) << 16);
+ temp2 |= (((x3 + x4) >> 2) << 24);
+
+ *((uint32*)(pred += 4)) = temp2;
+
+ temp1 = (temp1 >> 8) | ((x4 >> 1) << 24); /* rotate out old value */
+ *((uint32*)(pred += 4)) = temp1;
+
+ temp2 = (temp2 >> 8) | (((x4 + x5) >> 2) << 24); /* rotate out old value */
+ *((uint32*)(pred += 4)) = temp2;
+ }
+
+ //===== LOOP OVER ALL 4x4 INTRA PREDICTION MODES =====
+ // can re-order the search here instead of going in order
+
+ // find most probable mode
+ encvid->mostProbableI4Mode[blkidx] = mostProbableMode = FindMostProbableI4Mode(video, blkidx);
+
+ min_cost = 0xFFFF;
+
+ for (ipmode = 0; ipmode < AVCNumI4PredMode; ipmode++)
+ {
+ if (mode_avail[ipmode] == TRUE)
+ {
+ cost = (ipmode == mostProbableMode) ? 0 : fixedcost;
+ pred = encvid->pred_i4[ipmode];
+
+ cost_i4(org, org_pitch, pred, &cost);
+
+ if (cost < min_cost)
+ {
+ currMB->i4Mode[blkidx] = (AVCIntra4x4PredMode)ipmode;
+ min_cost = cost;
+ min_sad = cost - ((ipmode == mostProbableMode) ? 0 : fixedcost);
+ }
+ }
+ }
+
+ if (blkidx == 0)
+ {
+ encvid->i4_sad = min_sad;
+ }
+ else
+ {
+ encvid->i4_sad += min_sad;
+ }
+
+ return min_cost;
+}
+
+int FindMostProbableI4Mode(AVCCommonObj *video, int blkidx)
+{
+ int dcOnlyPredictionFlag;
+ AVCMacroblock *currMB = video->currMB;
+ int intra4x4PredModeA, intra4x4PredModeB, predIntra4x4PredMode;
+
+
+ dcOnlyPredictionFlag = 0;
+ if (blkidx&0x3)
+ {
+ intra4x4PredModeA = currMB->i4Mode[blkidx-1]; // block to the left
+ }
+ else /* for blk 0, 4, 8, 12 */
+ {
+ if (video->intraAvailA)
+ {
+ if (video->mblock[video->mbAddrA].mbMode == AVC_I4)
+ {
+ intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[blkidx + 3];
+ }
+ else
+ {
+ intra4x4PredModeA = AVC_I4_DC;
+ }
+ }
+ else
+ {
+ dcOnlyPredictionFlag = 1;
+ goto PRED_RESULT_READY; // skip below
+ }
+ }
+
+ if (blkidx >> 2)
+ {
+ intra4x4PredModeB = currMB->i4Mode[blkidx-4]; // block above
+ }
+ else /* block 0, 1, 2, 3 */
+ {
+ if (video->intraAvailB)
+ {
+ if (video->mblock[video->mbAddrB].mbMode == AVC_I4)
+ {
+ intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[blkidx+12];
+ }
+ else
+ {
+ intra4x4PredModeB = AVC_I4_DC;
+ }
+ }
+ else
+ {
+ dcOnlyPredictionFlag = 1;
+ }
+ }
+
+PRED_RESULT_READY:
+ if (dcOnlyPredictionFlag)
+ {
+ intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC;
+ }
+
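+ /* per the standard, the predicted I4 mode is the smaller of the left and above
+ neighbours' modes; DC was substituted above when a neighbour is unavailable or
+ not coded as I4. */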
+ predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB);
+
+ return predIntra4x4PredMode;
+}
+
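+/* 4x4 SATD used for I4 mode decision: the residual is transformed with a 4-point
+Hadamard butterfly horizontally and vertically, the absolute coefficients are
+summed, and the sum is halved (rounding up) to compensate for the transform gain.
+The result is accumulated into *cost. */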
+void cost_i4(uint8 *org, int org_pitch, uint8 *pred, uint16 *cost)
+{
+ int k;
+ int16 res[16], *pres;
+ int m0, m1, m2, m3, tmp1;
+ int satd = 0;
+
+ pres = res;
+ // horizontal transform
+ k = 4;
+ while (k > 0)
+ {
+ m0 = org[0] - pred[0];
+ m3 = org[3] - pred[3];
+ m0 += m3;
+ m3 = m0 - (m3 << 1);
+ m1 = org[1] - pred[1];
+ m2 = org[2] - pred[2];
+ m1 += m2;
+ m2 = m1 - (m2 << 1);
+ pres[0] = m0 + m1;
+ pres[2] = m0 - m1;
+ pres[1] = m2 + m3;
+ pres[3] = m3 - m2;
+
+ org += org_pitch;
+ pres += 4;
+ pred += 4;
+ k--;
+ }
+ /* vertical transform */
+ pres = res;
+ k = 4;
+ while (k > 0)
+ {
+ m0 = pres[0];
+ m3 = pres[12];
+ m0 += m3;
+ m3 = m0 - (m3 << 1);
+ m1 = pres[4];
+ m2 = pres[8];
+ m1 += m2;
+ m2 = m1 - (m2 << 1);
+ pres[0] = m0 + m1;
+ pres[8] = m0 - m1;
+ pres[4] = m2 + m3;
+ pres[12] = m3 - m2;
+
+ pres++;
+ k--;
+
+ }
+
+ pres = res;
+ k = 4;
+ while (k > 0)
+ {
+ tmp1 = *pres++;
+ satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ tmp1 = *pres++;
+ satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ tmp1 = *pres++;
+ satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ tmp1 = *pres++;
+ satd += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ k--;
+ }
+
+ satd = (satd + 1) >> 1;
+ *cost += satd;
+
+ return ;
+}
+
+void chroma_intra_search(AVCEncObject *encvid)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCPictureData *currPic = video->currPic;
+
+ int x_pos = video->mb_x << 3;
+ int y_pos = video->mb_y << 3;
+ int pitch = currPic->pitch >> 1;
+ int offset = y_pos * pitch + x_pos;
+
+ uint8 *comp_ref_x, *comp_ref_y, *pred;
+ int sum_x0, sum_x1, sum_y0, sum_y1;
+ int pred_0[2], pred_1[2], pred_2[2], pred_3[2];
+ uint32 pred_a, pred_b, pred_c, pred_d;
+ int i, j, component;
+ int a_16, b, c, factor_c, topleft;
+ int H, V, value;
+ uint8 *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;
+
+ uint8 *curCb = currPic->Scb + offset;
+ uint8 *curCr = currPic->Scr + offset;
+
+ uint8 *orgCb, *orgCr;
+ AVCFrameIO *currInput = encvid->currInput;
+ AVCMacroblock *currMB = video->currMB;
+ int org_pitch;
+ int cost, mincost;
+
+ /* evaluate DC mode */
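+ /* pred_0..pred_3 are the DC predictors for the four 4x4 quadrants of the 8x8
+ chroma block (array index 0 = Cb, 1 = Cr): the top-left and bottom-right
+ quadrants average both the top and left neighbours when available, while the
+ other two use only one side, as in the spec's chroma DC prediction. */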
+ if (video->intraAvailB & video->intraAvailA)
+ {
+ comp_ref_x = curCb - pitch;
+ comp_ref_y = curCb - 1;
+
+ for (i = 0; i < 2; i++)
+ {
+ pred_a = *((uint32*)comp_ref_x);
+ comp_ref_x += 4;
+ pred_b = (pred_a >> 8) & 0xFF00FF;
+ pred_a &= 0xFF00FF;
+ pred_a += pred_b;
+ pred_a += (pred_a >> 16);
+ sum_x0 = pred_a & 0xFFFF;
+
+ pred_a = *((uint32*)comp_ref_x);
+ pred_b = (pred_a >> 8) & 0xFF00FF;
+ pred_a &= 0xFF00FF;
+ pred_a += pred_b;
+ pred_a += (pred_a >> 16);
+ sum_x1 = pred_a & 0xFFFF;
+
+ pred_1[i] = (sum_x1 + 2) >> 2;
+
+ sum_y0 = *comp_ref_y;
+ sum_y0 += *(comp_ref_y += pitch);
+ sum_y0 += *(comp_ref_y += pitch);
+ sum_y0 += *(comp_ref_y += pitch);
+
+ sum_y1 = *(comp_ref_y += pitch);
+ sum_y1 += *(comp_ref_y += pitch);
+ sum_y1 += *(comp_ref_y += pitch);
+ sum_y1 += *(comp_ref_y += pitch);
+
+ pred_2[i] = (sum_y1 + 2) >> 2;
+
+ pred_0[i] = (sum_y0 + sum_x0 + 4) >> 3;
+ pred_3[i] = (sum_y1 + sum_x1 + 4) >> 3;
+
+ comp_ref_x = curCr - pitch;
+ comp_ref_y = curCr - 1;
+ }
+ }
+
+ else if (video->intraAvailA)
+ {
+ comp_ref_y = curCb - 1;
+ for (i = 0; i < 2; i++)
+ {
+ sum_y0 = *comp_ref_y;
+ sum_y0 += *(comp_ref_y += pitch);
+ sum_y0 += *(comp_ref_y += pitch);
+ sum_y0 += *(comp_ref_y += pitch);
+
+ sum_y1 = *(comp_ref_y += pitch);
+ sum_y1 += *(comp_ref_y += pitch);
+ sum_y1 += *(comp_ref_y += pitch);
+ sum_y1 += *(comp_ref_y += pitch);
+
+ pred_0[i] = pred_1[i] = (sum_y0 + 2) >> 2;
+ pred_2[i] = pred_3[i] = (sum_y1 + 2) >> 2;
+
+ comp_ref_y = curCr - 1;
+ }
+ }
+ else if (video->intraAvailB)
+ {
+ comp_ref_x = curCb - pitch;
+ for (i = 0; i < 2; i++)
+ {
+ pred_a = *((uint32*)comp_ref_x);
+ comp_ref_x += 4;
+ pred_b = (pred_a >> 8) & 0xFF00FF;
+ pred_a &= 0xFF00FF;
+ pred_a += pred_b;
+ pred_a += (pred_a >> 16);
+ sum_x0 = pred_a & 0xFFFF;
+
+ pred_a = *((uint32*)comp_ref_x);
+ pred_b = (pred_a >> 8) & 0xFF00FF;
+ pred_a &= 0xFF00FF;
+ pred_a += pred_b;
+ pred_a += (pred_a >> 16);
+ sum_x1 = pred_a & 0xFFFF;
+
+ pred_0[i] = pred_2[i] = (sum_x0 + 2) >> 2;
+ pred_1[i] = pred_3[i] = (sum_x1 + 2) >> 2;
+
+ comp_ref_x = curCr - pitch;
+ }
+ }
+ else
+ {
+ pred_0[0] = pred_0[1] = pred_1[0] = pred_1[1] =
+ pred_2[0] = pred_2[1] = pred_3[0] = pred_3[1] = 128;
+ }
+
+ pred = encvid->pred_ic[AVC_IC_DC];
+
+ pred_a = pred_0[0];
+ pred_b = pred_1[0];
+ pred_a |= (pred_a << 8);
+ pred_a |= (pred_a << 16);
+ pred_b |= (pred_b << 8);
+ pred_b |= (pred_b << 16);
+
+ pred_c = pred_0[1];
+ pred_d = pred_1[1];
+ pred_c |= (pred_c << 8);
+ pred_c |= (pred_c << 16);
+ pred_d |= (pred_d << 8);
+ pred_d |= (pred_d << 16);
+
+
+ for (j = 0; j < 4; j++) /* 4 lines */
+ {
+ *((uint32*)pred) = pred_a;
+ *((uint32*)(pred + 4)) = pred_b;
+ *((uint32*)(pred + 8)) = pred_c;
+ *((uint32*)(pred + 12)) = pred_d;
+ pred += 16; /* move to the next line */
+ }
+
+ pred_a = pred_2[0];
+ pred_b = pred_3[0];
+ pred_a |= (pred_a << 8);
+ pred_a |= (pred_a << 16);
+ pred_b |= (pred_b << 8);
+ pred_b |= (pred_b << 16);
+
+ pred_c = pred_2[1];
+ pred_d = pred_3[1];
+ pred_c |= (pred_c << 8);
+ pred_c |= (pred_c << 16);
+ pred_d |= (pred_d << 8);
+ pred_d |= (pred_d << 16);
+
+ for (j = 0; j < 4; j++) /* 4 lines */
+ {
+ *((uint32*)pred) = pred_a;
+ *((uint32*)(pred + 4)) = pred_b;
+ *((uint32*)(pred + 8)) = pred_c;
+ *((uint32*)(pred + 12)) = pred_d;
+ pred += 16; /* move to the next line */
+ }
+
+ /* predict horizontal mode */
+ if (video->intraAvailA)
+ {
+ comp_ref_y = curCb - 1;
+ comp_ref_x = curCr - 1;
+ pred = encvid->pred_ic[AVC_IC_Horizontal];
+
+ for (i = 4; i < 6; i++)
+ {
+ for (j = 0; j < 4; j++)
+ {
+ pred_a = *comp_ref_y;
+ comp_ref_y += pitch;
+ pred_a |= (pred_a << 8);
+ pred_a |= (pred_a << 16);
+ *((uint32*)pred) = pred_a;
+ *((uint32*)(pred + 4)) = pred_a;
+
+ pred_a = *comp_ref_x;
+ comp_ref_x += pitch;
+ pred_a |= (pred_a << 8);
+ pred_a |= (pred_a << 16);
+ *((uint32*)(pred + 8)) = pred_a;
+ *((uint32*)(pred + 12)) = pred_a;
+
+ pred += 16;
+ }
+ }
+ }
+
+ /* vertical mode */
+ if (video->intraAvailB)
+ {
+ comp_ref_x = curCb - pitch;
+ comp_ref_y = curCr - pitch;
+ pred = encvid->pred_ic[AVC_IC_Vertical];
+
+ pred_a = *((uint32*)comp_ref_x);
+ pred_b = *((uint32*)(comp_ref_x + 4));
+ pred_c = *((uint32*)comp_ref_y);
+ pred_d = *((uint32*)(comp_ref_y + 4));
+
+ for (j = 0; j < 8; j++)
+ {
+ *((uint32*)pred) = pred_a;
+ *((uint32*)(pred + 4)) = pred_b;
+ *((uint32*)(pred + 8)) = pred_c;
+ *((uint32*)(pred + 12)) = pred_d;
+ pred += 16;
+ }
+ }
+
+ /* Intra_Chroma_Plane */
+ if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
+ {
+ comp_ref_x = curCb - pitch;
+ comp_ref_y = curCb - 1;
+ topleft = curCb[-pitch-1];
+
+ pred = encvid->pred_ic[AVC_IC_Plane];
+ for (component = 0; component < 2; component++)
+ {
+ H = V = 0;
+ comp_ref_x0 = comp_ref_x + 4;
+ comp_ref_x1 = comp_ref_x + 2;
+ comp_ref_y0 = comp_ref_y + (pitch << 2);
+ comp_ref_y1 = comp_ref_y + (pitch << 1);
+ for (i = 1; i < 4; i++)
+ {
+ H += i * (*comp_ref_x0++ - *comp_ref_x1--);
+ V += i * (*comp_ref_y0 - *comp_ref_y1);
+ comp_ref_y0 += pitch;
+ comp_ref_y1 -= pitch;
+ }
+ H += i * (*comp_ref_x0++ - topleft);
+ V += i * (*comp_ref_y0 - *comp_ref_y1);
+
+ a_16 = ((*(comp_ref_x + 7) + *(comp_ref_y + 7 * pitch)) << 4) + 16;
+ b = (17 * H + 16) >> 5;
+ c = (17 * V + 16) >> 5;
+
+ pred_a = 0;
+ for (i = 4; i < 6; i++)
+ {
+ for (j = 0; j < 4; j++)
+ {
+ factor_c = a_16 + c * (pred_a++ - 3);
+
+ factor_c -= 3 * b;
+
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ pred_b = value;
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ pred_b |= (value << 8);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ pred_b |= (value << 16);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ pred_b |= (value << 24);
+ *((uint32*)pred) = pred_b;
+
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ pred_b = value;
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ pred_b |= (value << 8);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ pred_b |= (value << 16);
+ value = factor_c >> 5;
+ factor_c += b;
+ CLIP_RESULT(value)
+ pred_b |= (value << 24);
+ *((uint32*)(pred + 4)) = pred_b;
+ pred += 16;
+ }
+ }
+
+ pred -= 120; /* point to cr */
+ comp_ref_x = curCr - pitch;
+ comp_ref_y = curCr - 1;
+ topleft = curCr[-pitch-1];
+ }
+ }
+
+ /* now evaluate it */
+
+ org_pitch = (currInput->pitch) >> 1;
+ offset = x_pos + y_pos * org_pitch;
+
+ orgCb = currInput->YCbCr[1] + offset;
+ orgCr = currInput->YCbCr[2] + offset;
+
+ mincost = 0x7fffffff;
+ cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_DC], mincost);
+ if (cost < mincost)
+ {
+ mincost = cost;
+ currMB->intra_chroma_pred_mode = AVC_IC_DC;
+ }
+
+ if (video->intraAvailA)
+ {
+ cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Horizontal], mincost);
+ if (cost < mincost)
+ {
+ mincost = cost;
+ currMB->intra_chroma_pred_mode = AVC_IC_Horizontal;
+ }
+ }
+
+ if (video->intraAvailB)
+ {
+ cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Vertical], mincost);
+ if (cost < mincost)
+ {
+ mincost = cost;
+ currMB->intra_chroma_pred_mode = AVC_IC_Vertical;
+ }
+ }
+
+ if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
+ {
+ cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Plane], mincost);
+ if (cost < mincost)
+ {
+ mincost = cost;
+ currMB->intra_chroma_pred_mode = AVC_IC_Plane;
+ }
+ }
+
+
+ return ;
+}
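+
+/* chroma_intra_search() above generates the 8x8 Cb and Cr predictions for the
+   chroma intra modes that the neighbour availability allows (DC always;
+   Horizontal needs the left MB, Vertical the top MB, Plane all of left, top
+   and top-left) into encvid->pred_ic[mode].  Each prediction row is 16 bytes
+   wide: Cb occupies bytes 0-7 and Cr bytes 8-15.  The mode with the smallest
+   SATDChroma() cost is stored in currMB->intra_chroma_pred_mode. */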
+
+
+int SATDChroma(uint8 *orgCb, uint8 *orgCr, int org_pitch, uint8 *pred, int min_cost)
+{
+ int cost;
+ /* first take difference between orgCb, orgCr and pred */
+ int16 res[128], *pres; // residue
+ int m0, m1, m2, m3, tmp1;
+ int j, k;
+
+ pres = res;
+ org_pitch -= 8;
+ // horizontal transform
+ for (j = 0; j < 8; j++)
+ {
+ k = 2;
+ while (k > 0)
+ {
+ m0 = orgCb[0] - pred[0];
+ m3 = orgCb[3] - pred[3];
+ m0 += m3;
+ m3 = m0 - (m3 << 1);
+ m1 = orgCb[1] - pred[1];
+ m2 = orgCb[2] - pred[2];
+ m1 += m2;
+ m2 = m1 - (m2 << 1);
+ pres[0] = m0 + m1;
+ pres[2] = m0 - m1;
+ pres[1] = m2 + m3;
+ pres[3] = m3 - m2;
+
+ orgCb += 4;
+ pres += 4;
+ pred += 4;
+ k--;
+ }
+ orgCb += org_pitch;
+ k = 2;
+ while (k > 0)
+ {
+ m0 = orgCr[0] - pred[0];
+ m3 = orgCr[3] - pred[3];
+ m0 += m3;
+ m3 = m0 - (m3 << 1);
+ m1 = orgCr[1] - pred[1];
+ m2 = orgCr[2] - pred[2];
+ m1 += m2;
+ m2 = m1 - (m2 << 1);
+ pres[0] = m0 + m1;
+ pres[2] = m0 - m1;
+ pres[1] = m2 + m3;
+ pres[3] = m3 - m2;
+
+ orgCr += 4;
+ pres += 4;
+ pred += 4;
+ k--;
+ }
+ orgCr += org_pitch;
+ }
+
+ /* vertical transform */
+ for (j = 0; j < 2; j++)
+ {
+ pres = res + (j << 6);
+ k = 16;
+ while (k > 0)
+ {
+ m0 = pres[0];
+ m3 = pres[3<<4];
+ m0 += m3;
+ m3 = m0 - (m3 << 1);
+ m1 = pres[1<<4];
+ m2 = pres[2<<4];
+ m1 += m2;
+ m2 = m1 - (m2 << 1);
+ pres[0] = m0 + m1;
+ pres[2<<4] = m0 - m1;
+ pres[1<<4] = m2 + m3;
+ pres[3<<4] = m3 - m2;
+
+ pres++;
+ k--;
+ }
+ }
+
+ /* now sum of absolute value */
+ pres = res;
+ cost = 0;
+ k = 128;
+ while (k > 0)
+ {
+ tmp1 = *pres++;
+ cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ tmp1 = *pres++;
+ cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ tmp1 = *pres++;
+ cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ tmp1 = *pres++;
+ cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ tmp1 = *pres++;
+ cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ tmp1 = *pres++;
+ cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ tmp1 = *pres++;
+ cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ tmp1 = *pres++;
+ cost += ((tmp1 >= 0) ? tmp1 : -tmp1);
+ k -= 8;
+ if (cost > min_cost) /* early drop out */
+ {
+ return cost;
+ }
+ }
+
+ return cost;
+}
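+
+/* SATDChroma() above computes the joint Cb+Cr cost for one candidate chroma
+   prediction: the 16x8 residual array (Cb in columns 0-7, Cr in columns 8-15)
+   is transformed in 4x4 blocks with the same Hadamard butterfly as cost_i4(),
+   and the absolute transformed values are summed.  The running sum is checked
+   against min_cost every 8 values, so a caller that passes its best cost so
+   far gets an early exit for hopeless candidates.  Unlike cost_i4(), the sum
+   is not halved. */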
+
+
+
+///////////////////////////////// old code, unused
+/* find the best intra mode based on original (unencoded) frame */
+/* output is
+ currMB->mb_intra, currMB->mbMode,
+ currMB->i16Mode (if currMB->mbMode == AVC_I16)
+ currMB->i4Mode[..] (if currMB->mbMode == AVC_I4) */
+
+#ifdef FIXED_INTRAPRED_MODE
+void MBIntraSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum)
+{
+ (void)(mbNum);
+
+ AVCCommonObj *video = encvid->common;
+ int indx, block_x, block_y;
+
+ video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;
+
+ if (!video->currPicParams->constrained_intra_pred_flag)
+ {
+ video->intraAvailA = video->mbAvailA;
+ video->intraAvailB = video->mbAvailB;
+ video->intraAvailC = video->mbAvailC;
+ video->intraAvailD = video->mbAvailD;
+ }
+ else
+ {
+ if (video->mbAvailA)
+ {
+ video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
+ }
+ if (video->mbAvailB)
+ {
+ video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
+ }
+ if (video->mbAvailC)
+ {
+ video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
+ }
+ if (video->mbAvailD)
+ {
+ video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
+ }
+ }
+
+ currMB->mb_intra = TRUE;
+ currMB->mbMode = FIXED_INTRAPRED_MODE;
+
+ if (currMB->mbMode == AVC_I16)
+ {
+ currMB->i16Mode = FIXED_I16_MODE;
+
+ if (FIXED_I16_MODE == AVC_I16_Vertical && !video->intraAvailB)
+ {
+ currMB->i16Mode = AVC_I16_DC;
+ }
+
+ if (FIXED_I16_MODE == AVC_I16_Horizontal && !video->intraAvailA)
+ {
+ currMB->i16Mode = AVC_I16_DC;
+ }
+
+ if (FIXED_I16_MODE == AVC_I16_Plane && !(video->intraAvailA && video->intraAvailB && video->intraAvailD))
+ {
+ currMB->i16Mode = AVC_I16_DC;
+ }
+ }
+ else //if(currMB->mbMode == AVC_I4)
+ {
+ for (indx = 0; indx < 16; indx++)
+ {
+ block_x = blkIdx2blkX[indx];
+ block_y = blkIdx2blkY[indx];
+
+ currMB->i4Mode[(block_y<<2)+block_x] = FIXED_I4_MODE;
+
+ if (FIXED_I4_MODE == AVC_I4_Vertical && !(block_y > 0 || video->intraAvailB))
+ {
+ currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
+ }
+
+ if (FIXED_I4_MODE == AVC_I4_Horizontal && !(block_x || video->intraAvailA))
+ {
+ currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
+ }
+
+ if (FIXED_I4_MODE == AVC_I4_Diagonal_Down_Left &&
+ (block_y == 0 && !video->intraAvailB))
+ {
+ currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
+ }
+
+ if (FIXED_I4_MODE == AVC_I4_Diagonal_Down_Right &&
+ !((block_y && block_x)
+ || (block_y && video->intraAvailA)
+ || (block_x && video->intraAvailB)
+ || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))
+ {
+ currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
+ }
+
+ if (FIXED_I4_MODE == AVC_I4_Vertical_Right &&
+ !((block_y && block_x)
+ || (block_y && video->intraAvailA)
+ || (block_x && video->intraAvailB)
+ || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))
+ {
+ currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
+ }
+
+ if (FIXED_I4_MODE == AVC_I4_Horizontal_Down &&
+ !((block_y && block_x)
+ || (block_y && video->intraAvailA)
+ || (block_x && video->intraAvailB)
+ || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))
+ {
+ currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
+ }
+
+ if (FIXED_I4_MODE == AVC_I4_Vertical_Left &&
+ (block_y == 0 && !video->intraAvailB))
+ {
+ currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
+ }
+
+ if (FIXED_I4_MODE == AVC_I4_Horizontal_Up && !(block_x || video->intraAvailA))
+ {
+ currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;
+ }
+ }
+ }
+
+ currMB->intra_chroma_pred_mode = FIXED_INTRA_CHROMA_MODE;
+
+ if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Horizontal && !(video->intraAvailA))
+ {
+ currMB->intra_chroma_pred_mode = AVC_IC_DC;
+ }
+
+ if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Vertical && !(video->intraAvailB))
+ {
+ currMB->intra_chroma_pred_mode = AVC_IC_DC;
+ }
+
+ if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Plane && !(video->intraAvailA && video->intraAvailB && video->intraAvailD))
+ {
+ currMB->intra_chroma_pred_mode = AVC_IC_DC;
+ }
+
+ /* also reset the motion vectors */
+ /* set MV and Ref_Idx codes of Intra blocks in P-slices */
+ memset(currMB->mvL0, 0, sizeof(int32)*16);
+ currMB->ref_idx_L0[0] = -1;
+ currMB->ref_idx_L0[1] = -1;
+ currMB->ref_idx_L0[2] = -1;
+ currMB->ref_idx_L0[3] = -1;
+
+    /* On output, currMB->mbMode is set to AVC_I4, AVC_I16 or another value in
+       the AVCMBMode enum, and mbType, mb_intra and intra_chroma_pred_mode are
+       set accordingly. */
+ return ;
+}
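+
+/* When FIXED_INTRAPRED_MODE is defined, the version of MBIntraSearch() above
+   skips the mode decision entirely: every macroblock is forced to the
+   compile-time FIXED_* luma and chroma modes, falling back to DC whenever a
+   mode needs neighbours that are not available (or not intra-coded, when
+   constrained_intra_pred_flag is set).  This is presumably a debug/test
+   configuration; the normal build uses the search below. */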
+#else // faster combined prediction+SAD calculation
+void MBIntraSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCFrameIO *currInput = encvid->currInput;
+ uint8 *curL, *curCb, *curCr;
+ uint8 *comp, *pred_block;
+ int block_x, block_y, offset;
+ uint sad, sad4, sadI4, sadI16;
+ int component, SubBlock_indx, temp;
+ int pitch = video->currPic->pitch;
+
+ /* calculate the cost of each intra prediction mode and compare to the
+ inter mode */
+ /* full search for all intra prediction */
+ offset = (video->mb_y << 4) * pitch + (video->mb_x << 4);
+ curL = currInput->YCbCr[0] + offset;
+ pred_block = video->pred_block + 84;
+
+ /* Assuming that InitNeighborAvailability has been called prior to this function */
+ video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;
+
+ if (!video->currPicParams->constrained_intra_pred_flag)
+ {
+ video->intraAvailA = video->mbAvailA;
+ video->intraAvailB = video->mbAvailB;
+ video->intraAvailC = video->mbAvailC;
+ video->intraAvailD = video->mbAvailD;
+ }
+ else
+ {
+ if (video->mbAvailA)
+ {
+ video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
+ }
+ if (video->mbAvailB)
+ {
+ video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
+ }
+ if (video->mbAvailC)
+ {
+ video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
+ }
+ if (video->mbAvailD)
+ {
+ video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
+ }
+ }
+
+ /* currently we're doing exhaustive search. Smart search will be used later */
+
+ /* I16 modes */
+ curL = currInput->YCbCr[0] + offset;
+ video->pintra_pred_top = curL - pitch;
+ video->pintra_pred_left = curL - 1;
+ if (video->mb_y)
+ {
+ video->intra_pred_topleft = *(curL - pitch - 1);
+ }
+
+ /* Intra_16x16_Vertical */
+ sadI16 = 65536;
+ /* check availability of top */
+ if (video->intraAvailB)
+ {
+ sad = SAD_I16_Vert(video, curL, sadI16);
+
+ if (sad < sadI16)
+ {
+ sadI16 = sad;
+ currMB->i16Mode = AVC_I16_Vertical;
+ }
+ }
+ /* Intra_16x16_Horizontal */
+ /* check availability of left */
+ if (video->intraAvailA)
+ {
+ sad = SAD_I16_HorzDC(video, curL, AVC_I16_Horizontal, sadI16);
+
+ if (sad < sadI16)
+ {
+ sadI16 = sad;
+ currMB->i16Mode = AVC_I16_Horizontal;
+ }
+ }
+
+ /* Intra_16x16_DC, default mode */
+ sad = SAD_I16_HorzDC(video, curL, AVC_I16_DC, sadI16);
+ if (sad < sadI16)
+ {
+ sadI16 = sad;
+ currMB->i16Mode = AVC_I16_DC;
+ }
+
+ /* Intra_16x16_Plane */
+ if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
+ {
+ sad = SAD_I16_Plane(video, curL, sadI16);
+
+ if (sad < sadI16)
+ {
+ sadI16 = sad;
+ currMB->i16Mode = AVC_I16_Plane;
+ }
+ }
+
+ sadI16 >>= 1; /* before comparison */
+
+ /* selection between intra4, intra16 or inter mode */
+ if (sadI16 < encvid->min_cost)
+ {
+ currMB->mb_intra = TRUE;
+ currMB->mbMode = AVC_I16;
+ encvid->min_cost = sadI16;
+ }
+
+ if (currMB->mb_intra) /* only do the chrominance search when intra is decided */
+ {
+ /* Note that we might be able to guess the type of prediction from
+ the luma prediction type */
+
+ /* now search for the best chroma intra prediction */
+ offset = (offset >> 2) + (video->mb_x << 2);
+ curCb = currInput->YCbCr[1] + offset;
+ curCr = currInput->YCbCr[2] + offset;
+
+ pitch >>= 1;
+ video->pintra_pred_top_cb = curCb - pitch;
+ video->pintra_pred_left_cb = curCb - 1;
+ video->pintra_pred_top_cr = curCr - pitch;
+ video->pintra_pred_left_cr = curCr - 1;
+
+ if (video->mb_y)
+ {
+ video->intra_pred_topleft_cb = *(curCb - pitch - 1);
+ video->intra_pred_topleft_cr = *(curCr - pitch - 1);
+ }
+
+ /* Intra_Chroma_DC */
+ sad4 = SAD_Chroma_DC(video, curCb, curCr, 65536);
+ currMB->intra_chroma_pred_mode = AVC_IC_DC;
+
+ /* Intra_Chroma_Horizontal */
+ if (video->intraAvailA)
+ {
+ /* check availability of left */
+ sad = SAD_Chroma_Horz(video, curCb, curCr, sad4);
+ if (sad < sad4)
+ {
+ sad4 = sad;
+ currMB->intra_chroma_pred_mode = AVC_IC_Horizontal;
+ }
+ }
+
+ /* Intra_Chroma_Vertical */
+ if (video->intraAvailB)
+ {
+ /* check availability of top */
+ sad = SAD_Chroma_Vert(video, curCb, curCr, sad4);
+
+ if (sad < sad4)
+ {
+ sad4 = sad;
+ currMB->intra_chroma_pred_mode = AVC_IC_Vertical;
+ }
+ }
+
+ /* Intra_Chroma_Plane */
+ if (video->intraAvailA && video->intraAvailB && video->intraAvailD)
+ {
+ /* check availability of top and left */
+ Intra_Chroma_Plane(video, pitch);
+
+ sad = SADChroma(pred_block + 452, curCb, curCr, pitch);
+
+ if (sad < sad4)
+ {
+ sad4 = sad;
+ currMB->intra_chroma_pred_mode = AVC_IC_Plane;
+ }
+ }
+
+ /* also reset the motion vectors */
+ /* set MV and Ref_Idx codes of Intra blocks in P-slices */
+ memset(currMB->mvL0, 0, sizeof(int32)*16);
+ memset(currMB->ref_idx_L0, -1, sizeof(int16)*4);
+
+ }
+
+    /* On output, currMB->mbMode is set to AVC_I4, AVC_I16 or another value in
+       the AVCMBMode enum, and mbType, mb_intra and intra_chroma_pred_mode are
+       set accordingly. */
+
+ return ;
+}
+#endif
+
+
diff --git a/media/libstagefright/codecs/avc/enc/src/motion_comp.cpp b/media/libstagefright/codecs/avc/enc/src/motion_comp.cpp
new file mode 100644
index 0000000..ac62d78
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/motion_comp.cpp
@@ -0,0 +1,2156 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+#include "avcenc_int.h"
+
+
+#define CLIP_RESULT(x) if((uint)x > 0xFF){ \
+ x = 0xFF & (~(x>>31));}
+
+/* (blkwidth << 2) + (dy << 1) + dx */
+static void (*const eChromaMC_SIMD[8])(uint8 *, int , int , int , uint8 *, int, int , int) =
+{
+ &eChromaFullMC_SIMD,
+ &eChromaHorizontalMC_SIMD,
+ &eChromaVerticalMC_SIMD,
+ &eChromaDiagonalMC_SIMD,
+ &eChromaFullMC_SIMD,
+ &eChromaHorizontalMC2_SIMD,
+ &eChromaVerticalMC2_SIMD,
+ &eChromaDiagonalMC2_SIMD
+};
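+
+/* Dispatch table for the chroma interpolation helpers.  As the index formula
+   in the comment above indicates, an entry is selected from the fractional
+   horizontal offset (dx), the fractional vertical offset (dy) and a block
+   width term, so the plain and the *_MC2_* variants cover the two block-width
+   cases for each of the four dx/dy combinations. */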
+/* Perform motion prediction and compensation, with residue if it exists. */
+void AVCMBMotionComp(AVCEncObject *encvid, AVCCommonObj *video)
+{
+ (void)(encvid);
+
+ AVCMacroblock *currMB = video->currMB;
+ AVCPictureData *currPic = video->currPic;
+ int mbPartIdx, subMbPartIdx;
+ int ref_idx;
+ int offset_MbPart_indx = 0;
+ int16 *mv;
+ uint32 x_pos, y_pos;
+ uint8 *curL, *curCb, *curCr;
+ uint8 *ref_l, *ref_Cb, *ref_Cr;
+ uint8 *predBlock, *predCb, *predCr;
+ int block_x, block_y, offset_x, offset_y, offsetP, offset;
+ int x_position = (video->mb_x << 4);
+ int y_position = (video->mb_y << 4);
+ int MbHeight, MbWidth, mbPartIdx_X, mbPartIdx_Y, offset_indx;
+ int picWidth = currPic->width;
+ int picPitch = currPic->pitch;
+ int picHeight = currPic->height;
+ uint32 tmp_word;
+
+ tmp_word = y_position * picPitch;
+ curL = currPic->Sl + tmp_word + x_position;
+ offset = (tmp_word >> 2) + (x_position >> 1);
+ curCb = currPic->Scb + offset;
+ curCr = currPic->Scr + offset;
+
+ predBlock = curL;
+ predCb = curCb;
+ predCr = curCr;
+
+ GetMotionVectorPredictor(video, 1);
+
+ for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
+ {
+ MbHeight = currMB->SubMbPartHeight[mbPartIdx];
+ MbWidth = currMB->SubMbPartWidth[mbPartIdx];
+ mbPartIdx_X = ((mbPartIdx + offset_MbPart_indx) & 1);
+ mbPartIdx_Y = (mbPartIdx + offset_MbPart_indx) >> 1;
+ ref_idx = currMB->ref_idx_L0[(mbPartIdx_Y << 1) + mbPartIdx_X];
+ offset_indx = 0;
+
+ ref_l = video->RefPicList0[ref_idx]->Sl;
+ ref_Cb = video->RefPicList0[ref_idx]->Scb;
+ ref_Cr = video->RefPicList0[ref_idx]->Scr;
+
+ for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
+ {
+ block_x = (mbPartIdx_X << 1) + ((subMbPartIdx + offset_indx) & 1);
+ block_y = (mbPartIdx_Y << 1) + (((subMbPartIdx + offset_indx) >> 1) & 1);
+ mv = (int16*)(currMB->mvL0 + block_x + (block_y << 2));
+ offset_x = x_position + (block_x << 2);
+ offset_y = y_position + (block_y << 2);
+ x_pos = (offset_x << 2) + *mv++; /*quarter pel */
+ y_pos = (offset_y << 2) + *mv; /*quarter pel */
+
+ //offset = offset_y * currPic->width;
+ //offsetC = (offset >> 2) + (offset_x >> 1);
+ offsetP = (block_y << 2) * picPitch + (block_x << 2);
+ eLumaMotionComp(ref_l, picPitch, picHeight, x_pos, y_pos,
+ /*comp_Sl + offset + offset_x,*/
+ predBlock + offsetP, picPitch, MbWidth, MbHeight);
+
+ offsetP = (block_y * picWidth) + (block_x << 1);
+ eChromaMotionComp(ref_Cb, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
+ /*comp_Scb + offsetC,*/
+ predCb + offsetP, picPitch >> 1, MbWidth >> 1, MbHeight >> 1);
+ eChromaMotionComp(ref_Cr, picWidth >> 1, picHeight >> 1, x_pos, y_pos,
+ /*comp_Scr + offsetC,*/
+ predCr + offsetP, picPitch >> 1, MbWidth >> 1, MbHeight >> 1);
+
+ offset_indx = currMB->SubMbPartWidth[mbPartIdx] >> 3;
+ }
+ offset_MbPart_indx = currMB->MbPartWidth >> 4;
+ }
+
+ return ;
+}
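+
+/* AVCMBMotionComp() above walks the macroblock partitions and sub-partitions,
+   converts each motion vector into absolute quarter-pel coordinates
+   (x_pos = (full-pel x << 2) + mvx, likewise for y), and writes the luma and
+   chroma predictions straight into the current picture buffers at the block
+   position via eLumaMotionComp()/eChromaMotionComp().  For example, a motion
+   vector of (+5, -2) quarter-pel units on a block at (16, 32) gives
+   x_pos = 16*4 + 5 = 69  -> full-pel 17 with dx = 69 & 3 = 1, and
+   y_pos = 32*4 - 2 = 126 -> full-pel 31 with dy = 126 & 3 = 2. */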
+
+
+/* perform the actual motion comp here */
+void eLumaMotionComp(uint8 *ref, int picpitch, int picheight,
+ int x_pos, int y_pos,
+ uint8 *pred, int pred_pitch,
+ int blkwidth, int blkheight)
+{
+ (void)(picheight);
+
+ int dx, dy;
+ int temp2[21][21]; /* for intermediate results */
+ uint8 *ref2;
+
+ dx = x_pos & 3;
+ dy = y_pos & 3;
+ x_pos = x_pos >> 2; /* round it to full-pel resolution */
+ y_pos = y_pos >> 2;
+
+ /* perform actual motion compensation */
+ if (dx == 0 && dy == 0)
+ { /* fullpel position *//* G */
+
+ ref += y_pos * picpitch + x_pos;
+
+ eFullPelMC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight);
+
+ } /* other positions */
+ else if (dy == 0)
+ { /* no vertical interpolation *//* a,b,c*/
+
+ ref += y_pos * picpitch + x_pos;
+
+ eHorzInterp1MC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight, dx);
+ }
+ else if (dx == 0)
+ { /*no horizontal interpolation *//* d,h,n */
+
+ ref += y_pos * picpitch + x_pos;
+
+ eVertInterp1MC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight, dy);
+ }
+ else if (dy == 2)
+ { /* horizontal cross *//* i, j, k */
+
+ ref += y_pos * picpitch + x_pos - 2; /* move to the left 2 pixels */
+
+ eVertInterp2MC(ref, picpitch, &temp2[0][0], 21, blkwidth + 5, blkheight);
+
+ eHorzInterp2MC(&temp2[0][2], 21, pred, pred_pitch, blkwidth, blkheight, dx);
+ }
+ else if (dx == 2)
+ { /* vertical cross */ /* f,q */
+
+ ref += (y_pos - 2) * picpitch + x_pos; /* move to up 2 lines */
+
+ eHorzInterp3MC(ref, picpitch, &temp2[0][0], 21, blkwidth, blkheight + 5);
+ eVertInterp3MC(&temp2[2][0], 21, pred, pred_pitch, blkwidth, blkheight, dy);
+ }
+ else
+ { /* diagonal *//* e,g,p,r */
+
+ ref2 = ref + (y_pos + (dy / 2)) * picpitch + x_pos;
+
+ ref += (y_pos * picpitch) + x_pos + (dx / 2);
+
+ eDiagonalInterpMC(ref2, ref, picpitch, pred, pred_pitch, blkwidth, blkheight);
+ }
+
+ return ;
+}
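+
+/* eLumaMotionComp() above dispatches on the fractional part (dx, dy) of the
+   quarter-pel position: full-pel copy, horizontal-only or vertical-only
+   filtering, the two "cross" cases that need a half-pel intermediate pass in
+   the other direction, and the diagonal case that averages two half-pel
+   results.  The interpolation routines below are all built from the standard
+   H.264 6-tap half-sample filter (1, -5, 20, 20, -5, 1); quarter samples are
+   the rounded average of the nearest full/half samples.  Illustrative scalar
+   sketch only (hypothetical helper, never built; the optimised routines below
+   do the same arithmetic, mostly on packed 32-bit words):
+
+       static uint8 halfpel_6tap(int e, int f, int g, int h, int i, int j)
+       {
+           int v = e - 5*f + 20*g + 20*h - 5*i + j;     // (1,-5,20,20,-5,1)
+           v = (v + 16) >> 5;                           // round and scale
+           if ((uint)v > 0xFF) v = 0xFF & (~(v >> 31)); // clip, as CLIP_RESULT
+           return (uint8)v;
+       }
+
+   A quarter sample next to it is then (halfpel + nearest fullpel + 1) >> 1,
+   which is what the dx&1 / dy&1 branches below compute. */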
+
+void eCreateAlign(uint8 *ref, int picpitch, int y_pos,
+ uint8 *out, int blkwidth, int blkheight)
+{
+ int i, j;
+ int offset, out_offset;
+ uint32 prev_pix, result, pix1, pix2, pix4;
+
+ ref += y_pos * picpitch;// + x_pos;
+ out_offset = 24 - blkwidth;
+
+ //switch(x_pos&0x3){
+ switch (((uint32)ref)&0x3)
+ {
+ case 1:
+ offset = picpitch - blkwidth - 3;
+ for (j = 0; j < blkheight; j++)
+ {
+ pix1 = *ref++;
+ pix2 = *((uint16*)ref);
+ ref += 2;
+ result = (pix2 << 8) | pix1;
+
+ for (i = 3; i < blkwidth; i += 4)
+ {
+ pix4 = *((uint32*)ref);
+ ref += 4;
+                    prev_pix = (pix4 << 24) & 0xFF000000; /* mask out the byte belonging to the previous word */
+ result |= prev_pix;
+ *((uint32*)out) = result; /* write 4 bytes */
+ out += 4;
+ result = pix4 >> 8; /* for the next loop */
+ }
+ ref += offset;
+ out += out_offset;
+ }
+ break;
+ case 2:
+ offset = picpitch - blkwidth - 2;
+ for (j = 0; j < blkheight; j++)
+ {
+ result = *((uint16*)ref);
+ ref += 2;
+ for (i = 2; i < blkwidth; i += 4)
+ {
+ pix4 = *((uint32*)ref);
+ ref += 4;
+                    prev_pix = (pix4 << 16) & 0xFFFF0000; /* mask out the bytes belonging to the previous word */
+ result |= prev_pix;
+ *((uint32*)out) = result; /* write 4 bytes */
+ out += 4;
+ result = pix4 >> 16; /* for the next loop */
+ }
+ ref += offset;
+ out += out_offset;
+ }
+ break;
+ case 3:
+ offset = picpitch - blkwidth - 1;
+ for (j = 0; j < blkheight; j++)
+ {
+ result = *ref++;
+ for (i = 1; i < blkwidth; i += 4)
+ {
+ pix4 = *((uint32*)ref);
+ ref += 4;
+                    prev_pix = (pix4 << 8) & 0xFFFFFF00; /* mask out the bytes belonging to the previous word */
+ result |= prev_pix;
+ *((uint32*)out) = result; /* write 4 bytes */
+ out += 4;
+ result = pix4 >> 24; /* for the next loop */
+ }
+ ref += offset;
+ out += out_offset;
+ }
+ break;
+ }
+}
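+
+/* eCreateAlign() above copies a block whose source address is not 32-bit
+   aligned into a scratch buffer with a fixed pitch of 24, re-assembling the
+   pixels from aligned word loads (the switch is on the low two bits of the
+   source address).  Some of the routines below (e.g. eVertInterp1MC and the
+   vertical stage of eDiagonalInterpMC) call it so that they can keep using
+   32-bit loads on the reference data. */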
+
+void eHorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
+ int blkwidth, int blkheight, int dx)
+{
+ uint8 *p_ref;
+ uint32 *p_cur;
+ uint32 tmp, pkres;
+ int result, curr_offset, ref_offset;
+ int j;
+ int32 r0, r1, r2, r3, r4, r5;
+ int32 r13, r6;
+
+ p_cur = (uint32*)out; /* assume it's word aligned */
+ curr_offset = (outpitch - blkwidth) >> 2;
+ p_ref = in;
+ ref_offset = inpitch - blkwidth;
+
+ if (dx&1)
+ {
+ dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */
+ p_ref -= 2;
+ r13 = 0;
+ for (j = blkheight; j > 0; j--)
+ {
+ tmp = (uint32)(p_ref + blkwidth);
+ r0 = p_ref[0];
+ r1 = p_ref[2];
+ r0 |= (r1 << 16); /* 0,c,0,a */
+ r1 = p_ref[1];
+ r2 = p_ref[3];
+ r1 |= (r2 << 16); /* 0,d,0,b */
+ while ((uint32)p_ref < tmp)
+ {
+ r2 = *(p_ref += 4); /* move pointer to e */
+ r3 = p_ref[2];
+ r2 |= (r3 << 16); /* 0,g,0,e */
+ r3 = p_ref[1];
+ r4 = p_ref[3];
+ r3 |= (r4 << 16); /* 0,h,0,f */
+
+ r4 = r0 + r3; /* c+h, a+f */
+ r5 = r0 + r1; /* c+d, a+b */
+ r6 = r2 + r3; /* g+h, e+f */
+ r5 >>= 16;
+ r5 |= (r6 << 16); /* e+f, c+d */
+ r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */
+ r4 += 0x100010; /* +16, +16 */
+ r5 = r1 + r2; /* d+g, b+e */
+ r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
+ r4 >>= 5;
+ r13 |= r4; /* check clipping */
+
+ r5 = p_ref[dx+2];
+ r6 = p_ref[dx+4];
+ r5 |= (r6 << 16);
+ r4 += r5;
+ r4 += 0x10001;
+ r4 = (r4 >> 1) & 0xFF00FF;
+
+ r5 = p_ref[4]; /* i */
+ r6 = (r5 << 16);
+ r5 = r6 | (r2 >> 16);/* 0,i,0,g */
+ r5 += r1; /* d+i, b+g */ /* r5 not free */
+ r1 >>= 16;
+ r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
+ r1 += r2; /* f+g, d+e */
+ r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */
+ r0 >>= 16;
+ r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
+ r0 += r3; /* e+h, c+f */
+ r5 += 0x100010; /* 16,16 */
+ r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
+ r5 >>= 5;
+ r13 |= r5; /* check clipping */
+
+ r0 = p_ref[dx+3];
+ r1 = p_ref[dx+5];
+ r0 |= (r1 << 16);
+ r5 += r0;
+ r5 += 0x10001;
+ r5 = (r5 >> 1) & 0xFF00FF;
+
+ r4 |= (r5 << 8); /* pack them together */
+ *p_cur++ = r4;
+ r1 = r3;
+ r0 = r2;
+ }
+ p_cur += curr_offset; /* move to the next line */
+ p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
+
+ if (r13&0xFF000700) /* need clipping */
+ {
+ /* move back to the beginning of the line */
+ p_ref -= (ref_offset + blkwidth); /* input */
+ p_cur -= (outpitch >> 2);
+
+ tmp = (uint32)(p_ref + blkwidth);
+ for (; (uint32)p_ref < tmp;)
+ {
+
+ r0 = *p_ref++;
+ r1 = *p_ref++;
+ r2 = *p_ref++;
+ r3 = *p_ref++;
+ r4 = *p_ref++;
+ /* first pixel */
+ r5 = *p_ref++;
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ /* 3/4 pel, no need to clip */
+ result = (result + p_ref[dx] + 1);
+ pkres = (result >> 1) ;
+ /* second pixel */
+ r0 = *p_ref++;
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ /* 3/4 pel, no need to clip */
+ result = (result + p_ref[dx] + 1);
+ result = (result >> 1);
+ pkres |= (result << 8);
+ /* third pixel */
+ r1 = *p_ref++;
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ /* 3/4 pel, no need to clip */
+ result = (result + p_ref[dx] + 1);
+ result = (result >> 1);
+ pkres |= (result << 16);
+ /* fourth pixel */
+ r2 = *p_ref++;
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ /* 3/4 pel, no need to clip */
+ result = (result + p_ref[dx] + 1);
+ result = (result >> 1);
+ pkres |= (result << 24);
+ *p_cur++ = pkres; /* write 4 pixels */
+ p_ref -= 5; /* offset back to the middle of filter */
+ }
+ p_cur += curr_offset; /* move to the next line */
+ p_ref += ref_offset; /* move to the next line */
+ }
+ }
+ }
+ else
+ {
+ p_ref -= 2;
+ r13 = 0;
+ for (j = blkheight; j > 0; j--)
+ {
+ tmp = (uint32)(p_ref + blkwidth);
+ r0 = p_ref[0];
+ r1 = p_ref[2];
+ r0 |= (r1 << 16); /* 0,c,0,a */
+ r1 = p_ref[1];
+ r2 = p_ref[3];
+ r1 |= (r2 << 16); /* 0,d,0,b */
+ while ((uint32)p_ref < tmp)
+ {
+ r2 = *(p_ref += 4); /* move pointer to e */
+ r3 = p_ref[2];
+ r2 |= (r3 << 16); /* 0,g,0,e */
+ r3 = p_ref[1];
+ r4 = p_ref[3];
+ r3 |= (r4 << 16); /* 0,h,0,f */
+
+ r4 = r0 + r3; /* c+h, a+f */
+ r5 = r0 + r1; /* c+d, a+b */
+ r6 = r2 + r3; /* g+h, e+f */
+ r5 >>= 16;
+ r5 |= (r6 << 16); /* e+f, c+d */
+ r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */
+ r4 += 0x100010; /* +16, +16 */
+ r5 = r1 + r2; /* d+g, b+e */
+ r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
+ r4 >>= 5;
+ r13 |= r4; /* check clipping */
+ r4 &= 0xFF00FF; /* mask */
+
+ r5 = p_ref[4]; /* i */
+ r6 = (r5 << 16);
+ r5 = r6 | (r2 >> 16);/* 0,i,0,g */
+ r5 += r1; /* d+i, b+g */ /* r5 not free */
+ r1 >>= 16;
+ r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
+ r1 += r2; /* f+g, d+e */
+ r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */
+ r0 >>= 16;
+ r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
+ r0 += r3; /* e+h, c+f */
+ r5 += 0x100010; /* 16,16 */
+ r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
+ r5 >>= 5;
+ r13 |= r5; /* check clipping */
+ r5 &= 0xFF00FF; /* mask */
+
+ r4 |= (r5 << 8); /* pack them together */
+ *p_cur++ = r4;
+ r1 = r3;
+ r0 = r2;
+ }
+ p_cur += curr_offset; /* move to the next line */
+ p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
+
+ if (r13&0xFF000700) /* need clipping */
+ {
+ /* move back to the beginning of the line */
+ p_ref -= (ref_offset + blkwidth); /* input */
+ p_cur -= (outpitch >> 2);
+
+ tmp = (uint32)(p_ref + blkwidth);
+ for (; (uint32)p_ref < tmp;)
+ {
+
+ r0 = *p_ref++;
+ r1 = *p_ref++;
+ r2 = *p_ref++;
+ r3 = *p_ref++;
+ r4 = *p_ref++;
+ /* first pixel */
+ r5 = *p_ref++;
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ pkres = result;
+ /* second pixel */
+ r0 = *p_ref++;
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ pkres |= (result << 8);
+ /* third pixel */
+ r1 = *p_ref++;
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ pkres |= (result << 16);
+ /* fourth pixel */
+ r2 = *p_ref++;
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ pkres |= (result << 24);
+ *p_cur++ = pkres; /* write 4 pixels */
+ p_ref -= 5;
+ }
+ p_cur += curr_offset; /* move to the next line */
+ p_ref += ref_offset;
+ }
+ }
+ }
+
+ return ;
+}
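+
+/* eHorzInterp1MC() above produces the horizontal half/quarter samples two
+   pixels at a time: pairs of pixels are kept in the low and high half-words
+   of a register (0x00FF00FF masks), the 6-tap filter is accumulated on the
+   pairs, and every intermediate result is OR-ed into r13.  If the 0xFF000700
+   test shows that any value left the 0..255 range, the line is recomputed
+   with the scalar fallback that applies CLIP_RESULT per pixel; the dx&1 paths
+   additionally average with the neighbouring full-pel sample to get the
+   quarter-pel value. */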
+
+void eHorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,
+ int blkwidth, int blkheight, int dx)
+{
+ int *p_ref;
+ uint32 *p_cur;
+ uint32 tmp, pkres;
+ int result, result2, curr_offset, ref_offset;
+ int j, r0, r1, r2, r3, r4, r5;
+
+ p_cur = (uint32*)out; /* assume it's word aligned */
+ curr_offset = (outpitch - blkwidth) >> 2;
+ p_ref = in;
+ ref_offset = inpitch - blkwidth;
+
+ if (dx&1)
+ {
+ dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */
+
+ for (j = blkheight; j > 0 ; j--)
+ {
+ tmp = (uint32)(p_ref + blkwidth);
+ for (; (uint32)p_ref < tmp;)
+ {
+
+ r0 = p_ref[-2];
+ r1 = p_ref[-1];
+ r2 = *p_ref++;
+ r3 = *p_ref++;
+ r4 = *p_ref++;
+ /* first pixel */
+ r5 = *p_ref++;
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ result2 = ((p_ref[dx] + 16) >> 5);
+ CLIP_RESULT(result2)
+ /* 3/4 pel, no need to clip */
+ result = (result + result2 + 1);
+ pkres = (result >> 1);
+ /* second pixel */
+ r0 = *p_ref++;
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ result2 = ((p_ref[dx] + 16) >> 5);
+ CLIP_RESULT(result2)
+ /* 3/4 pel, no need to clip */
+ result = (result + result2 + 1);
+ result = (result >> 1);
+ pkres |= (result << 8);
+ /* third pixel */
+ r1 = *p_ref++;
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ result2 = ((p_ref[dx] + 16) >> 5);
+ CLIP_RESULT(result2)
+ /* 3/4 pel, no need to clip */
+ result = (result + result2 + 1);
+ result = (result >> 1);
+ pkres |= (result << 16);
+ /* fourth pixel */
+ r2 = *p_ref++;
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ result2 = ((p_ref[dx] + 16) >> 5);
+ CLIP_RESULT(result2)
+ /* 3/4 pel, no need to clip */
+ result = (result + result2 + 1);
+ result = (result >> 1);
+ pkres |= (result << 24);
+ *p_cur++ = pkres; /* write 4 pixels */
+ p_ref -= 3; /* offset back to the middle of filter */
+ }
+ p_cur += curr_offset; /* move to the next line */
+ p_ref += ref_offset; /* move to the next line */
+ }
+ }
+ else
+ {
+ for (j = blkheight; j > 0 ; j--)
+ {
+ tmp = (uint32)(p_ref + blkwidth);
+ for (; (uint32)p_ref < tmp;)
+ {
+
+ r0 = p_ref[-2];
+ r1 = p_ref[-1];
+ r2 = *p_ref++;
+ r3 = *p_ref++;
+ r4 = *p_ref++;
+ /* first pixel */
+ r5 = *p_ref++;
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ pkres = result;
+ /* second pixel */
+ r0 = *p_ref++;
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ pkres |= (result << 8);
+ /* third pixel */
+ r1 = *p_ref++;
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ pkres |= (result << 16);
+ /* fourth pixel */
+ r2 = *p_ref++;
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ pkres |= (result << 24);
+ *p_cur++ = pkres; /* write 4 pixels */
+ p_ref -= 3; /* offset back to the middle of filter */
+ }
+ p_cur += curr_offset; /* move to the next line */
+ p_ref += ref_offset; /* move to the next line */
+ }
+ }
+
+ return ;
+}
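+
+/* eHorzInterp2MC() above is the second pass for the "horizontal cross"
+   positions: its input is the buffer of unclipped 32-bit sums produced by
+   eVertInterp2MC(), so the filter output is renormalised with
+   (x + 512) >> 10 before clipping.  When dx is odd the result is additionally
+   averaged with the adjacent clipped half sample ((p_ref[dx] + 16) >> 5) to
+   form the quarter sample. */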
+
+void eHorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,
+ int blkwidth, int blkheight)
+{
+ uint8 *p_ref;
+ int *p_cur;
+ uint32 tmp;
+ int result, curr_offset, ref_offset;
+ int j, r0, r1, r2, r3, r4, r5;
+
+ p_cur = out;
+ curr_offset = (outpitch - blkwidth);
+ p_ref = in;
+ ref_offset = inpitch - blkwidth;
+
+ for (j = blkheight; j > 0 ; j--)
+ {
+ tmp = (uint32)(p_ref + blkwidth);
+ for (; (uint32)p_ref < tmp;)
+ {
+
+ r0 = p_ref[-2];
+ r1 = p_ref[-1];
+ r2 = *p_ref++;
+ r3 = *p_ref++;
+ r4 = *p_ref++;
+ /* first pixel */
+ r5 = *p_ref++;
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ *p_cur++ = result;
+ /* second pixel */
+ r0 = *p_ref++;
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ *p_cur++ = result;
+ /* third pixel */
+ r1 = *p_ref++;
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ *p_cur++ = result;
+ /* fourth pixel */
+ r2 = *p_ref++;
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ *p_cur++ = result;
+ p_ref -= 3; /* move back to the middle of the filter */
+ }
+ p_cur += curr_offset; /* move to the next line */
+ p_ref += ref_offset;
+ }
+
+ return ;
+}
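+
+/* eHorzInterp3MC() above is the first pass for the "vertical cross" positions
+   (dx == 2): it writes the raw 6-tap sums, without rounding or clipping, into
+   an int buffer for eVertInterp3MC() to filter vertically and renormalise. */
+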
+void eVertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,
+ int blkwidth, int blkheight, int dy)
+{
+ uint8 *p_cur, *p_ref;
+ uint32 tmp;
+ int result, curr_offset, ref_offset;
+ int j, i;
+ int32 r0, r1, r2, r3, r4, r5, r6, r7, r8, r13;
+ uint8 tmp_in[24][24];
+
+ /* not word-aligned */
+ if (((uint32)in)&0x3)
+ {
+ eCreateAlign(in, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5);
+ in = &tmp_in[2][0];
+ inpitch = 24;
+ }
+ p_cur = out;
+ curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */
+ ref_offset = blkheight * inpitch; /* for limit */
+
+ curr_offset += 3;
+
+ if (dy&1)
+ {
+ dy = (dy >> 1) ? 0 : -inpitch;
+
+ for (j = 0; j < blkwidth; j += 4, in += 4)
+ {
+ r13 = 0;
+ p_ref = in;
+ p_cur -= outpitch; /* compensate for the first offset */
+ tmp = (uint32)(p_ref + ref_offset); /* limit */
+ while ((uint32)p_ref < tmp) /* the loop un-rolled */
+ {
+ r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */
+ p_ref += inpitch;
+ r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */
+ r0 &= 0xFF00FF;
+
+ r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */
+ r7 = (r1 >> 8) & 0xFF00FF;
+ r1 &= 0xFF00FF;
+
+ r0 += r1;
+ r6 += r7;
+
+ r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */
+ r8 = (r2 >> 8) & 0xFF00FF;
+ r2 &= 0xFF00FF;
+
+ r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */
+ r7 = (r1 >> 8) & 0xFF00FF;
+ r1 &= 0xFF00FF;
+ r1 += r2;
+
+ r7 += r8;
+
+ r0 += 20 * r1;
+ r6 += 20 * r7;
+ r0 += 0x100010;
+ r6 += 0x100010;
+
+ r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */
+ r8 = (r2 >> 8) & 0xFF00FF;
+ r2 &= 0xFF00FF;
+
+ r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */
+ r7 = (r1 >> 8) & 0xFF00FF;
+ r1 &= 0xFF00FF;
+ r1 += r2;
+
+ r7 += r8;
+
+ r0 -= 5 * r1;
+ r6 -= 5 * r7;
+
+ r0 >>= 5;
+ r6 >>= 5;
+ /* clip */
+ r13 |= r6;
+ r13 |= r0;
+ //CLIPPACK(r6,result)
+
+ r1 = *((uint32*)(p_ref + dy));
+ r2 = (r1 >> 8) & 0xFF00FF;
+ r1 &= 0xFF00FF;
+ r0 += r1;
+ r6 += r2;
+ r0 += 0x10001;
+ r6 += 0x10001;
+ r0 = (r0 >> 1) & 0xFF00FF;
+ r6 = (r6 >> 1) & 0xFF00FF;
+
+ r0 |= (r6 << 8); /* pack it back */
+ *((uint32*)(p_cur += outpitch)) = r0;
+ }
+ p_cur += curr_offset; /* offset to the next pixel */
+            if (r13 & 0xFF000700) /* this column needs clipping */
+ {
+ p_cur -= 4;
+ for (i = 0; i < 4; i++)
+ {
+ p_ref = in + i;
+ p_cur -= outpitch; /* compensate for the first offset */
+
+ tmp = (uint32)(p_ref + ref_offset); /* limit */
+ while ((uint32)p_ref < tmp)
+ { /* loop un-rolled */
+ r0 = *(p_ref - (inpitch << 1));
+ r1 = *(p_ref - inpitch);
+ r2 = *p_ref;
+ r3 = *(p_ref += inpitch); /* modify pointer before loading */
+ r4 = *(p_ref += inpitch);
+ /* first pixel */
+ r5 = *(p_ref += inpitch);
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ /* 3/4 pel, no need to clip */
+ result = (result + p_ref[dy-(inpitch<<1)] + 1);
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+ /* second pixel */
+ r0 = *(p_ref += inpitch);
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ /* 3/4 pel, no need to clip */
+ result = (result + p_ref[dy-(inpitch<<1)] + 1);
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+ /* third pixel */
+ r1 = *(p_ref += inpitch);
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ /* 3/4 pel, no need to clip */
+ result = (result + p_ref[dy-(inpitch<<1)] + 1);
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+ /* fourth pixel */
+ r2 = *(p_ref += inpitch);
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ /* 3/4 pel, no need to clip */
+ result = (result + p_ref[dy-(inpitch<<1)] + 1);
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+                        p_ref -= (inpitch << 1); /* move back to the center of the filter for the next one */
+ }
+ p_cur += (curr_offset - 3);
+ }
+ }
+ }
+ }
+ else
+ {
+ for (j = 0; j < blkwidth; j += 4, in += 4)
+ {
+ r13 = 0;
+ p_ref = in;
+ p_cur -= outpitch; /* compensate for the first offset */
+ tmp = (uint32)(p_ref + ref_offset); /* limit */
+ while ((uint32)p_ref < tmp) /* the loop un-rolled */
+ {
+ r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */
+ p_ref += inpitch;
+ r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */
+ r0 &= 0xFF00FF;
+
+ r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */
+ r7 = (r1 >> 8) & 0xFF00FF;
+ r1 &= 0xFF00FF;
+
+ r0 += r1;
+ r6 += r7;
+
+ r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */
+ r8 = (r2 >> 8) & 0xFF00FF;
+ r2 &= 0xFF00FF;
+
+ r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */
+ r7 = (r1 >> 8) & 0xFF00FF;
+ r1 &= 0xFF00FF;
+ r1 += r2;
+
+ r7 += r8;
+
+ r0 += 20 * r1;
+ r6 += 20 * r7;
+ r0 += 0x100010;
+ r6 += 0x100010;
+
+ r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */
+ r8 = (r2 >> 8) & 0xFF00FF;
+ r2 &= 0xFF00FF;
+
+ r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */
+ r7 = (r1 >> 8) & 0xFF00FF;
+ r1 &= 0xFF00FF;
+ r1 += r2;
+
+ r7 += r8;
+
+ r0 -= 5 * r1;
+ r6 -= 5 * r7;
+
+ r0 >>= 5;
+ r6 >>= 5;
+ /* clip */
+ r13 |= r6;
+ r13 |= r0;
+ //CLIPPACK(r6,result)
+ r0 &= 0xFF00FF;
+ r6 &= 0xFF00FF;
+ r0 |= (r6 << 8); /* pack it back */
+ *((uint32*)(p_cur += outpitch)) = r0;
+ }
+ p_cur += curr_offset; /* offset to the next pixel */
+            if (r13 & 0xFF000700) /* this column needs clipping */
+ {
+ p_cur -= 4;
+ for (i = 0; i < 4; i++)
+ {
+ p_ref = in + i;
+ p_cur -= outpitch; /* compensate for the first offset */
+ tmp = (uint32)(p_ref + ref_offset); /* limit */
+ while ((uint32)p_ref < tmp)
+ { /* loop un-rolled */
+ r0 = *(p_ref - (inpitch << 1));
+ r1 = *(p_ref - inpitch);
+ r2 = *p_ref;
+ r3 = *(p_ref += inpitch); /* modify pointer before loading */
+ r4 = *(p_ref += inpitch);
+ /* first pixel */
+ r5 = *(p_ref += inpitch);
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ *(p_cur += outpitch) = result;
+ /* second pixel */
+ r0 = *(p_ref += inpitch);
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ *(p_cur += outpitch) = result;
+ /* third pixel */
+ r1 = *(p_ref += inpitch);
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ *(p_cur += outpitch) = result;
+ /* fourth pixel */
+ r2 = *(p_ref += inpitch);
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ *(p_cur += outpitch) = result;
+                        p_ref -= (inpitch << 1); /* move back to the center of the filter for the next one */
+ }
+ p_cur += (curr_offset - 3);
+ }
+ }
+ }
+ }
+
+ return ;
+}
+
+void eVertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch,
+ int blkwidth, int blkheight)
+{
+ int *p_cur;
+ uint8 *p_ref;
+ uint32 tmp;
+ int result, curr_offset, ref_offset;
+ int j, r0, r1, r2, r3, r4, r5;
+
+ p_cur = out;
+ curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */
+ ref_offset = blkheight * inpitch; /* for limit */
+
+ for (j = 0; j < blkwidth; j++)
+ {
+ p_cur -= outpitch; /* compensate for the first offset */
+ p_ref = in++;
+
+ tmp = (uint32)(p_ref + ref_offset); /* limit */
+ while ((uint32)p_ref < tmp)
+ { /* loop un-rolled */
+ r0 = *(p_ref - (inpitch << 1));
+ r1 = *(p_ref - inpitch);
+ r2 = *p_ref;
+ r3 = *(p_ref += inpitch); /* modify pointer before loading */
+ r4 = *(p_ref += inpitch);
+ /* first pixel */
+ r5 = *(p_ref += inpitch);
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ *(p_cur += outpitch) = result;
+ /* second pixel */
+ r0 = *(p_ref += inpitch);
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ *(p_cur += outpitch) = result;
+ /* third pixel */
+ r1 = *(p_ref += inpitch);
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ *(p_cur += outpitch) = result;
+ /* fourth pixel */
+ r2 = *(p_ref += inpitch);
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ *(p_cur += outpitch) = result;
+            p_ref -= (inpitch << 1); /* move back to the center of the filter for the next one */
+ }
+ p_cur += curr_offset;
+ }
+
+ return ;
+}
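+
+/* eVertInterp2MC() above is the first pass for the "horizontal cross"
+   positions (dy == 2): it filters vertically, one column at a time, and
+   stores the raw unclipped sums in an int buffer for eHorzInterp2MC(). */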
+
+void eVertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch,
+ int blkwidth, int blkheight, int dy)
+{
+ uint8 *p_cur;
+ int *p_ref;
+ uint32 tmp;
+ int result, result2, curr_offset, ref_offset;
+ int j, r0, r1, r2, r3, r4, r5;
+
+ p_cur = out;
+ curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */
+ ref_offset = blkheight * inpitch; /* for limit */
+
+ if (dy&1)
+ {
+ dy = (dy >> 1) ? -(inpitch << 1) : -(inpitch << 1) - inpitch;
+
+ for (j = 0; j < blkwidth; j++)
+ {
+ p_cur -= outpitch; /* compensate for the first offset */
+ p_ref = in++;
+
+ tmp = (uint32)(p_ref + ref_offset); /* limit */
+ while ((uint32)p_ref < tmp)
+ { /* loop un-rolled */
+ r0 = *(p_ref - (inpitch << 1));
+ r1 = *(p_ref - inpitch);
+ r2 = *p_ref;
+ r3 = *(p_ref += inpitch); /* modify pointer before loading */
+ r4 = *(p_ref += inpitch);
+ /* first pixel */
+ r5 = *(p_ref += inpitch);
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ result2 = ((p_ref[dy] + 16) >> 5);
+ CLIP_RESULT(result2)
+ /* 3/4 pel, no need to clip */
+ result = (result + result2 + 1);
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+ /* second pixel */
+ r0 = *(p_ref += inpitch);
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ result2 = ((p_ref[dy] + 16) >> 5);
+ CLIP_RESULT(result2)
+ /* 3/4 pel, no need to clip */
+ result = (result + result2 + 1);
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+ /* third pixel */
+ r1 = *(p_ref += inpitch);
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ result2 = ((p_ref[dy] + 16) >> 5);
+ CLIP_RESULT(result2)
+ /* 3/4 pel, no need to clip */
+ result = (result + result2 + 1);
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+ /* fourth pixel */
+ r2 = *(p_ref += inpitch);
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ result2 = ((p_ref[dy] + 16) >> 5);
+ CLIP_RESULT(result2)
+ /* 3/4 pel, no need to clip */
+ result = (result + result2 + 1);
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+                p_ref -= (inpitch << 1); /* move back to the center of the filter for the next one */
+ }
+ p_cur += curr_offset;
+ }
+ }
+ else
+ {
+ for (j = 0; j < blkwidth; j++)
+ {
+ p_cur -= outpitch; /* compensate for the first offset */
+ p_ref = in++;
+
+ tmp = (uint32)(p_ref + ref_offset); /* limit */
+ while ((uint32)p_ref < tmp)
+ { /* loop un-rolled */
+ r0 = *(p_ref - (inpitch << 1));
+ r1 = *(p_ref - inpitch);
+ r2 = *p_ref;
+ r3 = *(p_ref += inpitch); /* modify pointer before loading */
+ r4 = *(p_ref += inpitch);
+ /* first pixel */
+ r5 = *(p_ref += inpitch);
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ *(p_cur += outpitch) = result;
+ /* second pixel */
+ r0 = *(p_ref += inpitch);
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ *(p_cur += outpitch) = result;
+ /* third pixel */
+ r1 = *(p_ref += inpitch);
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ *(p_cur += outpitch) = result;
+ /* fourth pixel */
+ r2 = *(p_ref += inpitch);
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ result = (result + 512) >> 10;
+ CLIP_RESULT(result)
+ *(p_cur += outpitch) = result;
+                p_ref -= (inpitch << 1); /* move back to the center of the filter for the next one */
+ }
+ p_cur += curr_offset;
+ }
+ }
+
+ return ;
+}
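+
+/* eVertInterp3MC() above is the second pass for the "vertical cross"
+   positions: it filters the 32-bit intermediates vertically, renormalises
+   with (x + 512) >> 10, clips, and for odd dy averages with the adjacent
+   clipped half sample (p_ref[dy]) to form the quarter sample. */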
+
+void eDiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch,
+ uint8 *out, int outpitch,
+ int blkwidth, int blkheight)
+{
+ int j, i;
+ int result;
+ uint8 *p_cur, *p_ref, *p_tmp8;
+ int curr_offset, ref_offset;
+ uint8 tmp_res[24][24], tmp_in[24][24];
+ uint32 *p_tmp;
+ uint32 tmp, pkres, tmp_result;
+ int32 r0, r1, r2, r3, r4, r5;
+ int32 r6, r7, r8, r9, r10, r13;
+
+ ref_offset = inpitch - blkwidth;
+ p_ref = in1 - 2;
+ /* perform horizontal interpolation */
+ /* not word-aligned */
+    /* It is faster to read 1 byte at a time to avoid calling CreateAlign */
+ /* if(((uint32)p_ref)&0x3)
+ {
+ CreateAlign(p_ref,inpitch,0,&tmp_in[0][0],blkwidth+8,blkheight);
+ p_ref = &tmp_in[0][0];
+ ref_offset = 24-blkwidth;
+ }*/
+
+ p_tmp = (uint32*) & (tmp_res[0][0]);
+ for (j = blkheight; j > 0; j--)
+ {
+ r13 = 0;
+ tmp = (uint32)(p_ref + blkwidth);
+
+ //r0 = *((uint32*)p_ref); /* d,c,b,a */
+ //r1 = (r0>>8)&0xFF00FF; /* 0,d,0,b */
+ //r0 &= 0xFF00FF; /* 0,c,0,a */
+ /* It is faster to read 1 byte at a time */
+ r0 = p_ref[0];
+ r1 = p_ref[2];
+ r0 |= (r1 << 16); /* 0,c,0,a */
+ r1 = p_ref[1];
+ r2 = p_ref[3];
+ r1 |= (r2 << 16); /* 0,d,0,b */
+
+ while ((uint32)p_ref < tmp)
+ {
+ //r2 = *((uint32*)(p_ref+=4));/* h,g,f,e */
+ //r3 = (r2>>8)&0xFF00FF; /* 0,h,0,f */
+ //r2 &= 0xFF00FF; /* 0,g,0,e */
+ /* It is faster to read 1 byte at a time */
+ r2 = *(p_ref += 4);
+ r3 = p_ref[2];
+ r2 |= (r3 << 16); /* 0,g,0,e */
+ r3 = p_ref[1];
+ r4 = p_ref[3];
+ r3 |= (r4 << 16); /* 0,h,0,f */
+
+ r4 = r0 + r3; /* c+h, a+f */
+ r5 = r0 + r1; /* c+d, a+b */
+ r6 = r2 + r3; /* g+h, e+f */
+ r5 >>= 16;
+ r5 |= (r6 << 16); /* e+f, c+d */
+ r4 += r5 * 20; /* c+20*e+20*f+h, a+20*c+20*d+f */
+ r4 += 0x100010; /* +16, +16 */
+ r5 = r1 + r2; /* d+g, b+e */
+ r4 -= r5 * 5; /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */
+ r4 >>= 5;
+ r13 |= r4; /* check clipping */
+ r4 &= 0xFF00FF; /* mask */
+
+ r5 = p_ref[4]; /* i */
+ r6 = (r5 << 16);
+ r5 = r6 | (r2 >> 16);/* 0,i,0,g */
+ r5 += r1; /* d+i, b+g */ /* r5 not free */
+ r1 >>= 16;
+ r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */
+ r1 += r2; /* f+g, d+e */
+ r5 += 20 * r1; /* d+20f+20g+i, b+20d+20e+g */
+ r0 >>= 16;
+ r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */
+ r0 += r3; /* e+h, c+f */
+ r5 += 0x100010; /* 16,16 */
+ r5 -= r0 * 5; /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */
+ r5 >>= 5;
+ r13 |= r5; /* check clipping */
+ r5 &= 0xFF00FF; /* mask */
+
+ r4 |= (r5 << 8); /* pack them together */
+ *p_tmp++ = r4;
+ r1 = r3;
+ r0 = r2;
+ }
+ p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */
+ p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
+
+ if (r13&0xFF000700) /* need clipping */
+ {
+ /* move back to the beginning of the line */
+ p_ref -= (ref_offset + blkwidth); /* input */
+ p_tmp -= 6; /* intermediate output */
+ tmp = (uint32)(p_ref + blkwidth);
+ while ((uint32)p_ref < tmp)
+ {
+ r0 = *p_ref++;
+ r1 = *p_ref++;
+ r2 = *p_ref++;
+ r3 = *p_ref++;
+ r4 = *p_ref++;
+ /* first pixel */
+ r5 = *p_ref++;
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ pkres = result;
+ /* second pixel */
+ r0 = *p_ref++;
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ pkres |= (result << 8);
+ /* third pixel */
+ r1 = *p_ref++;
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ pkres |= (result << 16);
+ /* fourth pixel */
+ r2 = *p_ref++;
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ pkres |= (result << 24);
+
+                *p_tmp++ = pkres; /* write 4 pixels */
+ p_ref -= 5;
+ }
+ p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */
+ p_ref += ref_offset; /* ref_offset = inpitch-blkwidth; */
+ }
+ }
+
+ /* perform vertical interpolation */
+ /* not word-aligned */
+ if (((uint32)in2)&0x3)
+ {
+ eCreateAlign(in2, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5);
+ in2 = &tmp_in[2][0];
+ inpitch = 24;
+ }
+
+ p_cur = out;
+ curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically up and one pixel right */
+ pkres = blkheight * inpitch; /* reuse it for limit */
+
+ curr_offset += 3;
+
+ for (j = 0; j < blkwidth; j += 4, in2 += 4)
+ {
+ r13 = 0;
+ p_ref = in2;
+ p_tmp8 = &(tmp_res[0][j]); /* intermediate result */
+ p_tmp8 -= 24; /* compensate for the first offset */
+ p_cur -= outpitch; /* compensate for the first offset */
+ tmp = (uint32)(p_ref + pkres); /* limit */
+ while ((uint32)p_ref < tmp) /* the loop un-rolled */
+ {
+            /* Reading 1 byte at a time is too slow (too many read and pack ops), so CreateAlign is called instead */
+ /*p_ref8 = p_ref-(inpitch<<1); r0 = p_ref8[0]; r1 = p_ref8[2];
+ r0 |= (r1<<16); r6 = p_ref8[1]; r1 = p_ref8[3];
+ r6 |= (r1<<16); p_ref+=inpitch; */
+ r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */
+ p_ref += inpitch;
+ r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */
+ r0 &= 0xFF00FF;
+
+ /*p_ref8 = p_ref+(inpitch<<1);
+ r1 = p_ref8[0]; r7 = p_ref8[2]; r1 |= (r7<<16);
+ r7 = p_ref8[1]; r2 = p_ref8[3]; r7 |= (r2<<16);*/
+ r1 = *((uint32*)(p_ref + (inpitch << 1))); /* r1, r7, ref[3] */
+ r7 = (r1 >> 8) & 0xFF00FF;
+ r1 &= 0xFF00FF;
+
+ r0 += r1;
+ r6 += r7;
+
+ /*r2 = p_ref[0]; r8 = p_ref[2]; r2 |= (r8<<16);
+ r8 = p_ref[1]; r1 = p_ref[3]; r8 |= (r1<<16);*/
+ r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */
+ r8 = (r2 >> 8) & 0xFF00FF;
+ r2 &= 0xFF00FF;
+
+ /*p_ref8 = p_ref-inpitch; r1 = p_ref8[0]; r7 = p_ref8[2];
+ r1 |= (r7<<16); r1 += r2; r7 = p_ref8[1];
+ r2 = p_ref8[3]; r7 |= (r2<<16);*/
+ r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */
+ r7 = (r1 >> 8) & 0xFF00FF;
+ r1 &= 0xFF00FF;
+ r1 += r2;
+
+ r7 += r8;
+
+ r0 += 20 * r1;
+ r6 += 20 * r7;
+ r0 += 0x100010;
+ r6 += 0x100010;
+
+ /*p_ref8 = p_ref-(inpitch<<1); r2 = p_ref8[0]; r8 = p_ref8[2];
+ r2 |= (r8<<16); r8 = p_ref8[1]; r1 = p_ref8[3]; r8 |= (r1<<16);*/
+ r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */
+ r8 = (r2 >> 8) & 0xFF00FF;
+ r2 &= 0xFF00FF;
+
+ /*p_ref8 = p_ref+inpitch; r1 = p_ref8[0]; r7 = p_ref8[2];
+ r1 |= (r7<<16); r1 += r2; r7 = p_ref8[1];
+ r2 = p_ref8[3]; r7 |= (r2<<16);*/
+ r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */
+ r7 = (r1 >> 8) & 0xFF00FF;
+ r1 &= 0xFF00FF;
+ r1 += r2;
+
+ r7 += r8;
+
+ r0 -= 5 * r1;
+ r6 -= 5 * r7;
+
+ r0 >>= 5;
+ r6 >>= 5;
+ /* clip */
+ r13 |= r6;
+ r13 |= r0;
+ //CLIPPACK(r6,result)
+ /* add with horizontal results */
+ r10 = *((uint32*)(p_tmp8 += 24));
+ r9 = (r10 >> 8) & 0xFF00FF;
+ r10 &= 0xFF00FF;
+
+ r0 += r10;
+ r0 += 0x10001;
+ r0 = (r0 >> 1) & 0xFF00FF; /* mask to 8 bytes */
+
+ r6 += r9;
+ r6 += 0x10001;
+ r6 = (r6 >> 1) & 0xFF00FF; /* mask to 8 bytes */
+
+ r0 |= (r6 << 8); /* pack it back */
+ *((uint32*)(p_cur += outpitch)) = r0;
+ }
+ p_cur += curr_offset; /* offset to the next pixel */
+ if (r13 & 0xFF000700) /* this column need clipping */
+ {
+ p_cur -= 4;
+ for (i = 0; i < 4; i++)
+ {
+ p_ref = in2 + i;
+ p_tmp8 = &(tmp_res[0][j+i]); /* intermediate result */
+ p_tmp8 -= 24; /* compensate for the first offset */
+ p_cur -= outpitch; /* compensate for the first offset */
+ tmp = (uint32)(p_ref + pkres); /* limit */
+ while ((uint32)p_ref < tmp) /* the loop un-rolled */
+ {
+ r0 = *(p_ref - (inpitch << 1));
+ r1 = *(p_ref - inpitch);
+ r2 = *p_ref;
+ r3 = *(p_ref += inpitch); /* modify pointer before loading */
+ r4 = *(p_ref += inpitch);
+ /* first pixel */
+ r5 = *(p_ref += inpitch);
+ result = (r0 + r5);
+ r0 = (r1 + r4);
+ result -= (r0 * 5);//result -= r0; result -= (r0<<2);
+ r0 = (r2 + r3);
+ result += (r0 * 20);//result += (r0<<4); result += (r0<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ tmp_result = *(p_tmp8 += 24); /* modify pointer before loading */
+ result = (result + tmp_result + 1); /* no clip */
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+ /* second pixel */
+ r0 = *(p_ref += inpitch);
+ result = (r1 + r0);
+ r1 = (r2 + r5);
+ result -= (r1 * 5);//result -= r1; result -= (r1<<2);
+ r1 = (r3 + r4);
+ result += (r1 * 20);//result += (r1<<4); result += (r1<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ tmp_result = *(p_tmp8 += 24); /* intermediate result */
+ result = (result + tmp_result + 1); /* no clip */
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+ /* third pixel */
+ r1 = *(p_ref += inpitch);
+ result = (r2 + r1);
+ r2 = (r3 + r0);
+ result -= (r2 * 5);//result -= r2; result -= (r2<<2);
+ r2 = (r4 + r5);
+ result += (r2 * 20);//result += (r2<<4); result += (r2<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ tmp_result = *(p_tmp8 += 24); /* intermediate result */
+ result = (result + tmp_result + 1); /* no clip */
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+ /* fourth pixel */
+ r2 = *(p_ref += inpitch);
+ result = (r3 + r2);
+ r3 = (r4 + r1);
+ result -= (r3 * 5);//result -= r3; result -= (r3<<2);
+ r3 = (r5 + r0);
+ result += (r3 * 20);//result += (r3<<4); result += (r3<<2);
+ result = (result + 16) >> 5;
+ CLIP_RESULT(result)
+ tmp_result = *(p_tmp8 += 24); /* intermediate result */
+ result = (result + tmp_result + 1); /* no clip */
+ result = (result >> 1);
+ *(p_cur += outpitch) = result;
+                    p_ref -= (inpitch << 1);  /* move back to the center of the filter for the next iteration */
+ }
+ p_cur += (curr_offset - 3);
+ }
+ }
+ }
+
+ return ;
+}
+
+/* position G */
+void eFullPelMC(uint8 *in, int inpitch, uint8 *out, int outpitch,
+ int blkwidth, int blkheight)
+{
+ int i, j;
+ int offset_in = inpitch - blkwidth;
+ int offset_out = outpitch - blkwidth;
+ uint32 temp;
+ uint8 byte;
+
+ if (((uint32)in)&3)
+ {
+ for (j = blkheight; j > 0; j--)
+ {
+ for (i = blkwidth; i > 0; i -= 4)
+ {
+ temp = *in++;
+ byte = *in++;
+ temp |= (byte << 8);
+ byte = *in++;
+ temp |= (byte << 16);
+ byte = *in++;
+ temp |= (byte << 24);
+
+ *((uint32*)out) = temp; /* write 4 bytes */
+ out += 4;
+ }
+ out += offset_out;
+ in += offset_in;
+ }
+ }
+ else
+ {
+ for (j = blkheight; j > 0; j--)
+ {
+ for (i = blkwidth; i > 0; i -= 4)
+ {
+ temp = *((uint32*)in);
+ *((uint32*)out) = temp;
+ in += 4;
+ out += 4;
+ }
+ out += offset_out;
+ in += offset_in;
+ }
+ }
+ return ;
+}
+
+void ePadChroma(uint8 *ref, int picwidth, int picheight, int picpitch, int x_pos, int y_pos)
+{
+ int pad_height;
+ int pad_width;
+ uint8 *start;
+ uint32 word1, word2, word3;
+ int offset, j;
+
+
+ pad_height = 8 + ((y_pos & 7) ? 1 : 0);
+ pad_width = 8 + ((x_pos & 7) ? 1 : 0);
+
+ y_pos >>= 3;
+ x_pos >>= 3;
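+    /* pad_width/pad_height cover the 8-pixel MC block footprint plus one extra
+       row/column when the MV has a fractional part; the picture border is replicated
+       below so the chroma interpolation can read outside the frame without clipping. */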
+ // pad vertical first
+ if (y_pos < 0) // need to pad up
+ {
+ if (x_pos < -8) start = ref - 8;
+ else if (x_pos + pad_width > picwidth + 7) start = ref + picwidth + 7 - pad_width;
+ else start = ref + x_pos;
+
+ /* word-align start */
+ offset = (uint32)start & 0x3;
+ if (offset) start -= offset;
+
+ word1 = *((uint32*)start);
+ word2 = *((uint32*)(start + 4));
+ word3 = *((uint32*)(start + 8));
+
+ /* pad up N rows */
+ j = -y_pos;
+ if (j > 8) j = 8;
+ while (j--)
+ {
+ *((uint32*)(start -= picpitch)) = word1;
+ *((uint32*)(start + 4)) = word2;
+ *((uint32*)(start + 8)) = word3;
+ }
+
+ }
+ else if (y_pos + pad_height >= picheight) /* pad down */
+ {
+ if (x_pos < -8) start = ref + picpitch * (picheight - 1) - 8;
+ else if (x_pos + pad_width > picwidth + 7) start = ref + picpitch * (picheight - 1) +
+ picwidth + 7 - pad_width;
+ else start = ref + picpitch * (picheight - 1) + x_pos;
+
+ /* word-align start */
+ offset = (uint32)start & 0x3;
+ if (offset) start -= offset;
+
+ word1 = *((uint32*)start);
+ word2 = *((uint32*)(start + 4));
+ word3 = *((uint32*)(start + 8));
+
+ /* pad down N rows */
+ j = y_pos + pad_height - picheight;
+ if (j > 8) j = 8;
+ while (j--)
+ {
+ *((uint32*)(start += picpitch)) = word1;
+ *((uint32*)(start + 4)) = word2;
+ *((uint32*)(start + 8)) = word3;
+ }
+ }
+
+ /* now pad horizontal */
+ if (x_pos < 0) // pad left
+ {
+ if (y_pos < -8) start = ref - (picpitch << 3);
+ else if (y_pos + pad_height > picheight + 7) start = ref + (picheight + 7 - pad_height) * picpitch;
+ else start = ref + y_pos * picpitch;
+
+        // now pad left 8 pixels for pad_height rows
+ j = pad_height;
+ start -= picpitch;
+ while (j--)
+ {
+ word1 = *(start += picpitch);
+ word1 |= (word1 << 8);
+ word1 |= (word1 << 16);
+ *((uint32*)(start - 8)) = word1;
+ *((uint32*)(start - 4)) = word1;
+ }
+ }
+ else if (x_pos + pad_width >= picwidth) /* pad right */
+ {
+ if (y_pos < -8) start = ref - (picpitch << 3) + picwidth - 1;
+ else if (y_pos + pad_height > picheight + 7) start = ref + (picheight + 7 - pad_height) * picpitch + picwidth - 1;
+ else start = ref + y_pos * picpitch + picwidth - 1;
+
+        // now pad right 8 pixels for pad_height rows
+ j = pad_height;
+ start -= picpitch;
+ while (j--)
+ {
+ word1 = *(start += picpitch);
+ word1 |= (word1 << 8);
+ word1 |= (word1 << 16);
+ *((uint32*)(start + 1)) = word1;
+ *((uint32*)(start + 5)) = word1;
+ }
+ }
+
+ return ;
+}
+
+
+void eChromaMotionComp(uint8 *ref, int picwidth, int picheight,
+ int x_pos, int y_pos,
+ uint8 *pred, int picpitch,
+ int blkwidth, int blkheight)
+{
+ int dx, dy;
+ int offset_dx, offset_dy;
+ int index;
+
+ ePadChroma(ref, picwidth, picheight, picpitch, x_pos, y_pos);
+
+ dx = x_pos & 7;
+ dy = y_pos & 7;
+ offset_dx = (dx + 7) >> 3;
+ offset_dy = (dy + 7) >> 3;
+ x_pos = x_pos >> 3; /* round it to full-pel resolution */
+ y_pos = y_pos >> 3;
+
+ ref += y_pos * picpitch + x_pos;
+
+ index = offset_dx + (offset_dy << 1) + ((blkwidth << 1) & 0x7);
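+    /* offset_dx/offset_dy are 1 when there is a fractional x/y component; the
+       ((blkwidth << 1) & 0x7) term adds 4 only for 2-pixel-wide blocks, so the matching
+       full/horizontal/vertical/diagonal (or narrow-block *_MC2) routine is dispatched. */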
+
+ (*(eChromaMC_SIMD[index]))(ref, picpitch , dx, dy, pred, picpitch, blkwidth, blkheight);
+ return ;
+}
+
+
+/* SIMD-style routines: the loops are unrolled in the vertical direction to reduce the per-iteration overhead */
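+/* These routines implement the standard AVC chroma interpolation
+   pred = ((8-dx)*(8-dy)*A + dx*(8-dy)*B + (8-dx)*dy*C + dx*dy*D + 32) >> 6
+   as two separable passes (weights (8-dx)/dx horizontally, (8-dy)/dy vertically),
+   packing two pixels per 32-bit word; the horizontal-only, vertical-only and full-pel
+   variants are the corresponding degenerate cases with (x+4)>>3 rounding or a plain copy. */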
+void eChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight)
+{
+ int32 r0, r1, r2, r3, result0, result1;
+ uint8 temp[288];
+ uint8 *ref, *out;
+ int i, j;
+ int dx_8 = 8 - dx;
+ int dy_8 = 8 - dy;
+
+ /* horizontal first */
+ out = temp;
+ for (i = 0; i < blkheight + 1; i++)
+ {
+ ref = pRef;
+ r0 = ref[0];
+ for (j = 0; j < blkwidth; j += 4)
+ {
+ r0 |= (ref[2] << 16);
+ result0 = dx_8 * r0;
+
+ r1 = ref[1] | (ref[3] << 16);
+ result0 += dx * r1;
+ *(int32 *)out = result0;
+
+ result0 = dx_8 * r1;
+
+ r2 = ref[4];
+ r0 = r0 >> 16;
+ r1 = r0 | (r2 << 16);
+ result0 += dx * r1;
+ *(int32 *)(out + 16) = result0;
+
+ ref += 4;
+ out += 4;
+ r0 = r2;
+ }
+ pRef += srcPitch;
+ out += (32 - blkwidth);
+ }
+
+// pRef -= srcPitch*(blkheight+1);
+ ref = temp;
+
+ for (j = 0; j < blkwidth; j += 4)
+ {
+ r0 = *(int32 *)ref;
+ r1 = *(int32 *)(ref + 16);
+ ref += 32;
+ out = pOut;
+ for (i = 0; i < (blkheight >> 1); i++)
+ {
+ result0 = dy_8 * r0 + 0x00200020;
+ r2 = *(int32 *)ref;
+ result0 += dy * r2;
+ result0 >>= 6;
+ result0 &= 0x00FF00FF;
+ r0 = r2;
+
+ result1 = dy_8 * r1 + 0x00200020;
+ r3 = *(int32 *)(ref + 16);
+ result1 += dy * r3;
+ result1 >>= 6;
+ result1 &= 0x00FF00FF;
+ r1 = r3;
+ *(int32 *)out = result0 | (result1 << 8);
+ out += predPitch;
+ ref += 32;
+
+ result0 = dy_8 * r0 + 0x00200020;
+ r2 = *(int32 *)ref;
+ result0 += dy * r2;
+ result0 >>= 6;
+ result0 &= 0x00FF00FF;
+ r0 = r2;
+
+ result1 = dy_8 * r1 + 0x00200020;
+ r3 = *(int32 *)(ref + 16);
+ result1 += dy * r3;
+ result1 >>= 6;
+ result1 &= 0x00FF00FF;
+ r1 = r3;
+ *(int32 *)out = result0 | (result1 << 8);
+ out += predPitch;
+ ref += 32;
+ }
+ pOut += 4;
+        ref = temp + 4; /* this outer loop iterates at most twice */
+ }
+ return;
+}
+
+void eChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight)
+{
+ (void)(dy);
+
+ int32 r0, r1, r2, result0, result1;
+ uint8 *ref, *out;
+ int i, j;
+ int dx_8 = 8 - dx;
+
+ /* horizontal first */
+ for (i = 0; i < blkheight; i++)
+ {
+ ref = pRef;
+ out = pOut;
+
+ r0 = ref[0];
+ for (j = 0; j < blkwidth; j += 4)
+ {
+ r0 |= (ref[2] << 16);
+ result0 = dx_8 * r0 + 0x00040004;
+
+ r1 = ref[1] | (ref[3] << 16);
+ result0 += dx * r1;
+ result0 >>= 3;
+ result0 &= 0x00FF00FF;
+
+ result1 = dx_8 * r1 + 0x00040004;
+
+ r2 = ref[4];
+ r0 = r0 >> 16;
+ r1 = r0 | (r2 << 16);
+ result1 += dx * r1;
+ result1 >>= 3;
+ result1 &= 0x00FF00FF;
+
+ *(int32 *)out = result0 | (result1 << 8);
+
+ ref += 4;
+ out += 4;
+ r0 = r2;
+ }
+
+ pRef += srcPitch;
+ pOut += predPitch;
+ }
+ return;
+}
+
+void eChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight)
+{
+ (void)(dx);
+
+ int32 r0, r1, r2, r3, result0, result1;
+ int i, j;
+ uint8 *ref, *out;
+ int dy_8 = 8 - dy;
+ /* vertical first */
+ for (i = 0; i < blkwidth; i += 4)
+ {
+ ref = pRef;
+ out = pOut;
+
+ r0 = ref[0] | (ref[2] << 16);
+ r1 = ref[1] | (ref[3] << 16);
+ ref += srcPitch;
+ for (j = 0; j < blkheight; j++)
+ {
+ result0 = dy_8 * r0 + 0x00040004;
+ r2 = ref[0] | (ref[2] << 16);
+ result0 += dy * r2;
+ result0 >>= 3;
+ result0 &= 0x00FF00FF;
+ r0 = r2;
+
+ result1 = dy_8 * r1 + 0x00040004;
+ r3 = ref[1] | (ref[3] << 16);
+ result1 += dy * r3;
+ result1 >>= 3;
+ result1 &= 0x00FF00FF;
+ r1 = r3;
+ *(int32 *)out = result0 | (result1 << 8);
+ ref += srcPitch;
+ out += predPitch;
+ }
+ pOut += 4;
+ pRef += 4;
+ }
+ return;
+}
+
+void eChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight)
+{
+ (void)(blkwidth);
+
+ int32 r0, r1, temp0, temp1, result;
+ int32 temp[9];
+ int32 *out;
+ int i, r_temp;
+ int dy_8 = 8 - dy;
+
+ /* horizontal first */
+ out = temp;
+ for (i = 0; i < blkheight + 1; i++)
+ {
+ r_temp = pRef[1];
+ temp0 = (pRef[0] << 3) + dx * (r_temp - pRef[0]);
+ temp1 = (r_temp << 3) + dx * (pRef[2] - r_temp);
+ r0 = temp0 | (temp1 << 16);
+ *out++ = r0;
+ pRef += srcPitch;
+ }
+
+ pRef -= srcPitch * (blkheight + 1);
+
+ out = temp;
+
+ r0 = *out++;
+
+ for (i = 0; i < blkheight; i++)
+ {
+ result = dy_8 * r0 + 0x00200020;
+ r1 = *out++;
+ result += dy * r1;
+ result >>= 6;
+ result &= 0x00FF00FF;
+ *(int16 *)pOut = (result >> 8) | (result & 0xFF);
+ r0 = r1;
+ pOut += predPitch;
+ }
+ return;
+}
+
+void eChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight)
+{
+ (void)(dy);
+ (void)(blkwidth);
+
+ int i, temp, temp0, temp1;
+
+ /* horizontal first */
+ for (i = 0; i < blkheight; i++)
+ {
+ temp = pRef[1];
+ temp0 = ((pRef[0] << 3) + dx * (temp - pRef[0]) + 4) >> 3;
+ temp1 = ((temp << 3) + dx * (pRef[2] - temp) + 4) >> 3;
+
+ *(int16 *)pOut = temp0 | (temp1 << 8);
+ pRef += srcPitch;
+ pOut += predPitch;
+
+ }
+ return;
+}
+void eChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight)
+{
+ (void)(dx);
+ (void)(blkwidth);
+
+ int32 r0, r1, result;
+ int i;
+ int dy_8 = 8 - dy;
+ r0 = pRef[0] | (pRef[1] << 16);
+ pRef += srcPitch;
+ for (i = 0; i < blkheight; i++)
+ {
+ result = dy_8 * r0 + 0x00040004;
+ r1 = pRef[0] | (pRef[1] << 16);
+ result += dy * r1;
+ result >>= 3;
+ result &= 0x00FF00FF;
+ *(int16 *)pOut = (result >> 8) | (result & 0xFF);
+ r0 = r1;
+ pRef += srcPitch;
+ pOut += predPitch;
+ }
+ return;
+}
+
+void eChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,
+ uint8 *pOut, int predPitch, int blkwidth, int blkheight)
+{
+ (void)(dx);
+ (void)(dy);
+
+ int i, j;
+ int offset_in = srcPitch - blkwidth;
+ int offset_out = predPitch - blkwidth;
+ uint16 temp;
+ uint8 byte;
+
+ if (((uint32)pRef)&1)
+ {
+ for (j = blkheight; j > 0; j--)
+ {
+ for (i = blkwidth; i > 0; i -= 2)
+ {
+ temp = *pRef++;
+ byte = *pRef++;
+ temp |= (byte << 8);
+ *((uint16*)pOut) = temp; /* write 2 bytes */
+ pOut += 2;
+ }
+ pOut += offset_out;
+ pRef += offset_in;
+ }
+ }
+ else
+ {
+ for (j = blkheight; j > 0; j--)
+ {
+ for (i = blkwidth; i > 0; i -= 2)
+ {
+ temp = *((uint16*)pRef);
+ *((uint16*)pOut) = temp;
+ pRef += 2;
+ pOut += 2;
+ }
+ pOut += offset_out;
+ pRef += offset_in;
+ }
+ }
+ return ;
+}
diff --git a/media/libstagefright/codecs/avc/enc/src/motion_est.cpp b/media/libstagefright/codecs/avc/enc/src/motion_est.cpp
new file mode 100644
index 0000000..f650ef9
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/motion_est.cpp
@@ -0,0 +1,1774 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+
+#define MIN_GOP     1   /* minimum size of GOP, 1/23/01, needs to be tested */
+
+#define DEFAULT_REF_IDX 0 /* always from the first frame in the reflist */
+
+#define ALL_CAND_EQUAL 10 /* any number greater than 5 will work */
+
+
+/* from TMN 3.2 */
+#define PREF_NULL_VEC 129 /* zero vector bias */
+#define PREF_16_VEC 129     /* 1MV bias versus 4MVs */
+#define PREF_INTRA 3024//512 /* bias for INTRA coding */
+
+const static int tab_exclude[9][9] = // [last_loc][curr_loc]
+{
+ {0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 1, 1, 1, 0, 0},
+ {0, 0, 0, 0, 1, 1, 1, 1, 1},
+ {0, 0, 0, 0, 0, 0, 1, 1, 1},
+ {0, 1, 1, 0, 0, 0, 1, 1, 1},
+ {0, 1, 1, 0, 0, 0, 0, 0, 1},
+ {0, 1, 1, 1, 1, 0, 0, 0, 1},
+ {0, 0, 1, 1, 1, 0, 0, 0, 0},
+ {0, 0, 1, 1, 1, 1, 1, 0, 0}
+}; //to decide whether to continue or compute
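+/* tab_exclude[last_loc][curr_loc] is 1 when the SAD at curr_loc was already evaluated
+   in the previous refinement step, so AVCMBMotionSearch can skip recomputing it. */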
+
+const static int refine_next[8][2] = /* [curr_k][increment] */
+{
+ {0, 0}, {2, 0}, {1, 1}, {0, 2}, { -1, 1}, { -2, 0}, { -1, -1}, {0, -2}
+};
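+/* refine_next[k] is the (x,y) increment applied when stepping from neighbor k to
+   neighbor k+1, letting the local refinement walk the 8 positions around the current
+   best match using pointer arithmetic only. */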
+
+#ifdef _SAD_STAT
+uint32 num_MB = 0;
+uint32 num_cand = 0;
+#endif
+
+/************************************************************************/
+#define TH_INTER_2 100 /* temporary for now */
+
+//#define FIXED_INTERPRED_MODE AVC_P16
+#define FIXED_REF_IDX 0
+#define FIXED_MVX 0
+#define FIXED_MVY 0
+
+// only use when AVC_P8 or AVC_P8ref0
+#define FIXED_SUBMB_MODE AVC_4x4
+/*************************************************************************/
+
+/* Initialize arrays necessary for motion search */
+AVCEnc_Status InitMotionSearchModule(AVCHandle *avcHandle)
+{
+ AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ int search_range = rateCtrl->mvRange;
+ int number_of_subpel_positions = 4 * (2 * search_range + 3);
+ int max_mv_bits, max_mvd;
+ int temp_bits = 0;
+ uint8 *mvbits;
+ int bits, imax, imin, i;
+ uint8* subpel_pred = (uint8*) encvid->subpel_pred; // all 16 sub-pel positions
+
+
+ while (number_of_subpel_positions > 0)
+ {
+ temp_bits++;
+ number_of_subpel_positions >>= 1;
+ }
+
+ max_mv_bits = 3 + 2 * temp_bits;
+ max_mvd = (1 << (max_mv_bits >> 1)) - 1;
+
+ encvid->mvbits_array = (uint8*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,
+ sizeof(uint8) * (2 * max_mvd + 1), DEFAULT_ATTR);
+
+ if (encvid->mvbits_array == NULL)
+ {
+ return AVCENC_MEMORY_FAIL;
+ }
+
+ mvbits = encvid->mvbits = encvid->mvbits_array + max_mvd;
+
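+    /* mvbits[d] is the length in bits of the signed Exp-Golomb code for an MV difference
+       of d quarter-pel units; it is used as the rate term added to the SAD (MV_COST). */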
+ mvbits[0] = 1;
+ for (bits = 3; bits <= max_mv_bits; bits += 2)
+ {
+ imax = 1 << (bits >> 1);
+ imin = imax >> 1;
+
+ for (i = imin; i < imax; i++) mvbits[-i] = mvbits[i] = bits;
+ }
+
+ /* initialize half-pel search */
+ encvid->hpel_cand[0] = subpel_pred + REF_CENTER;
+ encvid->hpel_cand[1] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1 ;
+ encvid->hpel_cand[2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;
+ encvid->hpel_cand[3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;
+ encvid->hpel_cand[4] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;
+ encvid->hpel_cand[5] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25;
+ encvid->hpel_cand[6] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
+ encvid->hpel_cand[7] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
+ encvid->hpel_cand[8] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;
+
+ /* For quarter-pel interpolation around best half-pel result */
+
+ encvid->bilin_base[0][0] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;
+ encvid->bilin_base[0][1] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;
+ encvid->bilin_base[0][2] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
+ encvid->bilin_base[0][3] = subpel_pred + REF_CENTER;
+
+
+ encvid->bilin_base[1][0] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE;
+ encvid->bilin_base[1][1] = subpel_pred + REF_CENTER - 24;
+ encvid->bilin_base[1][2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;
+ encvid->bilin_base[1][3] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;
+
+ encvid->bilin_base[2][0] = subpel_pred + REF_CENTER - 24;
+ encvid->bilin_base[2][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;
+ encvid->bilin_base[2][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;
+ encvid->bilin_base[2][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;
+
+ encvid->bilin_base[3][0] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;
+ encvid->bilin_base[3][1] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;
+ encvid->bilin_base[3][2] = subpel_pred + REF_CENTER;
+ encvid->bilin_base[3][3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;
+
+ encvid->bilin_base[4][0] = subpel_pred + REF_CENTER;
+ encvid->bilin_base[4][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;
+ encvid->bilin_base[4][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25;
+ encvid->bilin_base[4][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;
+
+ encvid->bilin_base[5][0] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
+ encvid->bilin_base[5][1] = subpel_pred + REF_CENTER;
+ encvid->bilin_base[5][2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
+ encvid->bilin_base[5][3] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25;
+
+ encvid->bilin_base[6][0] = subpel_pred + REF_CENTER - 1;
+ encvid->bilin_base[6][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
+ encvid->bilin_base[6][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 24;
+ encvid->bilin_base[6][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
+
+ encvid->bilin_base[7][0] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE;
+ encvid->bilin_base[7][1] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;
+ encvid->bilin_base[7][2] = subpel_pred + REF_CENTER - 1;
+ encvid->bilin_base[7][3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;
+
+ encvid->bilin_base[8][0] = subpel_pred + REF_CENTER - 25;
+ encvid->bilin_base[8][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE;
+ encvid->bilin_base[8][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE;
+ encvid->bilin_base[8][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;
+
+
+ return AVCENC_SUCCESS;
+}
+
+/* Clean-up memory */
+void CleanMotionSearchModule(AVCHandle *avcHandle)
+{
+ AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
+
+ if (encvid->mvbits_array)
+ {
+ avcHandle->CBAVC_Free(avcHandle->userData, (int)(encvid->mvbits_array));
+ encvid->mvbits = NULL;
+ }
+
+ return ;
+}
+
+
+bool IntraDecisionABE(int *min_cost, uint8 *cur, int pitch, bool ave)
+{
+ int j;
+ uint8 *out;
+ int temp, SBE;
+ OsclFloat ABE;
+ bool intra = true;
+
+ SBE = 0;
+ /* top neighbor */
+ out = cur - pitch;
+ for (j = 0; j < 16; j++)
+ {
+ temp = out[j] - cur[j];
+ SBE += ((temp >= 0) ? temp : -temp);
+ }
+
+ /* left neighbor */
+ out = cur - 1;
+ out -= pitch;
+ cur -= pitch;
+ for (j = 0; j < 16; j++)
+ {
+ temp = *(out += pitch) - *(cur += pitch);
+ SBE += ((temp >= 0) ? temp : -temp);
+ }
+
+    /* compare the average boundary error (SBE/32) against the inter cost per pixel (min_cost/256) */
+ ABE = SBE / 32.0; //ABE = SBE/64.0; //
+ if (ABE >= *min_cost / 256.0) //if( ABE*0.8 >= min_cost/384.0) //
+ {
+ intra = false; // no possibility of intra, just use inter
+ }
+ else
+ {
+ if (ave == true)
+ {
+ *min_cost = (*min_cost + (int)(SBE * 8)) >> 1; // possibility of intra, averaging the cost
+ }
+ else
+ {
+ *min_cost = (int)(SBE * 8);
+ }
+ }
+
+ return intra;
+}
+
+/******* main function for macroblock prediction for the entire frame ***/
+/* if it turns out to be an IDR frame, set video->nal_unit_type to AVC_NALTYPE_IDR */
+void AVCMotionEstimation(AVCEncObject *encvid)
+{
+ AVCCommonObj *video = encvid->common;
+ int slice_type = video->slice_type;
+ AVCFrameIO *currInput = encvid->currInput;
+ AVCPictureData *refPic = video->RefPicList0[0];
+ int i, j, k;
+ int mbwidth = video->PicWidthInMbs;
+ int mbheight = video->PicHeightInMbs;
+ int totalMB = video->PicSizeInMbs;
+ int pitch = currInput->pitch;
+ AVCMacroblock *currMB, *mblock = video->mblock;
+ AVCMV *mot_mb_16x16, *mot16x16 = encvid->mot16x16;
+ // AVCMV *mot_mb_16x8, *mot_mb_8x16, *mot_mb_8x8, etc;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ uint8 *intraSearch = encvid->intraSearch;
+ uint FS_en = encvid->fullsearch_enable;
+
+ int NumIntraSearch, start_i, numLoop, incr_i;
+ int mbnum, offset;
+ uint8 *cur, *best_cand[5];
+ int totalSAD = 0; /* average SAD for rate control */
+ int type_pred;
+ int abe_cost;
+
+#ifdef HTFM
+ /***** HYPOTHESIS TESTING ********/ /* 2/28/01 */
+ int collect = 0;
+ HTFM_Stat htfm_stat;
+ double newvar[16];
+ double exp_lamda[15];
+ /*********************************/
+#endif
+ int hp_guess = 0;
+ uint32 mv_uint32;
+
+ offset = 0;
+
+ if (slice_type == AVC_I_SLICE)
+ {
+ /* cannot do I16 prediction here because it needs full decoding. */
+ for (i = 0; i < totalMB; i++)
+ {
+ encvid->min_cost[i] = 0x7FFFFFFF; /* max value for int */
+ }
+
+ memset(intraSearch, 1, sizeof(uint8)*totalMB);
+
+ encvid->firstIntraRefreshMBIndx = 0; /* reset this */
+
+ return ;
+ }
+ else // P_SLICE
+ {
+ for (i = 0; i < totalMB; i++)
+ {
+ mblock[i].mb_intra = 0;
+ }
+ memset(intraSearch, 1, sizeof(uint8)*totalMB);
+ }
+
+ if (refPic->padded == 0)
+ {
+ AVCPaddingEdge(refPic);
+ refPic->padded = 1;
+ }
+ /* Random INTRA update */
+ if (rateCtrl->intraMBRate)
+ {
+ AVCRasterIntraUpdate(encvid, mblock, totalMB, rateCtrl->intraMBRate);
+ }
+
+ encvid->sad_extra_info = NULL;
+#ifdef HTFM
+ /***** HYPOTHESIS TESTING ********/
+ InitHTFM(video, &htfm_stat, newvar, &collect);
+ /*********************************/
+#endif
+
+ if ((rateCtrl->scdEnable == 1)
+ && ((rateCtrl->frame_rate < 5.0) || (video->sliceHdr->frame_num > MIN_GOP)))
+ /* do not try to detect a new scene if low frame rate and too close to previous I-frame */
+ {
+ incr_i = 2;
+ numLoop = 2;
+ start_i = 1;
+ type_pred = 0; /* for initial candidate selection */
+ }
+ else
+ {
+ incr_i = 1;
+ numLoop = 1;
+ start_i = 0;
+ type_pred = 2;
+ }
+
+    /* First pass: loop through half of the macroblocks to determine scene change */
+    /* Second pass: the rest of the macroblocks */
+    NumIntraSearch = 0; // number of MBs to be intra searched in the encoding loop
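+    /* With scene-change detection active, the first loop visits every other MB (the
+       starting column toggles per row); if the potential-intra count exceeds roughly
+       48% of all MBs the frame is re-coded as an I slice, otherwise the second loop
+       covers the remaining MBs. */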
+ while (numLoop--)
+ {
+ for (j = 0; j < mbheight; j++)
+ {
+ if (incr_i > 1)
+ start_i = (start_i == 0 ? 1 : 0) ; /* toggle 0 and 1 */
+
+ offset = pitch * (j << 4) + (start_i << 4);
+
+ mbnum = j * mbwidth + start_i;
+
+ for (i = start_i; i < mbwidth; i += incr_i)
+ {
+ video->mbNum = mbnum;
+ video->currMB = currMB = mblock + mbnum;
+ mot_mb_16x16 = mot16x16 + mbnum;
+
+ cur = currInput->YCbCr[0] + offset;
+
+ if (currMB->mb_intra == 0) /* for INTER mode */
+ {
+#if defined(HTFM)
+ HTFMPrepareCurMB_AVC(encvid, &htfm_stat, cur, pitch);
+#else
+ AVCPrepareCurMB(encvid, cur, pitch);
+#endif
+ /************************************************************/
+ /******** full-pel 1MV search **********************/
+
+ AVCMBMotionSearch(encvid, cur, best_cand, i << 4, j << 4, type_pred,
+ FS_en, &hp_guess);
+
+ abe_cost = encvid->min_cost[mbnum] = mot_mb_16x16->sad;
+
+ /* set mbMode and MVs */
+ currMB->mbMode = AVC_P16;
+ currMB->MBPartPredMode[0][0] = AVC_Pred_L0;
+ mv_uint32 = ((mot_mb_16x16->y) << 16) | ((mot_mb_16x16->x) & 0xffff);
+ for (k = 0; k < 32; k += 2)
+ {
+ currMB->mvL0[k>>1] = mv_uint32;
+ }
+
+                    /* decide whether this MB should also be tested for intra */
+ if (i != mbwidth - 1 && j != mbheight - 1 && i != 0 && j != 0)
+ {
+ if (false == IntraDecisionABE(&abe_cost, cur, pitch, true))
+ {
+ intraSearch[mbnum] = 0;
+ }
+ else
+ {
+ NumIntraSearch++;
+ rateCtrl->MADofMB[mbnum] = abe_cost;
+ }
+ }
+ else // boundary MBs, always do intra search
+ {
+ NumIntraSearch++;
+ }
+
+ totalSAD += (int) rateCtrl->MADofMB[mbnum];//mot_mb_16x16->sad;
+ }
+ else /* INTRA update, use for prediction */
+ {
+ mot_mb_16x16[0].x = mot_mb_16x16[0].y = 0;
+
+ /* reset all other MVs to zero */
+ /* mot_mb_16x8, mot_mb_8x16, mot_mb_8x8, etc. */
+ abe_cost = encvid->min_cost[mbnum] = 0x7FFFFFFF; /* max value for int */
+
+ if (i != mbwidth - 1 && j != mbheight - 1 && i != 0 && j != 0)
+ {
+ IntraDecisionABE(&abe_cost, cur, pitch, false);
+
+ rateCtrl->MADofMB[mbnum] = abe_cost;
+ totalSAD += abe_cost;
+ }
+
+ NumIntraSearch++ ;
+ /* cannot do I16 prediction here because it needs full decoding. */
+ // intraSearch[mbnum] = 1;
+
+ }
+
+ mbnum += incr_i;
+ offset += (incr_i << 4);
+
+ } /* for i */
+ } /* for j */
+
+        /* since we cannot do intra/inter decision here, the SCD has to be
+        based on other criteria such as motion vector coherency or the SAD */
+ if (incr_i > 1 && numLoop) /* scene change on and first loop */
+ {
+ //if(NumIntraSearch > ((totalMB>>3)<<1) + (totalMB>>3)) /* 75% of 50%MBs */
+ if (NumIntraSearch*99 > (48*totalMB)) /* 20% of 50%MBs */
+                /* this threshold needs more investigation since NumIntraSearch
+                only counts potential intra MBs, not the actual ones */
+ {
+ /* we can choose to just encode I_SLICE without IDR */
+ //video->nal_unit_type = AVC_NALTYPE_IDR;
+ video->nal_unit_type = AVC_NALTYPE_SLICE;
+ video->sliceHdr->slice_type = AVC_I_ALL_SLICE;
+ video->slice_type = AVC_I_SLICE;
+ memset(intraSearch, 1, sizeof(uint8)*totalMB);
+ i = totalMB;
+ while (i--)
+ {
+ mblock[i].mb_intra = 1;
+ encvid->min_cost[i] = 0x7FFFFFFF; /* max value for int */
+ }
+
+ rateCtrl->totalSAD = totalSAD * 2; /* SAD */
+
+ return ;
+ }
+ }
+ /******** no scene change, continue motion search **********************/
+ start_i = 0;
+ type_pred++; /* second pass */
+ }
+
+ rateCtrl->totalSAD = totalSAD; /* SAD */
+
+#ifdef HTFM
+ /***** HYPOTHESIS TESTING ********/
+ if (collect)
+ {
+ collect = 0;
+ UpdateHTFM(encvid, newvar, exp_lamda, &htfm_stat);
+ }
+ /*********************************/
+#endif
+
+ return ;
+}
+
+/*=====================================================================
+    Function:   AVCPaddingEdge
+    Date:       09/16/2000
+    Purpose:    Pad the edges of a reference picture
+=====================================================================*/
+
+void AVCPaddingEdge(AVCPictureData *refPic)
+{
+ uint8 *src, *dst;
+ int i;
+ int pitch, width, height;
+ uint32 temp1, temp2;
+
+ width = refPic->width;
+ height = refPic->height;
+ pitch = refPic->pitch;
+
+ /* pad top */
+ src = refPic->Sl;
+
+ temp1 = *src; /* top-left corner */
+ temp2 = src[width-1]; /* top-right corner */
+ temp1 |= (temp1 << 8);
+ temp1 |= (temp1 << 16);
+ temp2 |= (temp2 << 8);
+ temp2 |= (temp2 << 16);
+
+ dst = src - (pitch << 4);
+
+ *((uint32*)(dst - 16)) = temp1;
+ *((uint32*)(dst - 12)) = temp1;
+ *((uint32*)(dst - 8)) = temp1;
+ *((uint32*)(dst - 4)) = temp1;
+
+ memcpy(dst, src, width);
+
+ *((uint32*)(dst += width)) = temp2;
+ *((uint32*)(dst + 4)) = temp2;
+ *((uint32*)(dst + 8)) = temp2;
+ *((uint32*)(dst + 12)) = temp2;
+
+ dst = dst - width - 16;
+
+ i = 15;
+ while (i--)
+ {
+ memcpy(dst + pitch, dst, pitch);
+ dst += pitch;
+ }
+
+ /* pad sides */
+ dst += (pitch + 16);
+ src = dst;
+ i = height;
+ while (i--)
+ {
+ temp1 = *src;
+ temp2 = src[width-1];
+ temp1 |= (temp1 << 8);
+ temp1 |= (temp1 << 16);
+ temp2 |= (temp2 << 8);
+ temp2 |= (temp2 << 16);
+
+ *((uint32*)(dst - 16)) = temp1;
+ *((uint32*)(dst - 12)) = temp1;
+ *((uint32*)(dst - 8)) = temp1;
+ *((uint32*)(dst - 4)) = temp1;
+
+ *((uint32*)(dst += width)) = temp2;
+ *((uint32*)(dst + 4)) = temp2;
+ *((uint32*)(dst + 8)) = temp2;
+ *((uint32*)(dst + 12)) = temp2;
+
+ src += pitch;
+ dst = src;
+ }
+
+ /* pad bottom */
+ dst -= 16;
+ i = 16;
+ while (i--)
+ {
+ memcpy(dst, dst - pitch, pitch);
+ dst += pitch;
+ }
+
+
+ return ;
+}
+
+/*===========================================================================
+ Function: AVCRasterIntraUpdate
+ Date: 2/26/01
+    Purpose:    Assign forced INTRA refresh to macroblocks in raster-scan order.
+                numRefresh macroblocks are updated per call (the count used to be programmable).
+===========================================================================*/
+void AVCRasterIntraUpdate(AVCEncObject *encvid, AVCMacroblock *mblock, int totalMB, int numRefresh)
+{
+ int indx, i;
+
+ indx = encvid->firstIntraRefreshMBIndx;
+ for (i = 0; i < numRefresh && indx < totalMB; i++)
+ {
+ (mblock + indx)->mb_intra = 1;
+ encvid->intraSearch[indx++] = 1;
+ }
+
+    /* if we reached the end of the frame, reset and wrap around */
+ if (indx >= totalMB - 1)
+ {
+ indx = 0;
+ while (i < numRefresh && indx < totalMB)
+ {
+ (mblock + indx)->mb_intra = 1;
+ encvid->intraSearch[indx++] = 1;
+ i++;
+ }
+ }
+
+ encvid->firstIntraRefreshMBIndx = indx; /* update with a new value */
+
+ return ;
+}
+
+
+#ifdef HTFM
+void InitHTFM(VideoEncData *encvid, HTFM_Stat *htfm_stat, double *newvar, int *collect)
+{
+ AVCCommonObj *video = encvid->common;
+ int i;
+ int lx = video->currPic->width; // padding
+ int lx2 = lx << 1;
+ int lx3 = lx2 + lx;
+ int rx = video->currPic->pitch;
+ int rx2 = rx << 1;
+ int rx3 = rx2 + rx;
+
+ int *offset, *offset2;
+
+ /* 4/11/01, collect data every 30 frames, doesn't have to be base layer */
+ if (((int)video->sliceHdr->frame_num) % 30 == 1)
+ {
+
+ *collect = 1;
+
+ htfm_stat->countbreak = 0;
+ htfm_stat->abs_dif_mad_avg = 0;
+
+ for (i = 0; i < 16; i++)
+ {
+ newvar[i] = 0.0;
+ }
+// encvid->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM_Collect;
+ encvid->functionPointer->SAD_Macroblock = &SAD_MB_HTFM_Collect;
+ encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;
+ encvid->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFM_Collectxh;
+ encvid->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFM_Collectyh;
+ encvid->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFM_Collectxhyh;
+ encvid->sad_extra_info = (void*)(htfm_stat);
+ offset = htfm_stat->offsetArray;
+ offset2 = htfm_stat->offsetRef;
+ }
+ else
+ {
+// encvid->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM;
+ encvid->functionPointer->SAD_Macroblock = &SAD_MB_HTFM;
+ encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;
+ encvid->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFMxh;
+ encvid->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFMyh;
+ encvid->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFMxhyh;
+ encvid->sad_extra_info = (void*)(encvid->nrmlz_th);
+ offset = encvid->nrmlz_th + 16;
+ offset2 = encvid->nrmlz_th + 32;
+ }
+
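+    /* Each offset is the top-left position of one of the 16 interleaved 4:1-decimated
+       subsets of the MB (every 4th pixel horizontally and vertically); the HTFM SAD
+       routines accumulate these subsets one at a time and compare the partial sum
+       against the nrmlz_th thresholds so hopeless candidates are rejected early. */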
+ offset[0] = 0;
+ offset[1] = lx2 + 2;
+ offset[2] = 2;
+ offset[3] = lx2;
+ offset[4] = lx + 1;
+ offset[5] = lx3 + 3;
+ offset[6] = lx + 3;
+ offset[7] = lx3 + 1;
+ offset[8] = lx;
+ offset[9] = lx3 + 2;
+ offset[10] = lx3 ;
+ offset[11] = lx + 2 ;
+ offset[12] = 1;
+ offset[13] = lx2 + 3;
+ offset[14] = lx2 + 1;
+ offset[15] = 3;
+
+ offset2[0] = 0;
+ offset2[1] = rx2 + 2;
+ offset2[2] = 2;
+ offset2[3] = rx2;
+ offset2[4] = rx + 1;
+ offset2[5] = rx3 + 3;
+ offset2[6] = rx + 3;
+ offset2[7] = rx3 + 1;
+ offset2[8] = rx;
+ offset2[9] = rx3 + 2;
+ offset2[10] = rx3 ;
+ offset2[11] = rx + 2 ;
+ offset2[12] = 1;
+ offset2[13] = rx2 + 3;
+ offset2[14] = rx2 + 1;
+ offset2[15] = 3;
+
+ return ;
+}
+
+void UpdateHTFM(AVCEncObject *encvid, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat)
+{
+ if (htfm_stat->countbreak == 0)
+ htfm_stat->countbreak = 1;
+
+ newvar[0] = (double)(htfm_stat->abs_dif_mad_avg) / (htfm_stat->countbreak * 16.);
+
+ if (newvar[0] < 0.001)
+ {
+        newvar[0] = 0.001; /* floor the variance to prevent overflow in the division below */
+ }
+ exp_lamda[0] = 1 / (newvar[0] * 1.4142136);
+ exp_lamda[1] = exp_lamda[0] * 1.5825;
+ exp_lamda[2] = exp_lamda[0] * 2.1750;
+ exp_lamda[3] = exp_lamda[0] * 3.5065;
+ exp_lamda[4] = exp_lamda[0] * 3.1436;
+ exp_lamda[5] = exp_lamda[0] * 3.5315;
+ exp_lamda[6] = exp_lamda[0] * 3.7449;
+ exp_lamda[7] = exp_lamda[0] * 4.5854;
+ exp_lamda[8] = exp_lamda[0] * 4.6191;
+ exp_lamda[9] = exp_lamda[0] * 5.4041;
+ exp_lamda[10] = exp_lamda[0] * 6.5974;
+ exp_lamda[11] = exp_lamda[0] * 10.5341;
+ exp_lamda[12] = exp_lamda[0] * 10.0719;
+ exp_lamda[13] = exp_lamda[0] * 12.0516;
+ exp_lamda[14] = exp_lamda[0] * 15.4552;
+
+ CalcThreshold(HTFM_Pf, exp_lamda, encvid->nrmlz_th);
+ return ;
+}
+
+
+void CalcThreshold(double pf, double exp_lamda[], int nrmlz_th[])
+{
+ int i;
+ double temp[15];
+ // printf("\nLamda: ");
+
+ /* parametric PREMODELling */
+ for (i = 0; i < 15; i++)
+ {
+ // printf("%g ",exp_lamda[i]);
+ if (pf < 0.5)
+ temp[i] = 1 / exp_lamda[i] * M4VENC_LOG(2 * pf);
+ else
+ temp[i] = -1 / exp_lamda[i] * M4VENC_LOG(2 * (1 - pf));
+ }
+
+ nrmlz_th[15] = 0;
+    for (i = 0; i < 15; i++)        /* scale up to the number of pixels */
+ nrmlz_th[i] = (int)(temp[i] * ((i + 1) << 4) + 0.5);
+
+ return ;
+}
+
+void HTFMPrepareCurMB_AVC(AVCEncObject *encvid, HTFM_Stat *htfm_stat, uint8 *cur, int pitch)
+{
+ AVCCommonObj *video = encvid->common;
+ uint32 *htfmMB = (uint32*)(encvid->currYMB);
+ uint8 *ptr, byte;
+ int *offset;
+ int i;
+ uint32 word;
+
+ if (((int)video->sliceHdr->frame_num) % 30 == 1)
+ {
+ offset = htfm_stat->offsetArray;
+ }
+ else
+ {
+ offset = encvid->nrmlz_th + 16;
+ }
+
+ for (i = 0; i < 16; i++)
+ {
+ ptr = cur + offset[i];
+ word = ptr[0];
+ byte = ptr[4];
+ word |= (byte << 8);
+ byte = ptr[8];
+ word |= (byte << 16);
+ byte = ptr[12];
+ word |= (byte << 24);
+ *htfmMB++ = word;
+
+ word = *(ptr += (pitch << 2));
+ byte = ptr[4];
+ word |= (byte << 8);
+ byte = ptr[8];
+ word |= (byte << 16);
+ byte = ptr[12];
+ word |= (byte << 24);
+ *htfmMB++ = word;
+
+ word = *(ptr += (pitch << 2));
+ byte = ptr[4];
+ word |= (byte << 8);
+ byte = ptr[8];
+ word |= (byte << 16);
+ byte = ptr[12];
+ word |= (byte << 24);
+ *htfmMB++ = word;
+
+ word = *(ptr += (pitch << 2));
+ byte = ptr[4];
+ word |= (byte << 8);
+ byte = ptr[8];
+ word |= (byte << 16);
+ byte = ptr[12];
+ word |= (byte << 24);
+ *htfmMB++ = word;
+ }
+
+ return ;
+}
+
+
+#endif // HTFM
+
+void AVCPrepareCurMB(AVCEncObject *encvid, uint8 *cur, int pitch)
+{
+ void* tmp = (void*)(encvid->currYMB);
+ uint32 *currYMB = (uint32*) tmp;
+ int i;
+
+ cur -= pitch;
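+    /* copy the 16x16 source MB into the contiguous currYMB buffer (four 32-bit words
+       per row) so the SAD routines can read it without striding through the frame pitch */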
+
+ for (i = 0; i < 16; i++)
+ {
+ *currYMB++ = *((uint32*)(cur += pitch));
+ *currYMB++ = *((uint32*)(cur + 4));
+ *currYMB++ = *((uint32*)(cur + 8));
+ *currYMB++ = *((uint32*)(cur + 12));
+ }
+
+ return ;
+}
+
+#ifdef FIXED_INTERPRED_MODE
+
+/* Because of the complexity of the predicted motion vector, we do not decide to skip
+a macroblock here just yet. */
+/* We will find the best motion vector and the best intra prediction mode for each block. */
+/* outputs are
+ currMB->NumMbPart, currMB->MbPartWidth, currMB->MbPartHeight,
+ currMB->NumSubMbPart[], currMB->SubMbPartWidth[], currMB->SubMbPartHeight,
+ currMB->MBPartPredMode[][] (L0 or L1 or BiPred)
+ currMB->RefIdx[], currMB->ref_idx_L0[],
+ currMB->mvL0[], currMB->mvL1[]
+ */
+
+AVCEnc_Status AVCMBMotionSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum,
+ int num_pass)
+{
+ AVCCommonObj *video = encvid->common;
+ int mbPartIdx, subMbPartIdx;
+ int16 *mv;
+ int i;
+ int SubMbPartHeight, SubMbPartWidth, NumSubMbPart;
+
+ /* assign value to currMB->MBPartPredMode[][x],subMbMode[],NumSubMbPart[],SubMbPartWidth[],SubMbPartHeight[] */
+
+ currMB->mbMode = FIXED_INTERPRED_MODE;
+ currMB->mb_intra = 0;
+
+ if (currMB->mbMode == AVC_P16)
+ {
+ currMB->NumMbPart = 1;
+ currMB->MbPartWidth = 16;
+ currMB->MbPartHeight = 16;
+ currMB->SubMbPartHeight[0] = 16;
+ currMB->SubMbPartWidth[0] = 16;
+ currMB->NumSubMbPart[0] = 1;
+ }
+ else if (currMB->mbMode == AVC_P16x8)
+ {
+ currMB->NumMbPart = 2;
+ currMB->MbPartWidth = 16;
+ currMB->MbPartHeight = 8;
+ for (i = 0; i < 2; i++)
+ {
+ currMB->SubMbPartWidth[i] = 16;
+ currMB->SubMbPartHeight[i] = 8;
+ currMB->NumSubMbPart[i] = 1;
+ }
+ }
+ else if (currMB->mbMode == AVC_P8x16)
+ {
+ currMB->NumMbPart = 2;
+ currMB->MbPartWidth = 8;
+ currMB->MbPartHeight = 16;
+ for (i = 0; i < 2; i++)
+ {
+ currMB->SubMbPartWidth[i] = 8;
+ currMB->SubMbPartHeight[i] = 16;
+ currMB->NumSubMbPart[i] = 1;
+ }
+ }
+ else if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0)
+ {
+ currMB->NumMbPart = 4;
+ currMB->MbPartWidth = 8;
+ currMB->MbPartHeight = 8;
+ if (FIXED_SUBMB_MODE == AVC_8x8)
+ {
+ SubMbPartHeight = 8;
+ SubMbPartWidth = 8;
+ NumSubMbPart = 1;
+ }
+ else if (FIXED_SUBMB_MODE == AVC_8x4)
+ {
+ SubMbPartHeight = 4;
+ SubMbPartWidth = 8;
+ NumSubMbPart = 2;
+ }
+ else if (FIXED_SUBMB_MODE == AVC_4x8)
+ {
+ SubMbPartHeight = 8;
+ SubMbPartWidth = 4;
+ NumSubMbPart = 2;
+ }
+ else if (FIXED_SUBMB_MODE == AVC_4x4)
+ {
+ SubMbPartHeight = 4;
+ SubMbPartWidth = 4;
+ NumSubMbPart = 4;
+ }
+
+ for (i = 0; i < 4; i++)
+ {
+ currMB->subMbMode[i] = FIXED_SUBMB_MODE;
+ currMB->SubMbPartHeight[i] = SubMbPartHeight;
+ currMB->SubMbPartWidth[i] = SubMbPartWidth;
+ currMB->NumSubMbPart[i] = NumSubMbPart;
+ }
+ }
+ else /* it's probably intra mode */
+ {
+ return AVCENC_SUCCESS;
+ }
+
+ for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
+ {
+ currMB->MBPartPredMode[mbPartIdx][0] = AVC_Pred_L0;
+ currMB->ref_idx_L0[mbPartIdx] = FIXED_REF_IDX;
+ currMB->RefIdx[mbPartIdx] = video->RefPicList0[FIXED_REF_IDX]->RefIdx;
+
+ for (subMbPartIdx = 0; subMbPartIdx < 4; subMbPartIdx++)
+ {
+ mv = (int16*)(currMB->mvL0 + (mbPartIdx << 2) + subMbPartIdx);
+
+ *mv++ = FIXED_MVX;
+ *mv = FIXED_MVY;
+ }
+ }
+
+ encvid->min_cost = 0;
+
+ return AVCENC_SUCCESS;
+}
+
+#else /* perform the search */
+
+/* This option #1 search is very similar to PV's MPEG4 motion search algorithm.
+  The search is done in a hierarchical manner, from the 16x16 MB down to smaller and smaller
+  partitions. At each level, a decision can be made to stop the search if the expected
+  prediction gain is not worth the computation. The decision can also be made at the finest
+  level for more fullsearch-like behavior, at the price of heavier computation. */
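+/* The flow below: pick initial candidates (AVCCandidateSelection); run the spiral
+   AVCFullSearch when fullsearch is enabled, or for the first few MBs of the top row when
+   the previous reference is an IDR frame; otherwise refine locally, one step at a time,
+   around the best candidate; then refine to half-/quarter-pel (AVCFindHalfPelMB) when
+   sub-pel search is enabled, and finally write the motion-compensated block back into
+   the current picture. */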
+void AVCMBMotionSearch(AVCEncObject *encvid, uint8 *cur, uint8 *best_cand[],
+ int i0, int j0, int type_pred, int FS_en, int *hp_guess)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCPictureData *currPic = video->currPic;
+ AVCSeqParamSet *currSPS = video->currSeqParams;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ AVCMacroblock *currMB = video->currMB;
+ uint8 *ref, *cand, *ncand;
+ void *extra_info = encvid->sad_extra_info;
+ int mbnum = video->mbNum;
+ int width = currPic->width; /* 6/12/01, must be multiple of 16 */
+ int height = currPic->height;
+ AVCMV *mot16x16 = encvid->mot16x16;
+ int (*SAD_Macroblock)(uint8*, uint8*, int, void*) = encvid->functionPointer->SAD_Macroblock;
+
+ int range = rateCtrl->mvRange;
+
+ int lx = currPic->pitch; /* padding */
+ int i, j, imin, jmin, ilow, ihigh, jlow, jhigh;
+ int d, dmin, dn[9];
+ int k;
+ int mvx[5], mvy[5];
+ int num_can, center_again;
+ int last_loc, new_loc = 0;
+ int step, max_step = range >> 1;
+ int next;
+
+ int cmvx, cmvy; /* estimated predicted MV */
+ int lev_idx;
+ int lambda_motion = encvid->lambda_motion;
+ uint8 *mvbits = encvid->mvbits;
+ int mvshift = 2;
+ int mvcost;
+
+ int min_sad = 65535;
+
+ ref = video->RefPicList0[DEFAULT_REF_IDX]->Sl; /* origin of actual frame */
+
+ /* have to initialize these params, necessary for interprediction part */
+ currMB->NumMbPart = 1;
+ currMB->SubMbPartHeight[0] = 16;
+ currMB->SubMbPartWidth[0] = 16;
+ currMB->NumSubMbPart[0] = 1;
+ currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] =
+ currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = DEFAULT_REF_IDX;
+ currMB->ref_idx_L1[0] = currMB->ref_idx_L1[1] =
+ currMB->ref_idx_L1[2] = currMB->ref_idx_L1[3] = DEFAULT_REF_IDX;
+ currMB->RefIdx[0] = currMB->RefIdx[1] =
+ currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[DEFAULT_REF_IDX]->RefIdx;
+
+ cur = encvid->currYMB; /* use smaller memory space for current MB */
+
+ /* find limit of the search (adjusting search range)*/
+ lev_idx = mapLev2Idx[currSPS->level_idc];
+
+ /* we can make this part dynamic based on previous statistics */
+ ilow = i0 - range;
+ if (i0 - ilow > 2047) /* clip to conform with the standard */
+ {
+ ilow = i0 - 2047;
+ }
+    if (ilow < -13) // changed from -15 to -13 because the 6-tap filter needs 2 extra pixels.
+ {
+ ilow = -13;
+ }
+
+ ihigh = i0 + range - 1;
+ if (ihigh - i0 > 2047) /* clip to conform with the standard */
+ {
+ ihigh = i0 + 2047;
+ }
+ if (ihigh > width - 3)
+ {
+ ihigh = width - 3; // change from width-1 to width-3 for the same reason as above
+ }
+
+ jlow = j0 - range;
+ if (j0 - jlow > MaxVmvR[lev_idx] - 1) /* clip to conform with the standard */
+ {
+ jlow = j0 - MaxVmvR[lev_idx] + 1;
+ }
+ if (jlow < -13) // same reason as above
+ {
+ jlow = -13;
+ }
+
+ jhigh = j0 + range - 1;
+ if (jhigh - j0 > MaxVmvR[lev_idx] - 1) /* clip to conform with the standard */
+ {
+ jhigh = j0 + MaxVmvR[lev_idx] - 1;
+ }
+ if (jhigh > height - 3) // same reason as above
+ {
+ jhigh = height - 3;
+ }
+
+ /* find initial motion vector & predicted MV*/
+ AVCCandidateSelection(mvx, mvy, &num_can, i0 >> 4, j0 >> 4, encvid, type_pred, &cmvx, &cmvy);
+
+ imin = i0;
+ jmin = j0; /* needed for fullsearch */
+ ncand = ref + i0 + j0 * lx;
+
+ /* for first row of MB, fullsearch can be used */
+ if (FS_en)
+ {
+ *hp_guess = 0; /* no guess for fast half-pel */
+
+ dmin = AVCFullSearch(encvid, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh, cmvx, cmvy);
+
+ ncand = ref + imin + jmin * lx;
+ }
+ else
+    {   /* fullsearch the top row only up to the (0,3) MB */
+        /* up to 30% complexity saving with the same quality */
+ if (video->PrevRefFrameNum == 0 && j0 == 0 && i0 <= 64 && type_pred != 1)
+ {
+ *hp_guess = 0; /* no guess for fast half-pel */
+ dmin = AVCFullSearch(encvid, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh, cmvx, cmvy);
+ ncand = ref + imin + jmin * lx;
+ }
+ else
+ {
+ /************** initialize candidate **************************/
+
+ dmin = 65535;
+
+ /* check if all are equal */
+ if (num_can == ALL_CAND_EQUAL)
+ {
+ i = i0 + mvx[0];
+ j = j0 + mvy[0];
+
+ if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
+ {
+ cand = ref + i + j * lx;
+
+ d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);
+ mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);
+ d += mvcost;
+
+ if (d < dmin)
+ {
+ dmin = d;
+ imin = i;
+ jmin = j;
+ ncand = cand;
+ min_sad = d - mvcost; // for rate control
+ }
+ }
+ }
+ else
+ {
+ /************** evaluate unique candidates **********************/
+ for (k = 0; k < num_can; k++)
+ {
+ i = i0 + mvx[k];
+ j = j0 + mvy[k];
+
+ if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
+ {
+ cand = ref + i + j * lx;
+ d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);
+ mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);
+ d += mvcost;
+
+ if (d < dmin)
+ {
+ dmin = d;
+ imin = i;
+ jmin = j;
+ ncand = cand;
+ min_sad = d - mvcost; // for rate control
+ }
+ }
+ }
+ }
+
+ /******************* local refinement ***************************/
+ center_again = 0;
+ last_loc = new_loc = 0;
+ // ncand = ref + jmin*lx + imin; /* center of the search */
+ step = 0;
+ dn[0] = dmin;
+ while (!center_again && step <= max_step)
+ {
+
+ AVCMoveNeighborSAD(dn, last_loc);
+
+ center_again = 1;
+ i = imin;
+ j = jmin - 1;
+ cand = ref + i + j * lx;
+
+ /* starting from [0,-1] */
+ /* spiral check one step at a time*/
+ for (k = 2; k <= 8; k += 2)
+ {
+ if (!tab_exclude[last_loc][k]) /* exclude last step computation */
+ { /* not already computed */
+ if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
+ {
+ d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);
+ mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);
+ d += mvcost;
+
+ dn[k] = d; /* keep it for half pel use */
+
+ if (d < dmin)
+ {
+ ncand = cand;
+ dmin = d;
+ imin = i;
+ jmin = j;
+ center_again = 0;
+ new_loc = k;
+ min_sad = d - mvcost; // for rate control
+ }
+ }
+ }
+ if (k == 8) /* end side search*/
+ {
+ if (!center_again)
+ {
+ k = -1; /* start diagonal search */
+ cand -= lx;
+ j--;
+ }
+ }
+ else
+ {
+ next = refine_next[k][0];
+ i += next;
+ cand += next;
+ next = refine_next[k][1];
+ j += next;
+ cand += lx * next;
+ }
+ }
+ last_loc = new_loc;
+ step ++;
+ }
+ if (!center_again)
+ AVCMoveNeighborSAD(dn, last_loc);
+
+ *hp_guess = AVCFindMin(dn);
+
+ encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0;
+ }
+ }
+
+ mot16x16[mbnum].sad = dmin;
+ mot16x16[mbnum].x = (imin - i0) << 2;
+ mot16x16[mbnum].y = (jmin - j0) << 2;
+ best_cand[0] = ncand;
+
+ if (rateCtrl->subPelEnable) // always enable half-pel search
+ {
+ /* find half-pel resolution motion vector */
+ min_sad = AVCFindHalfPelMB(encvid, cur, mot16x16 + mbnum, best_cand[0], i0, j0, *hp_guess, cmvx, cmvy);
+
+ encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0;
+
+
+ if (encvid->best_qpel_pos == -1)
+ {
+ ncand = encvid->hpel_cand[encvid->best_hpel_pos];
+ }
+ else
+ {
+ ncand = encvid->qpel_cand[encvid->best_qpel_pos];
+ }
+ }
+ else
+ {
+ encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0;
+ }
+
+ /** do motion comp here for now */
+ ref = currPic->Sl + i0 + j0 * lx;
+ /* copy from the best result to current Picture */
+ for (j = 0; j < 16; j++)
+ {
+ for (i = 0; i < 16; i++)
+ {
+ *ref++ = *ncand++;
+ }
+ ref += (lx - 16);
+ ncand += 8;
+ }
+
+ return ;
+}
+
+#endif
+
+/*===============================================================================
+ Function: AVCFullSearch
+ Date: 09/16/2000
+    Purpose:    Perform full-search motion estimation over the search
+                region in a spiral-outward manner.
+    Input/Output:   encoder object, reference (previous) frame, current MB, current
+                position (also output), search boundaries, predicted MV.
+===============================================================================*/
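+/* The spiral walk starts at the co-located position; each ring k = 1..range begins at
+   the top-left corner (i0-k, j0-k) and steps right along the top edge, down the right
+   edge, left along the bottom and up the left edge (8k positions per ring). Every SAD
+   is biased by the MV_COST rate term before comparison. */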
+int AVCFullSearch(AVCEncObject *encvid, uint8 *prev, uint8 *cur,
+ int *imin, int *jmin, int ilow, int ihigh, int jlow, int jhigh,
+ int cmvx, int cmvy)
+{
+ int range = encvid->rateCtrl->mvRange;
+ AVCPictureData *currPic = encvid->common->currPic;
+ uint8 *cand;
+ int i, j, k, l;
+ int d, dmin;
+ int i0 = *imin; /* current position */
+ int j0 = *jmin;
+ int (*SAD_Macroblock)(uint8*, uint8*, int, void*) = encvid->functionPointer->SAD_Macroblock;
+ void *extra_info = encvid->sad_extra_info;
+ int lx = currPic->pitch; /* with padding */
+
+ int offset = i0 + j0 * lx;
+
+ int lambda_motion = encvid->lambda_motion;
+ uint8 *mvbits = encvid->mvbits;
+ int mvshift = 2;
+ int mvcost;
+ int min_sad;
+
+ cand = prev + offset;
+
+ dmin = (*SAD_Macroblock)(cand, cur, (65535 << 16) | lx, (void*)extra_info);
+ mvcost = MV_COST(lambda_motion, mvshift, 0, 0, cmvx, cmvy);
+ min_sad = dmin;
+ dmin += mvcost;
+
+ /* perform spiral search */
+ for (k = 1; k <= range; k++)
+ {
+
+ i = i0 - k;
+ j = j0 - k;
+
+ cand = prev + i + j * lx;
+
+ for (l = 0; l < 8*k; l++)
+ {
+ /* no need for boundary checking again */
+ if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)
+ {
+ d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, (void*)extra_info);
+ mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);
+ d += mvcost;
+
+ if (d < dmin)
+ {
+ dmin = d;
+ *imin = i;
+ *jmin = j;
+ min_sad = d - mvcost;
+ }
+ }
+
+ if (l < (k << 1))
+ {
+ i++;
+ cand++;
+ }
+ else if (l < (k << 2))
+ {
+ j++;
+ cand += lx;
+ }
+ else if (l < ((k << 2) + (k << 1)))
+ {
+ i--;
+ cand--;
+ }
+ else
+ {
+ j--;
+ cand -= lx;
+ }
+ }
+ }
+
+ encvid->rateCtrl->MADofMB[encvid->common->mbNum] = (min_sad / 256.0); // for rate control
+
+ return dmin;
+}
+
+/*===============================================================================
+ Function: AVCCandidateSelection
+ Date: 09/16/2000
+    Purpose:    Fill up the list of candidates using the spatio-temporal correlation
+                among neighboring blocks.
+    Input/Output:   type_pred = 0: first pass, 1: second pass, or no SCD
+    Modified:   09/23/01, get rid of redundant candidates before passing back.
+                09/11/07, added return of the modified predicted MV; this is
+                    needed for both fast search and fullsearch.
+===============================================================================*/
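+/* Besides the candidate list, this returns a predicted MV (cmvx, cmvy) which the callers
+   use as the center of the MV_COST rate term; it is derived from the left (A), top (B)
+   and top-right/top-left (C) neighbor vectors gathered below. */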
+
+void AVCCandidateSelection(int *mvx, int *mvy, int *num_can, int imb, int jmb,
+ AVCEncObject *encvid, int type_pred, int *cmvx, int *cmvy)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCMV *mot16x16 = encvid->mot16x16;
+ AVCMV *pmot;
+ int mbnum = video->mbNum;
+ int mbwidth = video->PicWidthInMbs;
+ int mbheight = video->PicHeightInMbs;
+ int i, j, same, num1;
+
+ /* this part is for predicted MV */
+ int pmvA_x = 0, pmvA_y = 0, pmvB_x = 0, pmvB_y = 0, pmvC_x = 0, pmvC_y = 0;
+ int availA = 0, availB = 0, availC = 0;
+
+ *num_can = 0;
+
+    if (video->PrevRefFrameNum != 0) // previous frame is not an IDR frame
+ {
+ /* Spatio-Temporal Candidate (five candidates) */
+ if (type_pred == 0) /* first pass */
+ {
+ pmot = &mot16x16[mbnum]; /* same coordinate previous frame */
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ if (imb >= (mbwidth >> 1) && imb > 0) /*left neighbor previous frame */
+ {
+ pmot = &mot16x16[mbnum-1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ else if (imb + 1 < mbwidth) /*right neighbor previous frame */
+ {
+ pmot = &mot16x16[mbnum+1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+
+ if (jmb < mbheight - 1) /*bottom neighbor previous frame */
+ {
+ pmot = &mot16x16[mbnum+mbwidth];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ else if (jmb > 0) /*upper neighbor previous frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+
+ if (imb > 0 && jmb > 0) /* upper-left neighbor current frame*/
+ {
+ pmot = &mot16x16[mbnum-mbwidth-1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+            if (jmb > 0 && imb < mbwidth - 1) /* upper-right neighbor, current frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth+1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ }
+ else /* second pass */
+ /* original ST1 algorithm */
+ {
+ pmot = &mot16x16[mbnum]; /* same coordinate previous frame */
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+
+ if (imb > 0) /*left neighbor current frame */
+ {
+ pmot = &mot16x16[mbnum-1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ if (jmb > 0) /*upper neighbor current frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ if (imb < mbwidth - 1) /*right neighbor previous frame */
+ {
+ pmot = &mot16x16[mbnum+1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ if (jmb < mbheight - 1) /*bottom neighbor previous frame */
+ {
+ pmot = &mot16x16[mbnum+mbwidth];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ }
+
+ /* get predicted MV */
+ if (imb > 0) /* get MV from left (A) neighbor either on current or previous frame */
+ {
+ availA = 1;
+ pmot = &mot16x16[mbnum-1];
+ pmvA_x = pmot->x;
+ pmvA_y = pmot->y;
+ }
+
+ if (jmb > 0) /* get MV from top (B) neighbor either on current or previous frame */
+ {
+ availB = 1;
+ pmot = &mot16x16[mbnum-mbwidth];
+ pmvB_x = pmot->x;
+ pmvB_y = pmot->y;
+
+ availC = 1;
+
+ if (imb < mbwidth - 1) /* get MV from top-right (C) neighbor of current frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth+1];
+ }
+ else /* get MV from top-left (D) neighbor of current frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth-1];
+ }
+ pmvC_x = pmot->x;
+ pmvC_y = pmot->y;
+ }
+
+ }
+ else /* only Spatial Candidate (four candidates)*/
+ {
+ if (type_pred == 0) /*first pass*/
+ {
+ if (imb > 1) /* neighbor two blocks away to the left */
+ {
+ pmot = &mot16x16[mbnum-2];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ if (imb > 0 && jmb > 0) /* upper-left neighbor */
+ {
+ pmot = &mot16x16[mbnum-mbwidth-1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ if (jmb > 0 && imb < mbwidth - 1) /* upper-right neighbor */
+ {
+ pmot = &mot16x16[mbnum-mbwidth+1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+
+ /* get predicted MV */
+ if (imb > 1) /* get MV from 2nd left (A) neighbor either of current frame */
+ {
+ availA = 1;
+ pmot = &mot16x16[mbnum-2];
+ pmvA_x = pmot->x;
+ pmvA_y = pmot->y;
+ }
+
+ if (jmb > 0 && imb > 0) /* get MV from top-left (B) neighbor of current frame */
+ {
+ availB = 1;
+ pmot = &mot16x16[mbnum-mbwidth-1];
+ pmvB_x = pmot->x;
+ pmvB_y = pmot->y;
+ }
+
+ if (jmb > 0 && imb < mbwidth - 1)
+ {
+ availC = 1;
+ pmot = &mot16x16[mbnum-mbwidth+1];
+ pmvC_x = pmot->x;
+ pmvC_y = pmot->y;
+ }
+ }
+//#ifdef SCENE_CHANGE_DETECTION
+ /* second pass (ST2 algorithm)*/
+ else
+ {
+ if (type_pred == 1) /* 4/7/01 */
+ {
+ if (imb > 0) /*left neighbor current frame */
+ {
+ pmot = &mot16x16[mbnum-1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ if (jmb > 0) /*upper neighbor current frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ if (imb < mbwidth - 1) /*right neighbor current frame */
+ {
+ pmot = &mot16x16[mbnum+1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ if (jmb < mbheight - 1) /*bottom neighbor current frame */
+ {
+ pmot = &mot16x16[mbnum+mbwidth];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ }
+ //#else
+ else /* original ST1 algorithm */
+ {
+ if (imb > 0) /*left neighbor current frame */
+ {
+ pmot = &mot16x16[mbnum-1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+
+ if (jmb > 0) /*upper-left neighbor current frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth-1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+
+ }
+ if (jmb > 0) /*upper neighbor current frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+
+ if (imb < mbwidth - 1) /*upper-right neighbor current frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth+1];
+ mvx[(*num_can)] = (pmot->x) >> 2;
+ mvy[(*num_can)++] = (pmot->y) >> 2;
+ }
+ }
+ }
+
+ /* get predicted MV */
+ if (imb > 0) /* get MV from left (A) neighbor either on current or previous frame */
+ {
+ availA = 1;
+ pmot = &mot16x16[mbnum-1];
+ pmvA_x = pmot->x;
+ pmvA_y = pmot->y;
+ }
+
+ if (jmb > 0) /* get MV from top (B) neighbor either on current or previous frame */
+ {
+ availB = 1;
+ pmot = &mot16x16[mbnum-mbwidth];
+ pmvB_x = pmot->x;
+ pmvB_y = pmot->y;
+
+ availC = 1;
+
+ if (imb < mbwidth - 1) /* get MV from top-right (C) neighbor of current frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth+1];
+ }
+ else /* get MV from top-left (D) neighbor of current frame */
+ {
+ pmot = &mot16x16[mbnum-mbwidth-1];
+ }
+ pmvC_x = pmot->x;
+ pmvC_y = pmot->y;
+ }
+ }
+//#endif
+ }
+
+ /* 3/23/01, remove redundant candidate (possible k-mean) */
+ num1 = *num_can;
+ *num_can = 1;
+ for (i = 1; i < num1; i++)
+ {
+ same = 0;
+ j = 0;
+ while (!same && j < *num_can)
+ {
+#if (CANDIDATE_DISTANCE==0)
+ if (mvx[i] == mvx[j] && mvy[i] == mvy[j])
+#else
+ // modified k-mean, 3/24/01, shouldn't be greater than 3
+ if (AVC_ABS(mvx[i] - mvx[j]) + AVC_ABS(mvy[i] - mvy[j]) < CANDIDATE_DISTANCE)
+#endif
+ same = 1;
+ j++;
+ }
+ if (!same)
+ {
+ mvx[*num_can] = mvx[i];
+ mvy[*num_can] = mvy[i];
+ (*num_can)++;
+ }
+ }
+
+ if (num1 == 5 && *num_can == 1)
+ *num_can = ALL_CAND_EQUAL; /* all are equal */
+
+ /* calculate predicted MV */
+
+ if (availA && !(availB || availC))
+ {
+ *cmvx = pmvA_x;
+ *cmvy = pmvA_y;
+ }
+ else
+ {
+ *cmvx = AVC_MEDIAN(pmvA_x, pmvB_x, pmvC_x);
+ *cmvy = AVC_MEDIAN(pmvA_y, pmvB_y, pmvC_y);
+ }
+
+ return ;
+}
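+
+/* Illustrative sketch, not part of the original change: the predicted MV (*cmvx, *cmvy)
+   computed above is the component-wise median of the left (A), top (B) and top-right/top-left (C)
+   neighbor MVs -- the standard H.264 MV predictor -- with a fallback to A when only A is
+   available. A minimal stand-alone version of that median, with a hypothetical helper name
+   (the real code uses the AVC_MEDIAN macro): */
+#if 0   /* illustrative only, not compiled */
+static int median3(int a, int b, int c)
+{
+    int mx = (a > b) ? a : b;   /* max(a, b) */
+    int mn = (a > b) ? b : a;   /* min(a, b) */
+    if (c > mx) return mx;      /* c is largest  -> median is max(a, b) */
+    if (c < mn) return mn;      /* c is smallest -> median is min(a, b) */
+    return c;                   /* otherwise c itself is the median     */
+}
+/* e.g. A = (4,-2), B = (8,0), C = (2,6)  ==>  predictor = (median3(4,8,2), median3(-2,0,6)) = (4, 0) */
+#endif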
+
+
+/*************************************************************
+ Function: AVCMoveNeighborSAD
+ Date: 3/27/01
+ Purpose: Move neighboring SAD around when center has shifted
+*************************************************************/
+
+void AVCMoveNeighborSAD(int dn[], int new_loc)
+{
+ int tmp[9];
+ tmp[0] = dn[0];
+ tmp[1] = dn[1];
+ tmp[2] = dn[2];
+ tmp[3] = dn[3];
+ tmp[4] = dn[4];
+ tmp[5] = dn[5];
+ tmp[6] = dn[6];
+ tmp[7] = dn[7];
+ tmp[8] = dn[8];
+ dn[0] = dn[1] = dn[2] = dn[3] = dn[4] = dn[5] = dn[6] = dn[7] = dn[8] = 65536;
+
+ switch (new_loc)
+ {
+ case 0:
+ break;
+ case 1:
+ dn[4] = tmp[2];
+ dn[5] = tmp[0];
+ dn[6] = tmp[8];
+ break;
+ case 2:
+ dn[4] = tmp[3];
+ dn[5] = tmp[4];
+ dn[6] = tmp[0];
+ dn[7] = tmp[8];
+ dn[8] = tmp[1];
+ break;
+ case 3:
+ dn[6] = tmp[4];
+ dn[7] = tmp[0];
+ dn[8] = tmp[2];
+ break;
+ case 4:
+ dn[1] = tmp[2];
+ dn[2] = tmp[3];
+ dn[6] = tmp[5];
+ dn[7] = tmp[6];
+ dn[8] = tmp[0];
+ break;
+ case 5:
+ dn[1] = tmp[0];
+ dn[2] = tmp[4];
+ dn[8] = tmp[6];
+ break;
+ case 6:
+ dn[1] = tmp[8];
+ dn[2] = tmp[0];
+ dn[3] = tmp[4];
+ dn[4] = tmp[5];
+ dn[8] = tmp[7];
+ break;
+ case 7:
+ dn[2] = tmp[8];
+ dn[3] = tmp[0];
+ dn[4] = tmp[6];
+ break;
+ case 8:
+ dn[2] = tmp[1];
+ dn[3] = tmp[2];
+ dn[4] = tmp[0];
+ dn[5] = tmp[6];
+ dn[6] = tmp[7];
+ break;
+ }
+ dn[0] = tmp[new_loc];
+
+ return ;
+}
+
+/* 3/28/01, find minimal of dn[9] */
+
+int AVCFindMin(int dn[])
+{
+ int min, i;
+ int dmin;
+
+ dmin = dn[1];
+ min = 1;
+ for (i = 2; i < 9; i++)
+ {
+ if (dn[i] < dmin)
+ {
+ dmin = dn[i];
+ min = i;
+ }
+ }
+
+ return min;
+}
+
+
+
diff --git a/media/libstagefright/codecs/avc/enc/src/rate_control.cpp b/media/libstagefright/codecs/avc/enc/src/rate_control.cpp
new file mode 100644
index 0000000..15b55fb
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/rate_control.cpp
@@ -0,0 +1,981 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+#include <math.h>
+
+/* rate control variables */
+#define RC_MAX_QUANT 51
+#define RC_MIN_QUANT 0 // could be capped at 10 to prevent rate fluctuation
+
+#define MAD_MIN 1 /* handle the case of division by zero in RC */
+
+
+/* local functions */
+double QP2Qstep(int QP);
+int Qstep2QP(double Qstep);
+
+double ComputeFrameMAD(AVCCommonObj *video, AVCRateControl *rateCtrl);
+
+void targetBitCalculation(AVCEncObject *encvid, AVCCommonObj *video, AVCRateControl *rateCtrl, MultiPass *pMP);
+
+void calculateQuantizer_Multipass(AVCEncObject *encvid, AVCCommonObj *video,
+ AVCRateControl *rateCtrl, MultiPass *pMP);
+
+void updateRC_PostProc(AVCRateControl *rateCtrl, MultiPass *pMP);
+
+void AVCSaveRDSamples(MultiPass *pMP, int counter_samples);
+
+void updateRateControl(AVCRateControl *rateControl, int nal_type);
+
+int GetAvgFrameQP(AVCRateControl *rateCtrl)
+{
+ return rateCtrl->Qc;
+}
+
+AVCEnc_Status RCDetermineFrameNum(AVCEncObject *encvid, AVCRateControl *rateCtrl, uint32 modTime, uint *frameNum)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCSliceHeader *sliceHdr = video->sliceHdr;
+ uint32 modTimeRef = encvid->modTimeRef;
+ int32 currFrameNum ;
+ int frameInc;
+
+
+ /* check with the buffer fullness to make sure that we have enough bits to encode this frame */
+ /* we can use a threshold to guarantee minimum picture quality */
+ /**********************************/
+
+ /* for now, the default is to encode every frame, To Be Changed */
+ if (rateCtrl->first_frame)
+ {
+ encvid->modTimeRef = modTime;
+ encvid->wrapModTime = 0;
+ encvid->prevFrameNum = 0;
+ encvid->prevProcFrameNum = 0;
+
+ *frameNum = 0;
+
+ /* set frame type to IDR-frame */
+ video->nal_unit_type = AVC_NALTYPE_IDR;
+ sliceHdr->slice_type = AVC_I_ALL_SLICE;
+ video->slice_type = AVC_I_SLICE;
+
+ return AVCENC_SUCCESS;
+ }
+ else
+ {
+ if (modTime < modTimeRef) /* modTime wrapped around */
+ {
+ encvid->wrapModTime += ((uint32)0xFFFFFFFF - modTimeRef) + 1;
+ encvid->modTimeRef = modTimeRef = 0;
+ }
+ modTime += encvid->wrapModTime; /* wrapModTime is non zero after wrap-around */
+
+ currFrameNum = (int32)(((modTime - modTimeRef) * rateCtrl->frame_rate + 200) / 1000); /* add a small rounding offset */
+
+ if (currFrameNum <= (int32)encvid->prevProcFrameNum)
+ {
+ return AVCENC_FAIL; /* this is a late frame; do not encode it */
+ }
+
+ frameInc = currFrameNum - encvid->prevProcFrameNum;
+
+ if (frameInc < rateCtrl->skip_next_frame + 1)
+ {
+ return AVCENC_FAIL; /* frame skip required to maintain the target bit rate. */
+ }
+
+ RCUpdateBuffer(video, rateCtrl, frameInc - rateCtrl->skip_next_frame); /* in case more frames dropped */
+
+ *frameNum = currFrameNum;
+
+ /* This part would be similar to DetermineVopType of m4venc */
+ if ((*frameNum >= (uint)rateCtrl->idrPeriod && rateCtrl->idrPeriod > 0) || (*frameNum > video->MaxFrameNum)) /* first frame or IDR*/
+ {
+ /* set frame type to IDR-frame */
+ if (rateCtrl->idrPeriod)
+ {
+ encvid->modTimeRef += (uint32)(rateCtrl->idrPeriod * 1000 / rateCtrl->frame_rate);
+ *frameNum -= rateCtrl->idrPeriod;
+ }
+ else
+ {
+ encvid->modTimeRef += (uint32)(video->MaxFrameNum * 1000 / rateCtrl->frame_rate);
+ *frameNum -= video->MaxFrameNum;
+ }
+
+ video->nal_unit_type = AVC_NALTYPE_IDR;
+ sliceHdr->slice_type = AVC_I_ALL_SLICE;
+ video->slice_type = AVC_I_SLICE;
+ encvid->prevProcFrameNum = *frameNum;
+ }
+ else
+ {
+ video->nal_unit_type = AVC_NALTYPE_SLICE;
+ sliceHdr->slice_type = AVC_P_ALL_SLICE;
+ video->slice_type = AVC_P_SLICE;
+ encvid->prevProcFrameNum = currFrameNum;
+ }
+
+ }
+
+ return AVCENC_SUCCESS;
+}
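+
+/* Illustrative sketch, not part of the original change: the target frame number above is
+   derived from the wall-clock timestamp as ((modTime - modTimeRef) * frame_rate + 200) / 1000,
+   i.e. millisecond timestamps scaled by the frame rate with a small rounding term. A
+   hypothetical stand-alone version with a worked example: */
+#if 0   /* illustrative only, not compiled */
+static int32 frameNumFromTime(uint32 modTime, uint32 modTimeRef, double frame_rate)
+{
+    return (int32)(((modTime - modTimeRef) * frame_rate + 200) / 1000);
+}
+/* e.g. 15 fps, 500 ms after the reference time: (500*15 + 200)/1000 = 7.7 ==> frame 7 */
+#endif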
+
+void RCUpdateBuffer(AVCCommonObj *video, AVCRateControl *rateCtrl, int frameInc)
+{
+ int tmp;
+ MultiPass *pMP = rateCtrl->pMP;
+
+ OSCL_UNUSED_ARG(video);
+
+ if (rateCtrl->rcEnable == TRUE)
+ {
+ if (frameInc > 1)
+ {
+ tmp = rateCtrl->bitsPerFrame * (frameInc - 1);
+ rateCtrl->VBV_fullness -= tmp;
+ pMP->counter_BTsrc += 10 * (frameInc - 1);
+
+ /* Check buffer underflow */
+ if (rateCtrl->VBV_fullness < rateCtrl->low_bound)
+ {
+ rateCtrl->VBV_fullness = rateCtrl->low_bound; // -rateCtrl->Bs/2;
+ rateCtrl->TMN_W = rateCtrl->VBV_fullness - rateCtrl->low_bound;
+ pMP->counter_BTsrc = pMP->counter_BTdst + (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
+ }
+ }
+ }
+}
+
+
+AVCEnc_Status InitRateControlModule(AVCHandle *avcHandle)
+{
+ AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
+ AVCCommonObj *video = encvid->common;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ double L1, L2, L3, bpp;
+ int qp;
+ int i, j;
+
+ rateCtrl->basicUnit = video->PicSizeInMbs;
+
+ rateCtrl->MADofMB = (double*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,
+ video->PicSizeInMbs * sizeof(double), DEFAULT_ATTR);
+
+ if (!rateCtrl->MADofMB)
+ {
+ goto CLEANUP_RC;
+ }
+
+ if (rateCtrl->rcEnable == TRUE)
+ {
+ rateCtrl->pMP = (MultiPass*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, sizeof(MultiPass), DEFAULT_ATTR);
+ if (!rateCtrl->pMP)
+ {
+ goto CLEANUP_RC;
+ }
+ memset(rateCtrl->pMP, 0, sizeof(MultiPass));
+ rateCtrl->pMP->encoded_frames = -1; /* forget about the very first I frame */
+
+ /* RDInfo **pRDSamples */
+ rateCtrl->pMP->pRDSamples = (RDInfo **)avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, (30 * sizeof(RDInfo *)), DEFAULT_ATTR);
+ if (!rateCtrl->pMP->pRDSamples)
+ {
+ goto CLEANUP_RC;
+ }
+
+ for (i = 0; i < 30; i++)
+ {
+ rateCtrl->pMP->pRDSamples[i] = (RDInfo *)avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, (32 * sizeof(RDInfo)), DEFAULT_ATTR);
+ if (!rateCtrl->pMP->pRDSamples[i])
+ {
+ goto CLEANUP_RC;
+ }
+ for (j = 0; j < 32; j++) memset(&(rateCtrl->pMP->pRDSamples[i][j]), 0, sizeof(RDInfo));
+ }
+ rateCtrl->pMP->frameRange = (int)(rateCtrl->frame_rate * 1.0); /* 1.0s time frame*/
+ rateCtrl->pMP->frameRange = AVC_MAX(rateCtrl->pMP->frameRange, 5);
+ rateCtrl->pMP->frameRange = AVC_MIN(rateCtrl->pMP->frameRange, 30);
+
+ rateCtrl->pMP->framePos = -1;
+
+
+ rateCtrl->bitsPerFrame = (int32)(rateCtrl->bitRate / rateCtrl->frame_rate);
+
+ /* BX rate control */
+ rateCtrl->skip_next_frame = 0; /* must be initialized */
+
+ rateCtrl->Bs = rateCtrl->cpbSize;
+ rateCtrl->TMN_W = 0;
+ rateCtrl->VBV_fullness = (int)(rateCtrl->Bs * 0.5); /* rateCtrl->Bs */
+ rateCtrl->encoded_frames = 0;
+
+ rateCtrl->TMN_TH = rateCtrl->bitsPerFrame;
+
+ rateCtrl->max_BitVariance_num = (int)((OsclFloat)(rateCtrl->Bs - rateCtrl->VBV_fullness) / (rateCtrl->bitsPerFrame / 10.0)) - 5;
+ if (rateCtrl->max_BitVariance_num < 0) rateCtrl->max_BitVariance_num += 5;
+
+ // Set the initial buffer fullness
+ /* According to the spec, the initial buffer fullness needs to be set to 1/3 */
+ rateCtrl->VBV_fullness = (int)(rateCtrl->Bs / 3.0 - rateCtrl->Bs / 2.0); /* the buffer range is [-Bs/2, Bs/2] */
+ rateCtrl->pMP->counter_BTsrc = (int)((rateCtrl->Bs / 2.0 - rateCtrl->Bs / 3.0) / (rateCtrl->bitsPerFrame / 10.0));
+ rateCtrl->TMN_W = (int)(rateCtrl->VBV_fullness + rateCtrl->pMP->counter_BTsrc * (rateCtrl->bitsPerFrame / 10.0));
+
+ rateCtrl->low_bound = -rateCtrl->Bs / 2;
+ rateCtrl->VBV_fullness_offset = 0;
+
+ /* Setting the bitrate and framerate */
+ rateCtrl->pMP->bitrate = rateCtrl->bitRate;
+ rateCtrl->pMP->framerate = rateCtrl->frame_rate;
+ rateCtrl->pMP->target_bits_per_frame = rateCtrl->pMP->bitrate / rateCtrl->pMP->framerate;
+
+ /*compute the initial QP*/
+ bpp = 1.0 * rateCtrl->bitRate / (rateCtrl->frame_rate * (video->PicSizeInMbs << 8));
+ if (video->PicWidthInSamplesL == 176)
+ {
+ L1 = 0.1;
+ L2 = 0.3;
+ L3 = 0.6;
+ }
+ else if (video->PicWidthInSamplesL == 352)
+ {
+ L1 = 0.2;
+ L2 = 0.6;
+ L3 = 1.2;
+ }
+ else
+ {
+ L1 = 0.6;
+ L2 = 1.4;
+ L3 = 2.4;
+ }
+
+ if (rateCtrl->initQP == 0)
+ {
+ if (bpp <= L1)
+ qp = 35;
+ else if (bpp <= L2)
+ qp = 25;
+ else if (bpp <= L3)
+ qp = 20;
+ else
+ qp = 15;
+ rateCtrl->initQP = qp;
+ }
+
+ rateCtrl->Qc = rateCtrl->initQP;
+ }
+
+ return AVCENC_SUCCESS;
+
+CLEANUP_RC:
+
+ CleanupRateControlModule(avcHandle);
+ return AVCENC_MEMORY_FAIL;
+
+}
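+
+/* Illustrative sketch, not part of the original change: when no initial QP is supplied, the
+   code above picks one from the bits-per-luma-pixel ratio bpp = bitRate / (frame_rate * 256 *
+   PicSizeInMbs), with thresholds that depend on the picture width. A hypothetical stand-alone
+   version of the QCIF-width (176) branch: */
+#if 0   /* illustrative only, not compiled */
+static int initialQpQcif(double bitRate, double frame_rate, int picSizeInMbs)
+{
+    double bpp = bitRate / (frame_rate * (picSizeInMbs << 8));  /* bits per luma pixel */
+    if (bpp <= 0.1) return 35;
+    if (bpp <= 0.3) return 25;
+    if (bpp <= 0.6) return 20;
+    return 15;
+}
+/* e.g. QCIF (99 MBs) at 192 kbps, 15 fps: bpp = 192000 / (15 * 25344) ~= 0.505 ==> initial QP 20 */
+#endif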
+
+
+void CleanupRateControlModule(AVCHandle *avcHandle)
+{
+ AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ int i;
+
+ if (rateCtrl->MADofMB)
+ {
+ avcHandle->CBAVC_Free(avcHandle->userData, (int)(rateCtrl->MADofMB));
+ }
+
+ if (rateCtrl->pMP)
+ {
+ if (rateCtrl->pMP->pRDSamples)
+ {
+ for (i = 0; i < 30; i++)
+ {
+ if (rateCtrl->pMP->pRDSamples[i])
+ {
+ avcHandle->CBAVC_Free(avcHandle->userData, (int)rateCtrl->pMP->pRDSamples[i]);
+ }
+ }
+ avcHandle->CBAVC_Free(avcHandle->userData, (int)rateCtrl->pMP->pRDSamples);
+ }
+ avcHandle->CBAVC_Free(avcHandle->userData, (int)(rateCtrl->pMP));
+ }
+
+ return ;
+}
+
+void RCInitGOP(AVCEncObject *encvid)
+{
+ /* in BX RC, there's no GOP-level RC */
+
+ OSCL_UNUSED_ARG(encvid);
+
+ return ;
+}
+
+
+void RCInitFrameQP(AVCEncObject *encvid)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ AVCPicParamSet *picParam = video->currPicParams;
+ MultiPass *pMP = rateCtrl->pMP;
+
+ if (rateCtrl->rcEnable == TRUE)
+ {
+ /* frame layer rate control */
+ if (rateCtrl->encoded_frames == 0)
+ {
+ video->QPy = rateCtrl->Qc = rateCtrl->initQP;
+ }
+ else
+ {
+ calculateQuantizer_Multipass(encvid, video, rateCtrl, pMP);
+ video->QPy = rateCtrl->Qc;
+ }
+
+ rateCtrl->NumberofHeaderBits = 0;
+ rateCtrl->NumberofTextureBits = 0;
+ rateCtrl->numFrameBits = 0; // reset
+
+ /* update pMP->framePos */
+ if (++pMP->framePos == pMP->frameRange) pMP->framePos = 0;
+
+ if (rateCtrl->T == 0)
+ {
+ pMP->counter_BTdst = (int)(rateCtrl->frame_rate * 7.5 + 0.5); /* 0.75s time frame */
+ pMP->counter_BTdst = AVC_MIN(pMP->counter_BTdst, (int)(rateCtrl->max_BitVariance_num / 2 * 0.40)); /* 0.75s time frame may go beyond VBV buffer if we set the buffer size smaller than 0.75s */
+ pMP->counter_BTdst = AVC_MAX(pMP->counter_BTdst, (int)((rateCtrl->Bs / 2 - rateCtrl->VBV_fullness) * 0.30 / (rateCtrl->TMN_TH / 10.0) + 0.5)); /* At least 30% of VBV buffer size/2 */
+ pMP->counter_BTdst = AVC_MIN(pMP->counter_BTdst, 20); /* Limit the target to be smaller than 3C */
+
+ pMP->target_bits = rateCtrl->T = rateCtrl->TMN_TH = (int)(rateCtrl->TMN_TH * (1.0 + pMP->counter_BTdst * 0.1));
+ pMP->diff_counter = pMP->counter_BTdst;
+ }
+
+ /* collect the necessary data: target bits, actual bits, mad and QP */
+ pMP->target_bits = rateCtrl->T;
+ pMP->QP = video->QPy;
+
+ pMP->mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs; //ComputeFrameMAD(video, rateCtrl);
+ if (pMP->mad < MAD_MIN) pMP->mad = MAD_MIN; /* MAD_MIN is defined as 1 above */
+
+ pMP->bitrate = rateCtrl->bitRate; /* calculated in RCVopQPSetting */
+ pMP->framerate = rateCtrl->frame_rate;
+
+ /* first pass encoding */
+ pMP->nRe_Quantized = 0;
+
+ } // rcEnable
+ else
+ {
+ video->QPy = rateCtrl->initQP;
+ }
+
+// printf(" %d ",video->QPy);
+
+ if (video->CurrPicNum == 0 && encvid->outOfBandParamSet == FALSE)
+ {
+ picParam->pic_init_qs_minus26 = 0;
+ picParam->pic_init_qp_minus26 = video->QPy - 26;
+ }
+
+ // need this for motion estimation
+ encvid->lambda_mode = QP2QUANT[AVC_MAX(0, video->QPy-SHIFT_QP)];
+ encvid->lambda_motion = LAMBDA_FACTOR(encvid->lambda_mode);
+ return ;
+}
+
+/* Mad based variable bit allocation + QP calculation with a new quadratic method */
+void calculateQuantizer_Multipass(AVCEncObject *encvid, AVCCommonObj *video,
+ AVCRateControl *rateCtrl, MultiPass *pMP)
+{
+ int prev_actual_bits = 0, curr_target, /*pos=0,*/i, j;
+ OsclFloat Qstep, prev_QP = 0.625;
+
+ OsclFloat curr_mad, prev_mad, curr_RD, prev_RD, average_mad, aver_QP;
+
+ /* Mad based variable bit allocation */
+ targetBitCalculation(encvid, video, rateCtrl, pMP);
+
+ if (rateCtrl->T <= 0 || rateCtrl->totalSAD == 0)
+ {
+ if (rateCtrl->T < 0) rateCtrl->Qc = RC_MAX_QUANT;
+ return;
+ }
+
+ /* ---------------------------------------------------------------------------------------------------*/
+ /* current frame QP estimation */
+ curr_target = rateCtrl->T;
+ curr_mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs;
+ if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 above */
+ curr_RD = (OsclFloat)curr_target / curr_mad;
+
+ if (rateCtrl->skip_next_frame == -1) // previous was skipped
+ {
+ i = pMP->framePos;
+ prev_mad = pMP->pRDSamples[i][0].mad;
+ prev_QP = pMP->pRDSamples[i][0].QP;
+ prev_actual_bits = pMP->pRDSamples[i][0].actual_bits;
+ }
+ else
+ {
+ /* Another version of search the optimal point */
+ prev_mad = 0.0;
+ i = 0;
+ while (i < pMP->frameRange && prev_mad < 0.001) /* find first one with nonzero prev_mad */
+ {
+ prev_mad = pMP->pRDSamples[i][0].mad;
+ i++;
+ }
+
+ if (i < pMP->frameRange)
+ {
+ prev_actual_bits = pMP->pRDSamples[i-1][0].actual_bits;
+
+ for (j = 0; i < pMP->frameRange; i++)
+ {
+ if (pMP->pRDSamples[i][0].mad != 0 &&
+ AVC_ABS(prev_mad - curr_mad) > AVC_ABS(pMP->pRDSamples[i][0].mad - curr_mad))
+ {
+ prev_mad = pMP->pRDSamples[i][0].mad;
+ prev_actual_bits = pMP->pRDSamples[i][0].actual_bits;
+ j = i;
+ }
+ }
+ prev_QP = QP2Qstep(pMP->pRDSamples[j][0].QP);
+
+ for (i = 1; i < pMP->samplesPerFrame[j]; i++)
+ {
+ if (AVC_ABS(prev_actual_bits - curr_target) > AVC_ABS(pMP->pRDSamples[j][i].actual_bits - curr_target))
+ {
+ prev_actual_bits = pMP->pRDSamples[j][i].actual_bits;
+ prev_QP = QP2Qstep(pMP->pRDSamples[j][i].QP);
+ }
+ }
+ }
+ }
+
+ // quadratic approximation
+ if (prev_mad > 0.001) // only when prev_mad is greater than 0, otherwise keep using the same QP
+ {
+ prev_RD = (OsclFloat)prev_actual_bits / prev_mad;
+ //rateCtrl->Qc = (Int)(prev_QP * sqrt(prev_actual_bits/curr_target) + 0.4);
+ if (prev_QP == 0.625) // added this to allow getting out of QP = 0 easily
+ {
+ Qstep = (int)(prev_RD / curr_RD + 0.5);
+ }
+ else
+ {
+ // rateCtrl->Qc =(Int)(prev_QP * M4VENC_SQRT(prev_RD/curr_RD) + 0.9);
+
+ if (prev_RD / curr_RD > 0.5 && prev_RD / curr_RD < 2.0)
+ Qstep = (int)(prev_QP * (sqrt(prev_RD / curr_RD) + prev_RD / curr_RD) / 2.0 + 0.9); /* Quadratic and linear approximation */
+ else
+ Qstep = (int)(prev_QP * (sqrt(prev_RD / curr_RD) + pow(prev_RD / curr_RD, 1.0 / 3.0)) / 2.0 + 0.9);
+ }
+ // lower bound on Qc should be a function of curr_mad
+ // When mad is already low, lower bound on Qc doesn't have to be small.
+ // Note, this doesn't work well for low complexity clip encoded at high bit rate
+ // it doesn't hit the target bit rate due to this QP lower bound.
+ /// if((curr_mad < 8) && (rateCtrl->Qc < 12)) rateCtrl->Qc = 12;
+ // else if((curr_mad < 128) && (rateCtrl->Qc < 3)) rateCtrl->Qc = 3;
+
+ rateCtrl->Qc = Qstep2QP(Qstep);
+
+ if (rateCtrl->Qc < RC_MIN_QUANT) rateCtrl->Qc = RC_MIN_QUANT;
+ if (rateCtrl->Qc > RC_MAX_QUANT) rateCtrl->Qc = RC_MAX_QUANT;
+ }
+
+ /* active bit resource protection */
+ aver_QP = (pMP->encoded_frames == 0 ? 0 : pMP->sum_QP / (OsclFloat)pMP->encoded_frames);
+ average_mad = (pMP->encoded_frames == 0 ? 0 : pMP->sum_mad / (OsclFloat)pMP->encoded_frames); /* this function is called from the second encoded frame */
+ if (pMP->diff_counter == 0 &&
+ ((OsclFloat)rateCtrl->Qc <= aver_QP*1.1 || curr_mad <= average_mad*1.1) &&
+ pMP->counter_BTsrc <= (pMP->counter_BTdst + (int)(pMP->framerate*1.0 + 0.5)))
+ {
+ rateCtrl->TMN_TH -= (int)(pMP->target_bits_per_frame / 10.0);
+ rateCtrl->T = rateCtrl->TMN_TH - rateCtrl->TMN_W;
+ pMP->counter_BTsrc++;
+ pMP->diff_counter--;
+ }
+
+}
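+
+/* Illustrative sketch, not part of the original change: the QP update above compares two
+   rate-distortion points, prev_RD = prev_actual_bits / prev_mad and curr_RD = curr_target /
+   curr_mad, and when their ratio is between 0.5 and 2.0 it blends a square-root (quadratic
+   model) term with a linear term to get the next step size. A hypothetical helper showing just
+   that blend (the rounding offset and QP clipping of the real code are omitted): */
+#if 0   /* illustrative only, not compiled */
+#include <math.h>
+static double nextQstep(double prev_qstep, double prev_RD, double curr_RD)
+{
+    double r = prev_RD / curr_RD;
+    return prev_qstep * (sqrt(r) + r) / 2.0;    /* quadratic and linear approximation */
+}
+/* e.g. prev_qstep = 16, r = 1.44: 16 * (1.2 + 1.44) / 2 = 21.12 -- a coarser step, i.e. fewer bits */
+#endif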
+
+void targetBitCalculation(AVCEncObject *encvid, AVCCommonObj *video, AVCRateControl *rateCtrl, MultiPass *pMP)
+{
+ OSCL_UNUSED_ARG(encvid);
+ OsclFloat curr_mad;//, average_mad;
+ int diff_counter_BTsrc, diff_counter_BTdst, prev_counter_diff, curr_counter_diff, bound;
+ /* BT = Bit Transfer, for pMP->counter_BTsrc, pMP->counter_BTdst */
+
+ /* some frame-dropping bookkeeping remains here because pMP cannot be passed into updateRateControl() */
+ updateRC_PostProc(rateCtrl, pMP);
+
+ /* update pMP->counter_BTsrc and pMP->counter_BTdst to avoid integer overflow */
+ if (pMP->counter_BTsrc > 1000 && pMP->counter_BTdst > 1000)
+ {
+ pMP->counter_BTsrc -= 1000;
+ pMP->counter_BTdst -= 1000;
+ }
+
+ /* ---------------------------------------------------------------------------------------------------*/
+ /* target calculation */
+ curr_mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs;
+ if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 above */
+ diff_counter_BTsrc = diff_counter_BTdst = 0;
+ pMP->diff_counter = 0;
+
+
+ /*1.calculate average mad */
+ pMP->sum_mad += curr_mad;
+ //average_mad = (pMP->encoded_frames < 1 ? curr_mad : pMP->sum_mad/(OsclFloat)(pMP->encoded_frames+1)); /* this function is called from the scond encoded frame*/
+ //pMP->aver_mad = average_mad;
+ if (pMP->encoded_frames >= 0) /* pMP->encoded_frames is set to -1 initially, so forget about the very first I frame */
+ pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames + curr_mad) / (pMP->encoded_frames + 1);
+
+ if (pMP->overlapped_win_size > 0 && pMP->encoded_frames_prev >= 0)
+ pMP->aver_mad_prev = (pMP->aver_mad_prev * pMP->encoded_frames_prev + curr_mad) / (pMP->encoded_frames_prev + 1);
+
+ /*2.average_mad, mad ==> diff_counter_BTsrc, diff_counter_BTdst */
+ if (pMP->overlapped_win_size == 0)
+ {
+ /* original version */
+ if (curr_mad > pMP->aver_mad*1.1)
+ {
+ if (curr_mad / (pMP->aver_mad + 0.0001) > 2)
+ diff_counter_BTdst = (int)(sqrt(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.4) - 10;
+ //diff_counter_BTdst = (int)((sqrt(curr_mad/pMP->aver_mad)*2+curr_mad/pMP->aver_mad)/(3*0.1) + 0.4) - 10;
+ else
+ diff_counter_BTdst = (int)(curr_mad / (pMP->aver_mad + 0.0001) * 10 + 0.4) - 10;
+ }
+ else /* curr_mad <= average_mad*1.1 */
+ //diff_counter_BTsrc = 10 - (int)((sqrt(curr_mad/pMP->aver_mad) + pow(curr_mad/pMP->aver_mad, 1.0/3.0))/(2.0*0.1) + 0.4);
+ diff_counter_BTsrc = 10 - (int)(sqrt(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.5);
+
+ /* actively fill in the possible gap */
+ if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&
+ curr_mad <= pMP->aver_mad*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)
+ diff_counter_BTsrc = 1;
+
+ }
+ else if (pMP->overlapped_win_size > 0)
+ {
+ /* transition time: use previous average mad "pMP->aver_mad_prev" instead of the current average mad "pMP->aver_mad" */
+ if (curr_mad > pMP->aver_mad_prev*1.1)
+ {
+ if (curr_mad / pMP->aver_mad_prev > 2)
+ diff_counter_BTdst = (int)(sqrt(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.4) - 10;
+ //diff_counter_BTdst = (int)((M4VENC_SQRT(curr_mad/pMP->aver_mad_prev)*2+curr_mad/pMP->aver_mad_prev)/(3*0.1) + 0.4) - 10;
+ else
+ diff_counter_BTdst = (int)(curr_mad / (pMP->aver_mad_prev + 0.0001) * 10 + 0.4) - 10;
+ }
+ else /* curr_mad <= average_mad*1.1 */
+ //diff_counter_BTsrc = 10 - (Int)((sqrt(curr_mad/pMP->aver_mad_prev) + pow(curr_mad/pMP->aver_mad_prev, 1.0/3.0))/(2.0*0.1) + 0.4);
+ diff_counter_BTsrc = 10 - (int)(sqrt(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.5);
+
+ /* actively fill in the possible gap */
+ if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&
+ curr_mad <= pMP->aver_mad_prev*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)
+ diff_counter_BTsrc = 1;
+
+ if (--pMP->overlapped_win_size <= 0) pMP->overlapped_win_size = 0;
+ }
+
+
+ /* if difference is too much, do clipping */
+ /* First, set the upper bound for current bit allocation variance: 80% of available buffer */
+ bound = (int)((rateCtrl->Bs / 2 - rateCtrl->VBV_fullness) * 0.6 / (pMP->target_bits_per_frame / 10)); /* rateCtrl->Bs */
+ diff_counter_BTsrc = AVC_MIN(diff_counter_BTsrc, bound);
+ diff_counter_BTdst = AVC_MIN(diff_counter_BTdst, bound);
+
+ /* Second, set another upper bound for current bit allocation: 4-5*bitrate/framerate */
+ bound = 50;
+// if(video->encParams->RC_Type == CBR_LOWDELAY)
+// not necessary: bound = 10; -- for low delay
+
+ diff_counter_BTsrc = AVC_MIN(diff_counter_BTsrc, bound);
+ diff_counter_BTdst = AVC_MIN(diff_counter_BTdst, bound);
+
+
+ /* Third, check the buffer */
+ prev_counter_diff = pMP->counter_BTdst - pMP->counter_BTsrc;
+ curr_counter_diff = prev_counter_diff + (diff_counter_BTdst - diff_counter_BTsrc);
+
+ if (AVC_ABS(prev_counter_diff) >= rateCtrl->max_BitVariance_num || AVC_ABS(curr_counter_diff) >= rateCtrl->max_BitVariance_num)
+ { //diff_counter_BTsrc = diff_counter_BTdst = 0;
+
+ if (curr_counter_diff > rateCtrl->max_BitVariance_num && diff_counter_BTdst)
+ {
+ diff_counter_BTdst = (rateCtrl->max_BitVariance_num - prev_counter_diff) + diff_counter_BTsrc;
+ if (diff_counter_BTdst < 0) diff_counter_BTdst = 0;
+ }
+
+ else if (curr_counter_diff < -rateCtrl->max_BitVariance_num && diff_counter_BTsrc)
+ {
+ diff_counter_BTsrc = diff_counter_BTdst - (-rateCtrl->max_BitVariance_num - prev_counter_diff);
+ if (diff_counter_BTsrc < 0) diff_counter_BTsrc = 0;
+ }
+ }
+
+
+ /*3.diff_counter_BTsrc, diff_counter_BTdst ==> TMN_TH */
+ rateCtrl->TMN_TH = (int)(pMP->target_bits_per_frame);
+ pMP->diff_counter = 0;
+
+ if (diff_counter_BTsrc)
+ {
+ rateCtrl->TMN_TH -= (int)(pMP->target_bits_per_frame * diff_counter_BTsrc * 0.1);
+ pMP->diff_counter = -diff_counter_BTsrc;
+ }
+ else if (diff_counter_BTdst)
+ {
+ rateCtrl->TMN_TH += (int)(pMP->target_bits_per_frame * diff_counter_BTdst * 0.1);
+ pMP->diff_counter = diff_counter_BTdst;
+ }
+
+
+ /*4.update pMP->counter_BTsrc, pMP->counter_BTdst */
+ pMP->counter_BTsrc += diff_counter_BTsrc;
+ pMP->counter_BTdst += diff_counter_BTdst;
+
+
+ /*5.target bit calculation */
+ rateCtrl->T = rateCtrl->TMN_TH - rateCtrl->TMN_W;
+
+ return ;
+}
+
+void updateRC_PostProc(AVCRateControl *rateCtrl, MultiPass *pMP)
+{
+ if (rateCtrl->skip_next_frame > 0) /* skip next frame */
+ {
+ pMP->counter_BTsrc += 10 * rateCtrl->skip_next_frame;
+
+ }
+ else if (rateCtrl->skip_next_frame == -1) /* skip current frame */
+ {
+ pMP->counter_BTdst -= pMP->diff_counter;
+ pMP->counter_BTsrc += 10;
+
+ pMP->sum_mad -= pMP->mad;
+ pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames - pMP->mad) / (pMP->encoded_frames - 1 + 0.0001);
+ pMP->sum_QP -= pMP->QP;
+ pMP->encoded_frames --;
+ }
+ /* some stuff in update VBV_fullness remains here */
+ //if(rateCtrl->VBV_fullness < -rateCtrl->Bs/2) /* rateCtrl->Bs */
+ if (rateCtrl->VBV_fullness < rateCtrl->low_bound)
+ {
+ rateCtrl->VBV_fullness = rateCtrl->low_bound; // -rateCtrl->Bs/2;
+ rateCtrl->TMN_W = rateCtrl->VBV_fullness - rateCtrl->low_bound;
+ pMP->counter_BTsrc = pMP->counter_BTdst + (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
+ }
+}
+
+
+void RCInitChromaQP(AVCEncObject *encvid)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCMacroblock *currMB = video->currMB;
+ int q_bits;
+
+ /* we have to do the same thing for AVC_CLIP3(0,51,video->QSy) */
+
+ video->QPy_div_6 = (currMB->QPy * 43) >> 8;
+ video->QPy_mod_6 = currMB->QPy - 6 * video->QPy_div_6;
+ currMB->QPc = video->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, currMB->QPy + video->currPicParams->chroma_qp_index_offset)];
+ video->QPc_div_6 = (video->QPc * 43) >> 8;
+ video->QPc_mod_6 = video->QPc - 6 * video->QPc_div_6;
+
+ /* pre-calculate this to save computation */
+ q_bits = 4 + video->QPy_div_6;
+ if (video->slice_type == AVC_I_SLICE)
+ {
+ encvid->qp_const = 682 << q_bits; // intra
+ }
+ else
+ {
+ encvid->qp_const = 342 << q_bits; // inter
+ }
+
+ q_bits = 4 + video->QPc_div_6;
+ if (video->slice_type == AVC_I_SLICE)
+ {
+ encvid->qp_const_c = 682 << q_bits; // intra
+ }
+ else
+ {
+ encvid->qp_const_c = 342 << q_bits; // inter
+ }
+
+ encvid->lambda_mode = QP2QUANT[AVC_MAX(0, currMB->QPy-SHIFT_QP)];
+ encvid->lambda_motion = LAMBDA_FACTOR(encvid->lambda_mode);
+
+ return ;
+}
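+
+/* Illustrative sketch, not part of the original change: (QP * 43) >> 8 above is a
+   multiply-and-shift replacement for QP / 6 that is exact over the AVC QP range 0..51
+   (43/256 ~= 1/6). A hypothetical self-check: */
+#if 0   /* illustrative only, not compiled */
+#include <assert.h>
+static void checkDiv6Trick(void)
+{
+    int qp;
+    for (qp = 0; qp <= 51; qp++)
+    {
+        assert(((qp * 43) >> 8) == qp / 6);
+    }
+}
+#endif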
+
+
+void RCInitMBQP(AVCEncObject *encvid)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCMacroblock *currMB = video->currMB;
+
+ currMB->QPy = video->QPy; /* set to previous value or picture level */
+
+ RCInitChromaQP(encvid);
+
+}
+
+void RCPostMB(AVCCommonObj *video, AVCRateControl *rateCtrl, int num_header_bits, int num_texture_bits)
+{
+ OSCL_UNUSED_ARG(video);
+ rateCtrl->numMBHeaderBits = num_header_bits;
+ rateCtrl->numMBTextureBits = num_texture_bits;
+ rateCtrl->NumberofHeaderBits += rateCtrl->numMBHeaderBits;
+ rateCtrl->NumberofTextureBits += rateCtrl->numMBTextureBits;
+}
+
+void RCRestoreQP(AVCMacroblock *currMB, AVCCommonObj *video, AVCEncObject *encvid)
+{
+ currMB->QPy = video->QPy; /* use previous QP */
+ RCInitChromaQP(encvid);
+
+ return ;
+}
+
+
+void RCCalculateMAD(AVCEncObject *encvid, AVCMacroblock *currMB, uint8 *orgL, int orgPitch)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ uint32 dmin_lx;
+
+ if (rateCtrl->rcEnable == TRUE)
+ {
+ if (currMB->mb_intra)
+ {
+ if (currMB->mbMode == AVC_I16)
+ {
+ dmin_lx = (0xFFFF << 16) | orgPitch;
+ rateCtrl->MADofMB[video->mbNum] = AVCSAD_Macroblock_C(orgL,
+ encvid->pred_i16[currMB->i16Mode], dmin_lx, NULL);
+ }
+ else /* i4 */
+ {
+ rateCtrl->MADofMB[video->mbNum] = encvid->i4_sad / 256.;
+ }
+ }
+ /* for INTER, we have already saved it with the MV search */
+ }
+
+ return ;
+}
+
+
+
+AVCEnc_Status RCUpdateFrame(AVCEncObject *encvid)
+{
+ AVCCommonObj *video = encvid->common;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ MultiPass *pMP = rateCtrl->pMP;
+ int diff_BTCounter;
+ int nal_type = video->nal_unit_type;
+
+ /* update the complexity weight of I, P, B frame */
+
+ if (rateCtrl->rcEnable == TRUE)
+ {
+ pMP->actual_bits = rateCtrl->numFrameBits;
+ pMP->mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs; //ComputeFrameMAD(video, rateCtrl);
+
+ AVCSaveRDSamples(pMP, 0);
+
+ pMP->encoded_frames++;
+
+ /* for pMP->samplesPerFrame */
+ pMP->samplesPerFrame[pMP->framePos] = 0;
+
+ pMP->sum_QP += pMP->QP;
+
+ /* update pMP->counter_BTsrc, pMP->counter_BTdst */
+ /* re-allocate the target bit again and then stop encoding */
+ diff_BTCounter = (int)((OsclFloat)(rateCtrl->TMN_TH - rateCtrl->TMN_W - pMP->actual_bits) /
+ (pMP->bitrate / (pMP->framerate + 0.0001) + 0.0001) / 0.1);
+ if (diff_BTCounter >= 0)
+ pMP->counter_BTsrc += diff_BTCounter; /* pMP->actual_bits is smaller */
+ else
+ pMP->counter_BTdst -= diff_BTCounter; /* pMP->actual_bits is bigger */
+
+ rateCtrl->TMN_TH -= (int)((OsclFloat)pMP->bitrate / (pMP->framerate + 0.0001) * (diff_BTCounter * 0.1));
+ rateCtrl->T = pMP->target_bits = rateCtrl->TMN_TH - rateCtrl->TMN_W;
+ pMP->diff_counter -= diff_BTCounter;
+
+ rateCtrl->Rc = rateCtrl->numFrameBits; /* Total Bits for current frame */
+ rateCtrl->Hc = rateCtrl->NumberofHeaderBits; /* Total Bits in Header and Motion Vector */
+
+ /* BX_RC */
+ updateRateControl(rateCtrl, nal_type);
+ if (rateCtrl->skip_next_frame == -1) // skip current frame
+ {
+ status = AVCENC_SKIPPED_PICTURE;
+ }
+ }
+
+ rateCtrl->first_frame = 0; // reset here after we encode the first frame.
+
+ return status;
+}
+
+void AVCSaveRDSamples(MultiPass *pMP, int counter_samples)
+{
+ /* for pMP->pRDSamples */
+ pMP->pRDSamples[pMP->framePos][counter_samples].QP = pMP->QP;
+ pMP->pRDSamples[pMP->framePos][counter_samples].actual_bits = pMP->actual_bits;
+ pMP->pRDSamples[pMP->framePos][counter_samples].mad = pMP->mad;
+ pMP->pRDSamples[pMP->framePos][counter_samples].R_D = (OsclFloat)pMP->actual_bits / (pMP->mad + 0.0001);
+
+ return ;
+}
+
+void updateRateControl(AVCRateControl *rateCtrl, int nal_type)
+{
+ int frame_bits;
+ MultiPass *pMP = rateCtrl->pMP;
+
+ /* BX rate control */
+ frame_bits = (int)(rateCtrl->bitRate / rateCtrl->frame_rate);
+ rateCtrl->TMN_W += (rateCtrl->Rc - rateCtrl->TMN_TH);
+ rateCtrl->VBV_fullness += (rateCtrl->Rc - frame_bits); //rateCtrl->Rp);
+ //if(rateCtrl->VBV_fullness < 0) rateCtrl->VBV_fullness = -1;
+
+ rateCtrl->encoded_frames++;
+
+ /* frame dropping */
+ rateCtrl->skip_next_frame = 0;
+
+ if ((rateCtrl->VBV_fullness > rateCtrl->Bs / 2) && nal_type != AVC_NALTYPE_IDR) /* skip the current frame */ /* rateCtrl->Bs */
+ {
+ rateCtrl->TMN_W -= (rateCtrl->Rc - rateCtrl->TMN_TH);
+ rateCtrl->VBV_fullness -= rateCtrl->Rc;
+ rateCtrl->skip_next_frame = -1;
+ }
+ else if ((OsclFloat)(rateCtrl->VBV_fullness - rateCtrl->VBV_fullness_offset) > (rateCtrl->Bs / 2 - rateCtrl->VBV_fullness_offset)*0.95) /* skip next frame */
+ {
+ rateCtrl->VBV_fullness -= frame_bits; //rateCtrl->Rp;
+ rateCtrl->skip_next_frame = 1;
+ pMP->counter_BTsrc -= (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
+ /* BX_1, skip more than 1 frames */
+ //while(rateCtrl->VBV_fullness > rateCtrl->Bs*0.475)
+ while ((rateCtrl->VBV_fullness - rateCtrl->VBV_fullness_offset) > (rateCtrl->Bs / 2 - rateCtrl->VBV_fullness_offset)*0.95)
+ {
+ rateCtrl->VBV_fullness -= frame_bits; //rateCtrl->Rp;
+ rateCtrl->skip_next_frame++;
+ pMP->counter_BTsrc -= (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));
+ }
+
+ /* END BX_1 */
+ }
+}
+
+
+double ComputeFrameMAD(AVCCommonObj *video, AVCRateControl *rateCtrl)
+{
+ double TotalMAD;
+ int i;
+ TotalMAD = 0.0;
+ for (i = 0; i < (int)video->PicSizeInMbs; i++)
+ TotalMAD += rateCtrl->MADofMB[i];
+ TotalMAD /= video->PicSizeInMbs;
+ return TotalMAD;
+}
+
+
+
+
+
+/* convert from QP to Qstep */
+double QP2Qstep(int QP)
+{
+ int i;
+ double Qstep;
+ static const double QP2QSTEP[6] = { 0.625, 0.6875, 0.8125, 0.875, 1.0, 1.125 };
+
+ Qstep = QP2QSTEP[QP % 6];
+ for (i = 0; i < (QP / 6); i++)
+ Qstep *= 2;
+
+ return Qstep;
+}
+
+/* convert from step size to QP */
+int Qstep2QP(double Qstep)
+{
+ int q_per = 0, q_rem = 0;
+
+ // assert( Qstep >= QP2Qstep(0) && Qstep <= QP2Qstep(51) );
+ if (Qstep < QP2Qstep(0))
+ return 0;
+ else if (Qstep > QP2Qstep(51))
+ return 51;
+
+ while (Qstep > QP2Qstep(5))
+ {
+ Qstep /= 2;
+ q_per += 1;
+ }
+
+ if (Qstep <= (0.625 + 0.6875) / 2)
+ {
+ Qstep = 0.625;
+ q_rem = 0;
+ }
+ else if (Qstep <= (0.6875 + 0.8125) / 2)
+ {
+ Qstep = 0.6875;
+ q_rem = 1;
+ }
+ else if (Qstep <= (0.8125 + 0.875) / 2)
+ {
+ Qstep = 0.8125;
+ q_rem = 2;
+ }
+ else if (Qstep <= (0.875 + 1.0) / 2)
+ {
+ Qstep = 0.875;
+ q_rem = 3;
+ }
+ else if (Qstep <= (1.0 + 1.125) / 2)
+ {
+ Qstep = 1.0;
+ q_rem = 4;
+ }
+ else
+ {
+ Qstep = 1.125;
+ q_rem = 5;
+ }
+
+ return (q_per * 6 + q_rem);
+}
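+
+/* Illustrative sketch, not part of the original change: QP2Qstep() implements
+   Qstep(QP) = QSTEP[QP % 6] * 2^(QP / 6), so the step size doubles every 6 QP, and
+   Qstep2QP() inverts it by dividing the exponent back out and snapping to the nearest
+   table entry. A hypothetical round-trip check over the valid QP range: */
+#if 0   /* illustrative only, not compiled */
+#include <assert.h>
+static void checkQpQstepRoundTrip(void)
+{
+    int qp;
+    for (qp = 0; qp <= 51; qp++)
+    {
+        /* e.g. QP 28 -> Qstep 1.0 * 2^4 = 16.0 -> QP 28 */
+        assert(Qstep2QP(QP2Qstep(qp)) == qp);
+    }
+}
+#endif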
+
+
+
diff --git a/media/libstagefright/codecs/avc/enc/src/residual.cpp b/media/libstagefright/codecs/avc/enc/src/residual.cpp
new file mode 100644
index 0000000..42eb910
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/residual.cpp
@@ -0,0 +1,389 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+
+AVCEnc_Status EncodeIntraPCM(AVCEncObject *encvid)
+{
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ AVCCommonObj *video = encvid->common;
+ AVCFrameIO *currInput = encvid->currInput;
+ AVCEncBitstream *stream = encvid->bitstream;
+ int x_position = (video->mb_x << 4);
+ int y_position = (video->mb_y << 4);
+ int orgPitch = currInput->pitch;
+ int offset1 = y_position * orgPitch + x_position;
+ int i, j;
+ int offset;
+ uint8 *pDst, *pSrc;
+ uint code;
+
+ ue_v(stream, 25);
+
+ i = stream->bit_left & 0x7;
+ if (i) /* not byte-aligned */
+ {
+ BitstreamWriteBits(stream, 0, i);
+ }
+
+ pSrc = currInput->YCbCr[0] + offset1;
+ pDst = video->currPic->Sl + offset1;
+ offset = video->PicWidthInSamplesL - 16;
+
+ /* at this point bitstream is byte-aligned */
+ j = 16;
+ while (j > 0)
+ {
+#if (WORD_SIZE==32)
+ for (i = 0; i < 4; i++)
+ {
+ code = *((uint*)pSrc);
+ pSrc += 4;
+ *((uint*)pDst) = code;
+ pDst += 4;
+ status = BitstreamWriteBits(stream, 32, code);
+ }
+#else
+ for (i = 0; i < 8; i++)
+ {
+ code = *((uint*)pSrc);
+ pSrc += 2;
+ *((uint*)pDst) = code;
+ pDst += 2;
+ status = BitstreamWriteBits(stream, 16, code);
+ }
+#endif
+ pDst += offset;
+ pSrc += offset;
+ j--;
+ }
+ if (status != AVCENC_SUCCESS) /* check only once per line */
+ return status;
+
+ pDst = video->currPic->Scb + ((offset1 + x_position) >> 2);
+ pSrc = currInput->YCbCr[1] + ((offset1 + x_position) >> 2);
+ offset >>= 1;
+
+ j = 8;
+ while (j > 0)
+ {
+#if (WORD_SIZE==32)
+ for (i = 0; i < 2; i++)
+ {
+ code = *((uint*)pSrc);
+ pSrc += 4;
+ *((uint*)pDst) = code;
+ pDst += 4;
+ status = BitstreamWriteBits(stream, 32, code);
+ }
+#else
+ for (i = 0; i < 4; i++)
+ {
+ code = *((uint*)pSrc);
+ pSrc += 2;
+ *((uint*)pDst) = code;
+ pDst += 2;
+ status = BitstreamWriteBits(stream, 16, code);
+ }
+#endif
+ pDst += offset;
+ pSrc += offset;
+ j--;
+ }
+
+ if (status != AVCENC_SUCCESS) /* check only once per line */
+ return status;
+
+ pDst = video->currPic->Scr + ((offset1 + x_position) >> 2);
+ pSrc = currInput->YCbCr[2] + ((offset1 + x_position) >> 2);
+
+ j = 8;
+ while (j > 0)
+ {
+#if (WORD_SIZE==32)
+ for (i = 0; i < 2; i++)
+ {
+ code = *((uint*)pSrc);
+ pSrc += 4;
+ *((uint*)pDst) = code;
+ pDst += 4;
+ status = BitstreamWriteBits(stream, 32, code);
+ }
+#else
+ for (i = 0; i < 4; i++)
+ {
+ code = *((uint*)pSrc);
+ pSrc += 2;
+ *((uint*)pDst) = code;
+ pDst += 2;
+ status = BitstreamWriteBits(stream, 16, code);
+ }
+#endif
+ pDst += offset;
+ pSrc += offset;
+ j--;
+ }
+
+ return status;
+}
+
+
+AVCEnc_Status enc_residual_block(AVCEncObject *encvid, AVCResidualType type, int cindx, AVCMacroblock *currMB)
+{
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ AVCCommonObj *video = encvid->common;
+ int i, maxNumCoeff, nC;
+ int cdc = 0, cac = 0;
+ int TrailingOnes;
+ AVCEncBitstream *stream = encvid->bitstream;
+ uint trailing_ones_sign_flag;
+ int zerosLeft;
+ int *level, *run;
+ int TotalCoeff;
+ const static int incVlc[] = {0, 3, 6, 12, 24, 48, 32768}; // maximum vlc = 6
+ int escape, numPrefix, sufmask, suffix, shift, sign, value, absvalue, vlcnum, level_two_or_higher;
+ int bindx = blkIdx2blkXY[cindx>>2][cindx&3] ; // raster scan index
+
+ switch (type)
+ {
+ case AVC_Luma:
+ maxNumCoeff = 16;
+ level = encvid->level[cindx];
+ run = encvid->run[cindx];
+ TotalCoeff = currMB->nz_coeff[bindx];
+ break;
+ case AVC_Intra16DC:
+ maxNumCoeff = 16;
+ level = encvid->leveldc;
+ run = encvid->rundc;
+ TotalCoeff = cindx; /* special case */
+ bindx = 0;
+ cindx = 0;
+ break;
+ case AVC_Intra16AC:
+ maxNumCoeff = 15;
+ level = encvid->level[cindx];
+ run = encvid->run[cindx];
+ TotalCoeff = currMB->nz_coeff[bindx];
+ break;
+ case AVC_ChromaDC: /* Cb and Cr DC are differentiated by cindx: cindx >= 8 selects the second chroma block */
+ maxNumCoeff = 4;
+ cdc = 1;
+ if (cindx >= 8)
+ {
+ level = encvid->levelcdc + 4;
+ run = encvid->runcdc + 4;
+ TotalCoeff = cindx - 8; /* special case */
+ }
+ else
+ {
+ level = encvid->levelcdc;
+ run = encvid->runcdc;
+ TotalCoeff = cindx; /* special case */
+ }
+ break;
+ case AVC_ChromaAC:
+ maxNumCoeff = 15;
+ cac = 1;
+ level = encvid->level[cindx];
+ run = encvid->run[cindx];
+ cindx -= 16;
+ bindx = 16 + blkIdx2blkXY[cindx>>2][cindx&3];
+ cindx += 16;
+ TotalCoeff = currMB->nz_coeff[bindx];
+ break;
+ default:
+ return AVCENC_FAIL;
+ }
+
+
+ /* find TrailingOnes */
+ TrailingOnes = 0;
+ zerosLeft = 0;
+ i = TotalCoeff - 1;
+ nC = 1;
+ while (i >= 0)
+ {
+ zerosLeft += run[i];
+ if (nC && (level[i] == 1 || level[i] == -1))
+ {
+ TrailingOnes++;
+ }
+ else
+ {
+ nC = 0;
+ }
+ i--;
+ }
+ if (TrailingOnes > 3)
+ {
+ TrailingOnes = 3; /* clip it */
+ }
+
+ if (!cdc)
+ {
+ if (!cac) /* not chroma */
+ {
+ nC = predict_nnz(video, bindx & 3, bindx >> 2);
+ }
+ else /* chroma ac but not chroma dc */
+ {
+ nC = predict_nnz_chroma(video, bindx & 3, bindx >> 2);
+ }
+
+ status = ce_TotalCoeffTrailingOnes(stream, TrailingOnes, TotalCoeff, nC);
+ }
+ else
+ {
+ nC = -1; /* Chroma DC level */
+ status = ce_TotalCoeffTrailingOnesChromaDC(stream, TrailingOnes, TotalCoeff);
+ }
+
+ /* This part is done quite differently in ReadCoef4x4_CAVLC() */
+ if (TotalCoeff > 0)
+ {
+
+ i = TotalCoeff - 1;
+
+ if (TrailingOnes) /* collect the signs of those trailing ones */
+ {
+ nC = TrailingOnes;
+ trailing_ones_sign_flag = 0;
+ while (nC)
+ {
+ trailing_ones_sign_flag <<= 1;
+ trailing_ones_sign_flag |= ((uint32)level[i--] >> 31); /* 0 or positive, 1 for negative */
+ nC--;
+ }
+
+ /* instead of writing one bit at a time, write the whole thing at once */
+ status = BitstreamWriteBits(stream, TrailingOnes, trailing_ones_sign_flag);
+ }
+
+ level_two_or_higher = 1;
+ if (TotalCoeff > 3 && TrailingOnes == 3)
+ {
+ level_two_or_higher = 0;
+ }
+
+ if (TotalCoeff > 10 && TrailingOnes < 3)
+ {
+ vlcnum = 1;
+ }
+ else
+ {
+ vlcnum = 0;
+ }
+
+ /* then do this TotalCoeff-TrailingOnes times */
+ for (i = TotalCoeff - TrailingOnes - 1; i >= 0; i--)
+ {
+ value = level[i];
+ absvalue = (value >= 0) ? value : -value;
+
+ if (level_two_or_higher)
+ {
+ if (value > 0) value--;
+ else value++;
+ level_two_or_higher = 0;
+ }
+
+ if (value >= 0)
+ {
+ sign = 0;
+ }
+ else
+ {
+ sign = 1;
+ value = -value;
+ }
+
+ if (vlcnum == 0) // VLC1
+ {
+ if (value < 8)
+ {
+ status = BitstreamWriteBits(stream, value * 2 + sign - 1, 1);
+ }
+ else if (value < 8 + 8)
+ {
+ status = BitstreamWriteBits(stream, 14 + 1 + 4, (1 << 4) | ((value - 8) << 1) | sign);
+ }
+ else
+ {
+ status = BitstreamWriteBits(stream, 14 + 2 + 12, (1 << 12) | ((value - 16) << 1) | sign) ;
+ }
+ }
+ else // VLCN
+ {
+ shift = vlcnum - 1;
+ escape = (15 << shift) + 1;
+ numPrefix = (value - 1) >> shift;
+ sufmask = ~((0xffffffff) << shift);
+ suffix = (value - 1) & sufmask;
+ if (value < escape)
+ {
+ status = BitstreamWriteBits(stream, numPrefix + vlcnum + 1, (1 << (shift + 1)) | (suffix << 1) | sign);
+ }
+ else
+ {
+ status = BitstreamWriteBits(stream, 28, (1 << 12) | ((value - escape) << 1) | sign);
+ }
+
+ }
+
+ if (absvalue > incVlc[vlcnum])
+ vlcnum++;
+
+ if (i == TotalCoeff - TrailingOnes - 1 && absvalue > 3)
+ vlcnum = 2;
+ }
+
+ if (status != AVCENC_SUCCESS) /* occasionally check the bitstream */
+ {
+ return status;
+ }
+ if (TotalCoeff < maxNumCoeff)
+ {
+ if (!cdc)
+ {
+ ce_TotalZeros(stream, zerosLeft, TotalCoeff);
+ }
+ else
+ {
+ ce_TotalZerosChromaDC(stream, zerosLeft, TotalCoeff);
+ }
+ }
+ else
+ {
+ zerosLeft = 0;
+ }
+
+ i = TotalCoeff - 1;
+ while (i > 0) /* don't do the last one */
+ {
+ if (zerosLeft > 0)
+ {
+ ce_RunBefore(stream, run[i], zerosLeft);
+ }
+
+ zerosLeft = zerosLeft - run[i];
+ i--;
+ }
+ }
+
+ return status;
+}
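+
+/* Illustrative sketch, not part of the original change: TrailingOnes above counts how many of
+   the highest-indexed nonzero levels are exactly +/-1 (clipped to 3); CAVLC signals each of
+   those with a single sign bit instead of a full level code. A simplified, hypothetical version
+   of just that count (the real loop also accumulates zerosLeft from run[]): */
+#if 0   /* illustrative only, not compiled */
+static int countTrailingOnes(const int *level, int totalCoeff)
+{
+    int t1 = 0, i = totalCoeff - 1;
+    while (i >= 0 && (level[i] == 1 || level[i] == -1) && t1 < 3)
+    {
+        t1++;
+        i--;
+    }
+    return t1;
+}
+/* e.g. level[] = {7, 1, -1, 1}, totalCoeff = 4  ==>  TrailingOnes = 3 */
+#endif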
diff --git a/media/libstagefright/codecs/avc/enc/src/sad.cpp b/media/libstagefright/codecs/avc/enc/src/sad.cpp
new file mode 100644
index 0000000..ae7acd2
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/sad.cpp
@@ -0,0 +1,290 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+#include "sad_inline.h"
+
+#define Cached_lx 176
+
+#ifdef _SAD_STAT
+uint32 num_sad_MB = 0;
+uint32 num_sad_Blk = 0;
+uint32 num_sad_MB_call = 0;
+uint32 num_sad_Blk_call = 0;
+
+#define NUM_SAD_MB_CALL() num_sad_MB_call++
+#define NUM_SAD_MB() num_sad_MB++
+#define NUM_SAD_BLK_CALL() num_sad_Blk_call++
+#define NUM_SAD_BLK() num_sad_Blk++
+
+#else
+
+#define NUM_SAD_MB_CALL()
+#define NUM_SAD_MB()
+#define NUM_SAD_BLK_CALL()
+#define NUM_SAD_BLK()
+
+#endif
+
+
+/* consist of
+int AVCSAD_Macroblock_C(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)
+int AVCSAD_MB_HTFM_Collect(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)
+int AVCSAD_MB_HTFM(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)
+*/
+
+
+/*==================================================================
+ Function: SAD_Macroblock
+ Date: 09/07/2000
+ Purpose: Compute SAD 16x16 between blk and ref.
+ To do: Uniform subsampling will be inserted later!
+ Hypothesis Testing Fast Matching to be used later!
+ Changes:
+ 11/7/00: implemented MMX
+ 1/24/01: implemented SSE
+==================================================================*/
+/********** C ************/
+int AVCSAD_Macroblock_C(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)
+{
+ (void)(extra_info);
+
+ int32 x10;
+ int dmin = (uint32)dmin_lx >> 16;
+ int lx = dmin_lx & 0xFFFF;
+
+ NUM_SAD_MB_CALL();
+
+ x10 = simd_sad_mb(ref, blk, dmin, lx);
+
+ return x10;
+}
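+
+/* Illustrative sketch, not part of the original change: the single dmin_lx argument above
+   packs the current best SAD (dmin) into the upper 16 bits and the reference line pitch (lx)
+   into the lower 16 bits, so both travel in one register. Hypothetical pack/unpack helpers
+   matching the extraction done above: */
+#if 0   /* illustrative only, not compiled */
+static int packDminLx(int dmin, int lx)
+{
+    return (dmin << 16) | (lx & 0xFFFF);    /* assumes dmin and lx each fit in 16 bits */
+}
+static void unpackDminLx(int dmin_lx, int *dmin, int *lx)
+{
+    *dmin = (uint32)dmin_lx >> 16;
+    *lx   = dmin_lx & 0xFFFF;
+}
+#endif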
+
+#ifdef HTFM /* HTFM with uniform subsampling implementation 2/28/01 */
+/*===============================================================
+ Function: AVCAVCSAD_MB_HTFM_Collect and AVCSAD_MB_HTFM
+ Date: 3/2/01
+ Purpose: Compute the SAD on a 16x16 block using
+ uniform subsampling and hypothesis testing fast matching
+ for early dropout. SAD_MB_HP_HTFM_Collect is to collect
+ the statistics to compute the thresholds to be used in
+ SAD_MB_HP_HTFM.
+ Input/Output:
+ Changes:
+ ===============================================================*/
+
+int AVCAVCSAD_MB_HTFM_Collect(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)
+{
+ int i;
+ int sad = 0;
+ uint8 *p1;
+ int lx4 = (dmin_lx << 2) & 0x3FFFC;
+ uint32 cur_word;
+ int saddata[16], tmp, tmp2; /* used when collecting flag (global) is on */
+ int difmad;
+ int madstar;
+ HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
+ int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
+ uint *countbreak = &(htfm_stat->countbreak);
+ int *offsetRef = htfm_stat->offsetRef;
+
+ madstar = (uint32)dmin_lx >> 20;
+
+ NUM_SAD_MB_CALL();
+
+ blk -= 4;
+ for (i = 0; i < 16; i++)
+ {
+ p1 = ref + offsetRef[i];
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = (cur_word >> 24) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[8];
+ tmp2 = (cur_word >> 16) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[4];
+ tmp2 = (cur_word >> 8) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[0];
+ p1 += lx4;
+ tmp2 = (cur_word & 0xFF);
+ sad = SUB_SAD(sad, tmp, tmp2);
+
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = (cur_word >> 24) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[8];
+ tmp2 = (cur_word >> 16) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[4];
+ tmp2 = (cur_word >> 8) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[0];
+ p1 += lx4;
+ tmp2 = (cur_word & 0xFF);
+ sad = SUB_SAD(sad, tmp, tmp2);
+
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = (cur_word >> 24) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[8];
+ tmp2 = (cur_word >> 16) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[4];
+ tmp2 = (cur_word >> 8) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[0];
+ p1 += lx4;
+ tmp2 = (cur_word & 0xFF);
+ sad = SUB_SAD(sad, tmp, tmp2);
+
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = (cur_word >> 24) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[8];
+ tmp2 = (cur_word >> 16) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[4];
+ tmp2 = (cur_word >> 8) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[0];
+ p1 += lx4;
+ tmp2 = (cur_word & 0xFF);
+ sad = SUB_SAD(sad, tmp, tmp2);
+
+ NUM_SAD_MB();
+
+ saddata[i] = sad;
+
+ if (i > 0)
+ {
+ if ((uint32)sad > ((uint32)dmin_lx >> 16))
+ {
+ difmad = saddata[0] - ((saddata[1] + 1) >> 1);
+ (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
+ (*countbreak)++;
+ return sad;
+ }
+ }
+ }
+
+ difmad = saddata[0] - ((saddata[1] + 1) >> 1);
+ (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
+ (*countbreak)++;
+ return sad;
+}
+
+int AVCSAD_MB_HTFM(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)
+{
+ int sad = 0;
+ uint8 *p1;
+
+ int i;
+ int tmp, tmp2;
+ int lx4 = (dmin_lx << 2) & 0x3FFFC;
+ int sadstar = 0, madstar;
+ int *nrmlz_th = (int*) extra_info;
+ int *offsetRef = (int*) extra_info + 32;
+ uint32 cur_word;
+
+ madstar = (uint32)dmin_lx >> 20;
+
+ NUM_SAD_MB_CALL();
+
+ blk -= 4;
+ for (i = 0; i < 16; i++)
+ {
+ p1 = ref + offsetRef[i];
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = (cur_word >> 24) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[8];
+ tmp2 = (cur_word >> 16) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[4];
+ tmp2 = (cur_word >> 8) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[0];
+ p1 += lx4;
+ tmp2 = (cur_word & 0xFF);
+ sad = SUB_SAD(sad, tmp, tmp2);
+
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = (cur_word >> 24) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[8];
+ tmp2 = (cur_word >> 16) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[4];
+ tmp2 = (cur_word >> 8) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[0];
+ p1 += lx4;
+ tmp2 = (cur_word & 0xFF);
+ sad = SUB_SAD(sad, tmp, tmp2);
+
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = (cur_word >> 24) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[8];
+ tmp2 = (cur_word >> 16) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[4];
+ tmp2 = (cur_word >> 8) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[0];
+ p1 += lx4;
+ tmp2 = (cur_word & 0xFF);
+ sad = SUB_SAD(sad, tmp, tmp2);
+
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = (cur_word >> 24) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[8];
+ tmp2 = (cur_word >> 16) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[4];
+ tmp2 = (cur_word >> 8) & 0xFF;
+ sad = SUB_SAD(sad, tmp, tmp2);
+ tmp = p1[0];
+ p1 += lx4;
+ tmp2 = (cur_word & 0xFF);
+ sad = SUB_SAD(sad, tmp, tmp2);
+
+ NUM_SAD_MB();
+
+ sadstar += madstar;
+ if (((uint32)sad <= ((uint32)dmin_lx >> 16)) && (sad <= (sadstar - *nrmlz_th++)))
+ ;
+ else
+ return 65536;
+ }
+
+ return sad;
+}
+#endif /* HTFM */
+
+
+
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_halfpel.cpp b/media/libstagefright/codecs/avc/enc/src/sad_halfpel.cpp
new file mode 100644
index 0000000..faf2198
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/sad_halfpel.cpp
@@ -0,0 +1,629 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+/* contains
+int AVCHalfPel1_SAD_MB(uint8 *ref,uint8 *blk,int dmin,int width,int ih,int jh)
+int AVCHalfPel2_SAD_MB(uint8 *ref,uint8 *blk,int dmin,int width)
+int AVCHalfPel1_SAD_Blk(uint8 *ref,uint8 *blk,int dmin,int width,int ih,int jh)
+int AVCHalfPel2_SAD_Blk(uint8 *ref,uint8 *blk,int dmin,int width)
+
+int AVCSAD_MB_HalfPel_C(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
+int AVCSAD_MB_HP_HTFM_Collect(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
+int AVCSAD_MB_HP_HTFM(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
+int AVCSAD_Blk_HalfPel_C(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)
+*/
+
+#include "avcenc_lib.h"
+#include "sad_halfpel_inline.h"
+
+#ifdef _SAD_STAT
+uint32 num_sad_HP_MB = 0;
+uint32 num_sad_HP_Blk = 0;
+uint32 num_sad_HP_MB_call = 0;
+uint32 num_sad_HP_Blk_call = 0;
+#define NUM_SAD_HP_MB_CALL() num_sad_HP_MB_call++
+#define NUM_SAD_HP_MB() num_sad_HP_MB++
+#define NUM_SAD_HP_BLK_CALL() num_sad_HP_Blk_call++
+#define NUM_SAD_HP_BLK() num_sad_HP_Blk++
+#else
+#define NUM_SAD_HP_MB_CALL()
+#define NUM_SAD_HP_MB()
+#define NUM_SAD_HP_BLK_CALL()
+#define NUM_SAD_HP_BLK()
+#endif
+
+
+
+/*===============================================================
+ Function: SAD_MB_HalfPel
+ Date: 09/17/2000
+ Purpose: Compute the SAD on the half-pel resolution
+ Input/Output: hmem is assumed to be a pointer to the starting
+ point of the search in the 33x33 matrix search region
+ Changes:
+ 11/7/00: implemented MMX
+ ===============================================================*/
+/*==================================================================
+ Function: AVCSAD_MB_HalfPel_C
+ Date: 04/30/2001
+ Purpose: Compute SAD 16x16 between blk and ref in halfpel
+ resolution,
+ Changes:
+ ==================================================================*/
+/* One component is half-pel */
+int AVCSAD_MB_HalfPel_Cxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
+{
+ (void)(extra_info);
+
+ int i, j;
+ int sad = 0;
+ uint8 *kk, *p1, *p2, *p3, *p4;
+// int sumref=0;
+ int temp;
+ int rx = dmin_rx & 0xFFFF;
+
+ NUM_SAD_HP_MB_CALL();
+
+ p1 = ref;
+ p2 = ref + 1;
+ p3 = ref + rx;
+ p4 = ref + rx + 1;
+ kk = blk;
+
+ for (i = 0; i < 16; i++)
+ {
+ for (j = 0; j < 16; j++)
+ {
+
+ temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;
+ sad += AVC_ABS(temp);
+ }
+
+ NUM_SAD_HP_MB();
+
+ if (sad > (int)((uint32)dmin_rx >> 16))
+ return sad;
+
+ p1 += rx;
+ p3 += rx;
+ p2 += rx;
+ p4 += rx;
+ }
+ return sad;
+}
+
+int AVCSAD_MB_HalfPel_Cyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
+{
+ (void)(extra_info);
+
+ int i, j;
+ int sad = 0;
+ uint8 *kk, *p1, *p2;
+// int sumref=0;
+ int temp;
+ int rx = dmin_rx & 0xFFFF;
+
+ NUM_SAD_HP_MB_CALL();
+
+ p1 = ref;
+ p2 = ref + rx; /* either left/right or top/bottom pixel */
+ kk = blk;
+
+ for (i = 0; i < 16; i++)
+ {
+ for (j = 0; j < 16; j++)
+ {
+
+ temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;
+ sad += AVC_ABS(temp);
+ }
+
+ NUM_SAD_HP_MB();
+
+ if (sad > (int)((uint32)dmin_rx >> 16))
+ return sad;
+ p1 += rx;
+ p2 += rx;
+ }
+ return sad;
+}
+
+int AVCSAD_MB_HalfPel_Cxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
+{
+ (void)(extra_info);
+
+ int i, j;
+ int sad = 0;
+ uint8 *kk, *p1;
+ int temp;
+ int rx = dmin_rx & 0xFFFF;
+
+ NUM_SAD_HP_MB_CALL();
+
+ p1 = ref;
+ kk = blk;
+
+ for (i = 0; i < 16; i++)
+ {
+ for (j = 0; j < 16; j++)
+ {
+
+ temp = ((p1[j] + p1[j+1] + 1) >> 1) - *kk++;
+ sad += AVC_ABS(temp);
+ }
+
+ NUM_SAD_HP_MB();
+
+ if (sad > (int)((uint32)dmin_rx >> 16))
+ return sad;
+ p1 += rx;
+ }
+ return sad;
+}
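
The three reference routines above differ only in the interpolation applied to the reference samples before the absolute difference is taken: a four-point average when both motion components are at half-pel positions, and a two-point average when only one of them is. For reference, the three cases for a single sample, with the same rounding offsets as the code (illustrative helper, not used by the encoder):

/* a: full-pel sample, b: right neighbour, c: below neighbour, d: below-right */
static int halfpel_sample(int a, int b, int c, int d, int xh, int yh)
{
    if (xh && yh)          /* both components half-pel: 4-point average, +2 bias */
        return (a + b + c + d + 2) >> 2;
    if (xh)                /* horizontal half-pel: average left/right, +1 bias   */
        return (a + b + 1) >> 1;
    if (yh)                /* vertical half-pel: average top/bottom, +1 bias     */
        return (a + c + 1) >> 1;
    return a;              /* full-pel position: no interpolation                */
}
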
+
+#ifdef HTFM /* HTFM with uniform subsampling implementation, 2/28/01 */
+
+// Check here
+int AVCAVCSAD_MB_HP_HTFM_Collectxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
+{
+ int i, j;
+ int sad = 0;
+ uint8 *p1, *p2;
+ int rx = dmin_rx & 0xFFFF;
+ int refwx4 = rx << 2;
+ int saddata[16]; /* used when collecting flag (global) is on */
+ int difmad, tmp, tmp2;
+ int madstar;
+ HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
+ int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
+ UInt *countbreak = &(htfm_stat->countbreak);
+ int *offsetRef = htfm_stat->offsetRef;
+ uint32 cur_word;
+
+ madstar = (uint32)dmin_rx >> 20;
+
+ NUM_SAD_HP_MB_CALL();
+
+ blk -= 4;
+
+ for (i = 0; i < 16; i++) /* 16 stages */
+ {
+ p1 = ref + offsetRef[i];
+ p2 = p1 + rx;
+
+ j = 4;/* 4 lines */
+ do
+ {
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12] + p2[12];
+ tmp2 = p1[13] + p2[13];
+ tmp += tmp2;
+ tmp2 = (cur_word >> 24) & 0xFF;
+ tmp += 2;
+ sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[8] + p2[8];
+ tmp2 = p1[9] + p2[9];
+ tmp += tmp2;
+ tmp2 = (cur_word >> 16) & 0xFF;
+ tmp += 2;
+ sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[4] + p2[4];
+ tmp2 = p1[5] + p2[5];
+ tmp += tmp2;
+ tmp2 = (cur_word >> 8) & 0xFF;
+ tmp += 2;
+ sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
+ tmp2 = p1[1] + p2[1];
+ tmp = p1[0] + p2[0];
+ p1 += refwx4;
+ p2 += refwx4;
+ tmp += tmp2;
+ tmp2 = (cur_word & 0xFF);
+ tmp += 2;
+ sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
+ }
+ while (--j);
+
+ NUM_SAD_HP_MB();
+
+ saddata[i] = sad;
+
+ if (i > 0)
+ {
+ if (sad > ((uint32)dmin_rx >> 16))
+ {
+ difmad = saddata[0] - ((saddata[1] + 1) >> 1);
+ (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
+ (*countbreak)++;
+ return sad;
+ }
+ }
+ }
+ difmad = saddata[0] - ((saddata[1] + 1) >> 1);
+ (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
+ (*countbreak)++;
+
+ return sad;
+}
+
+int AVCAVCSAD_MB_HP_HTFM_Collectyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
+{
+ int i, j;
+ int sad = 0;
+ uint8 *p1, *p2;
+ int rx = dmin_rx & 0xFFFF;
+ int refwx4 = rx << 2;
+ int saddata[16]; /* used when collecting flag (global) is on */
+ int difmad, tmp, tmp2;
+ int madstar;
+ HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
+ int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
+ UInt *countbreak = &(htfm_stat->countbreak);
+ int *offsetRef = htfm_stat->offsetRef;
+ uint32 cur_word;
+
+ madstar = (uint32)dmin_rx >> 20;
+
+ NUM_SAD_HP_MB_CALL();
+
+ blk -= 4;
+
+ for (i = 0; i < 16; i++) /* 16 stages */
+ {
+ p1 = ref + offsetRef[i];
+ p2 = p1 + rx;
+ j = 4;
+ do
+ {
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = p2[12];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 24) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[8];
+ tmp2 = p2[8];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 16) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[4];
+ tmp2 = p2[4];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 8) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[0];
+ p1 += refwx4;
+ tmp2 = p2[0];
+ p2 += refwx4;
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word & 0xFF);
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ }
+ while (--j);
+
+ NUM_SAD_HP_MB();
+
+ saddata[i] = sad;
+
+ if (i > 0)
+ {
+ if (sad > ((uint32)dmin_rx >> 16))
+ {
+ difmad = saddata[0] - ((saddata[1] + 1) >> 1);
+ (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
+ (*countbreak)++;
+ return sad;
+ }
+ }
+ }
+ difmad = saddata[0] - ((saddata[1] + 1) >> 1);
+ (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
+ (*countbreak)++;
+
+ return sad;
+}
+
+int AVCAVCSAD_MB_HP_HTFM_Collectxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
+{
+ int i, j;
+ int sad = 0;
+ uint8 *p1;
+ int rx = dmin_rx & 0xFFFF;
+ int refwx4 = rx << 2;
+ int saddata[16]; /* used when collecting flag (global) is on */
+ int difmad, tmp, tmp2;
+ int madstar;
+ HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;
+ int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);
+ UInt *countbreak = &(htfm_stat->countbreak);
+ int *offsetRef = htfm_stat->offsetRef;
+ uint32 cur_word;
+
+ madstar = (uint32)dmin_rx >> 20;
+
+ NUM_SAD_HP_MB_CALL();
+
+ blk -= 4;
+
+ for (i = 0; i < 16; i++) /* 16 stages */
+ {
+ p1 = ref + offsetRef[i];
+
+ j = 4; /* 4 lines */
+ do
+ {
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = p1[13];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 24) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[8];
+ tmp2 = p1[9];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 16) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[4];
+ tmp2 = p1[5];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 8) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[0];
+ tmp2 = p1[1];
+ p1 += refwx4;
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word & 0xFF);
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ }
+ while (--j);
+
+ NUM_SAD_HP_MB();
+
+ saddata[i] = sad;
+
+ if (i > 0)
+ {
+ if (sad > ((uint32)dmin_rx >> 16))
+ {
+ difmad = saddata[0] - ((saddata[1] + 1) >> 1);
+ (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
+ (*countbreak)++;
+ return sad;
+ }
+ }
+ }
+ difmad = saddata[0] - ((saddata[1] + 1) >> 1);
+ (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);
+ (*countbreak)++;
+
+ return sad;
+}
+
+int AVCSAD_MB_HP_HTFMxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
+{
+ int i, j;
+ int sad = 0, tmp, tmp2;
+ uint8 *p1, *p2;
+ int rx = dmin_rx & 0xFFFF;
+ int refwx4 = rx << 2;
+ int sadstar = 0, madstar;
+ int *nrmlz_th = (int*) extra_info;
+ int *offsetRef = nrmlz_th + 32;
+ uint32 cur_word;
+
+ madstar = (uint32)dmin_rx >> 20;
+
+ NUM_SAD_HP_MB_CALL();
+
+ blk -= 4;
+
+ for (i = 0; i < 16; i++) /* 16 stages */
+ {
+ p1 = ref + offsetRef[i];
+ p2 = p1 + rx;
+
+ j = 4; /* 4 lines */
+ do
+ {
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12] + p2[12];
+ tmp2 = p1[13] + p2[13];
+ tmp += tmp2;
+ tmp2 = (cur_word >> 24) & 0xFF;
+ tmp += 2;
+ sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[8] + p2[8];
+ tmp2 = p1[9] + p2[9];
+ tmp += tmp2;
+ tmp2 = (cur_word >> 16) & 0xFF;
+ tmp += 2;
+ sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[4] + p2[4];
+ tmp2 = p1[5] + p2[5];
+ tmp += tmp2;
+ tmp2 = (cur_word >> 8) & 0xFF;
+ tmp += 2;
+ sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
+ tmp2 = p1[1] + p2[1];
+ tmp = p1[0] + p2[0];
+ p1 += refwx4;
+ p2 += refwx4;
+ tmp += tmp2;
+ tmp2 = (cur_word & 0xFF);
+ tmp += 2;
+ sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;
+ }
+ while (--j);
+
+ NUM_SAD_HP_MB();
+
+ sadstar += madstar;
+ if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))
+ {
+ return 65536;
+ }
+ }
+
+ return sad;
+}
+
+int AVCSAD_MB_HP_HTFMyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
+{
+ int i, j;
+ int sad = 0, tmp, tmp2;
+ uint8 *p1, *p2;
+ int rx = dmin_rx & 0xFFFF;
+ int refwx4 = rx << 2;
+ int sadstar = 0, madstar;
+ int *nrmlz_th = (int*) extra_info;
+ int *offsetRef = nrmlz_th + 32;
+ uint32 cur_word;
+
+ madstar = (uint32)dmin_rx >> 20;
+
+ NUM_SAD_HP_MB_CALL();
+
+ blk -= 4;
+
+ for (i = 0; i < 16; i++) /* 16 stages */
+ {
+ p1 = ref + offsetRef[i];
+ p2 = p1 + rx;
+ j = 4;
+ do
+ {
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = p2[12];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 24) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[8];
+ tmp2 = p2[8];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 16) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[4];
+ tmp2 = p2[4];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 8) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[0];
+ p1 += refwx4;
+ tmp2 = p2[0];
+ p2 += refwx4;
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word & 0xFF);
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ }
+ while (--j);
+
+ NUM_SAD_HP_MB();
+ sadstar += madstar;
+ if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))
+ {
+ return 65536;
+ }
+ }
+
+ return sad;
+}
+
+int AVCSAD_MB_HP_HTFMxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)
+{
+ int i, j;
+ int sad = 0, tmp, tmp2;
+ uint8 *p1;
+ int rx = dmin_rx & 0xFFFF;
+ int refwx4 = rx << 2;
+ int sadstar = 0, madstar;
+ int *nrmlz_th = (int*) extra_info;
+ int *offsetRef = nrmlz_th + 32;
+ uint32 cur_word;
+
+ madstar = (uint32)dmin_rx >> 20;
+
+ NUM_SAD_HP_MB_CALL();
+
+ blk -= 4;
+
+ for (i = 0; i < 16; i++) /* 16 stages */
+ {
+ p1 = ref + offsetRef[i];
+
+ j = 4;/* 4 lines */
+ do
+ {
+ cur_word = *((uint32*)(blk += 4));
+ tmp = p1[12];
+ tmp2 = p1[13];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 24) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[8];
+ tmp2 = p1[9];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 16) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[4];
+ tmp2 = p1[5];
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word >> 8) & 0xFF;
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ tmp = p1[0];
+ tmp2 = p1[1];
+ p1 += refwx4;
+ tmp++;
+ tmp2 += tmp;
+ tmp = (cur_word & 0xFF);
+ sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;
+ }
+ while (--j);
+
+ NUM_SAD_HP_MB();
+
+ sadstar += madstar;
+ if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))
+ {
+ return 65536;
+ }
+ }
+
+ return sad;
+}
+
+#endif /* HTFM */
+
+
+
+
+
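
A note on the packed dmin_rx argument used throughout this file: the low 16 bits carry the reference pitch, the upper bits the current best SAD, and madstar is that SAD divided by 16 (the average per-stage budget for the HTFM variants). A small sketch of the presumed packing convention; the names are illustrative and the actual packing is done by callers outside this hunk:

#include <stdint.h>

static int pack_dmin_rx(int dmin, int rx)
{
    return (int)(((uint32_t)dmin << 16) | ((uint32_t)rx & 0xFFFFu));
}

static void unpack_dmin_rx(int dmin_rx, int *dmin, int *rx, int *madstar)
{
    *rx      = dmin_rx & 0xFFFF;                /* as in: rx = dmin_rx & 0xFFFF    */
    *dmin    = (int)((uint32_t)dmin_rx >> 16);  /* as in: (uint32)dmin_rx >> 16    */
    *madstar = (int)((uint32_t)dmin_rx >> 20);  /* as in: madstar = dmin_rx >> 20,
                                                   i.e. dmin / 16                  */
}
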
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h b/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h
new file mode 100644
index 0000000..3a21647
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h
@@ -0,0 +1,96 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+
+#ifndef _SAD_HALFPEL_INLINE_H_
+#define _SAD_HALFPEL_INLINE_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#if defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER: portable C versions (this test is identical to the inline-asm branch below, so that branch is never compiled) */
+
+ __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
+ {
+ tmp = (tmp2 >> 1) - tmp;
+ if (tmp > 0) sad += tmp;
+ else sad -= tmp;
+
+ return sad;
+ }
+
+ __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
+ {
+ tmp = (tmp >> 2) - tmp2;
+ if (tmp > 0) sad += tmp;
+ else sad -= tmp;
+
+ return sad;
+ }
+
+#elif defined(__CC_ARM) /* only work with arm v5 */
+
+ __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
+ {
+ __asm
+ {
+ rsbs tmp, tmp, tmp2, asr #1 ;
+ rsbmi tmp, tmp, #0 ;
+ add sad, sad, tmp ;
+ }
+
+ return sad;
+ }
+
+ __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
+ {
+ __asm
+ {
+ rsbs tmp, tmp2, tmp, asr #2 ;
+ rsbmi tmp, tmp, #0 ;
+ add sad, sad, tmp ;
+ }
+
+ return sad;
+ }
+
+#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER */
+
+ __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
+ {
+__asm__ volatile("rsbs %1, %1, %2, asr #1\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad), "=r"(tmp): "r"(tmp2));
+
+ return sad;
+ }
+
+ __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
+ {
+__asm__ volatile("rsbs %1, %2, %1, asr #2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad), "=r"(tmp): "r"(tmp2));
+
+ return sad;
+ }
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_SAD_HALFPEL_INLINE_H_
+
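
The portable C definitions above make the intent of the two helpers clear: given the pre-biased sums computed at the call sites, they accumulate the absolute difference between an interpolated reference sample and a source pixel. A stand-alone restatement that can serve as a host-side reference without either ARM tool chain (illustrative names):

#include <stdlib.h>

static int interp1_sub_sad_ref(int sad, int pix, int sum2)   /* 2-tap case */
{
    return sad + abs((sum2 >> 1) - pix);   /* sum2 = a + b + 1 at the call sites */
}

static int interp2_sub_sad_ref(int sad, int sum4, int pix)   /* 4-tap case */
{
    return sad + abs((sum4 >> 2) - pix);   /* sum4 = a + b + c + d + 2           */
}

/* e.g. with a = 200, b = 100, pix = 90:
   interp1_sub_sad_ref(0, 90, 200 + 100 + 1) == abs(150 - 90) == 60 */
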
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_inline.h b/media/libstagefright/codecs/avc/enc/src/sad_inline.h
new file mode 100644
index 0000000..f39794f
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/sad_inline.h
@@ -0,0 +1,488 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#ifndef _SAD_INLINE_H_
+#define _SAD_INLINE_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#if defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER: portable C versions (this test is identical to the inline-asm branch below, so that branch is never compiled) */
+
+ __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
+ {
+ tmp = tmp - tmp2;
+ if (tmp > 0) sad += tmp;
+ else sad -= tmp;
+
+ return sad;
+ }
+
+ __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
+ {
+ int32 x7;
+
+ x7 = src2 ^ src1; /* check odd/even combination */
+ if ((uint32)src2 >= (uint32)src1)
+ {
+ src1 = src2 - src1; /* subs */
+ }
+ else
+ {
+ src1 = src1 - src2;
+ }
+ x7 = x7 ^ src1; /* only odd bytes need to add carry */
+ x7 = mask & ((uint32)x7 >> 1);
+ x7 = (x7 << 8) - x7;
+ src1 = src1 + (x7 >> 7); /* add 0xFF to the negative byte, add back carry */
+ src1 = src1 ^(x7 >> 7); /* take absolute value of negative byte */
+
+ return src1;
+ }
+
+#define NUMBER 3
+#define SHIFT 24
+
+#include "sad_mb_offset.h"
+
+#undef NUMBER
+#define NUMBER 2
+#undef SHIFT
+#define SHIFT 16
+#include "sad_mb_offset.h"
+
+#undef NUMBER
+#define NUMBER 1
+#undef SHIFT
+#define SHIFT 8
+#include "sad_mb_offset.h"
+
+
+ __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)
+ {
+ int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
+
+ x9 = 0x80808080; /* const. */
+
+ x8 = (uint32)ref & 0x3;
+ if (x8 == 3)
+ goto SadMBOffset3;
+ if (x8 == 2)
+ goto SadMBOffset2;
+ if (x8 == 1)
+ goto SadMBOffset1;
+
+// x5 = (x4<<8)-x4; /* x5 = x4*255; */
+ x4 = x5 = 0;
+
+ x6 = 0xFFFF00FF;
+
+ ref -= lx;
+ blk -= 16;
+
+ x8 = 16;
+
+LOOP_SAD0:
+ /****** process 8 pixels ******/
+ x10 = *((uint32*)(ref += lx));
+ x11 = *((uint32*)(ref + 4));
+ x12 = *((uint32*)(blk += 16));
+ x14 = *((uint32*)(blk + 4));
+
+ /* process x11 & x14 */
+ x11 = sad_4pixel(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixel(x10, x12, x9);
+
+ x5 = x5 + x10; /* accumulate low bytes */
+ x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
+ x5 = x5 + x11; /* accumulate low bytes */
+ x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
+
+ /****** process 8 pixels ******/
+ x10 = *((uint32*)(ref + 8));
+ x11 = *((uint32*)(ref + 12));
+ x12 = *((uint32*)(blk + 8));
+ x14 = *((uint32*)(blk + 12));
+
+ /* process x11 & x14 */
+ x11 = sad_4pixel(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixel(x10, x12, x9);
+
+ x5 = x5 + x10; /* accumulate low bytes */
+ x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
+ x5 = x5 + x11; /* accumulate low bytes */
+ x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
+
+ /****************/
+ x10 = x5 - (x4 << 8); /* extract low bytes */
+ x10 = x10 + x4; /* add with high bytes */
+ x10 = x10 + (x10 << 16); /* add with lower half word */
+
+ if ((int)((uint32)x10 >> 16) <= dmin) /* compare with dmin */
+ {
+ if (--x8)
+ {
+ goto LOOP_SAD0;
+ }
+
+ }
+
+ return ((uint32)x10 >> 16);
+
+SadMBOffset3:
+
+ return sad_mb_offset3(ref, blk, lx, dmin);
+
+SadMBOffset2:
+
+ return sad_mb_offset2(ref, blk, lx, dmin);
+
+SadMBOffset1:
+
+ return sad_mb_offset1(ref, blk, lx, dmin);
+
+ }
+
+#elif defined(__CC_ARM) /* only work with arm v5 */
+
+ __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
+ {
+ __asm
+ {
+ rsbs tmp, tmp, tmp2 ;
+ rsbmi tmp, tmp, #0 ;
+ add sad, sad, tmp ;
+ }
+
+ return sad;
+ }
+
+ __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
+ {
+ int32 x7;
+
+ __asm
+ {
+ EOR x7, src2, src1; /* check odd/even combination */
+ SUBS src1, src2, src1;
+ EOR x7, x7, src1;
+ AND x7, mask, x7, lsr #1;
+ ORRCC x7, x7, #0x80000000;
+ RSB x7, x7, x7, lsl #8;
+ ADD src1, src1, x7, asr #7; /* add 0xFF to the negative byte, add back carry */
+ EOR src1, src1, x7, asr #7; /* take absolute value of negative byte */
+ }
+
+ return src1;
+ }
+
+ __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)
+ {
+ int32 x7;
+
+ __asm
+ {
+ EOR x7, src2, src1; /* check odd/even combination */
+ ADDS src1, src2, src1;
+ EOR x7, x7, src1; /* only odd bytes need to add carry */
+ ANDS x7, mask, x7, rrx;
+ RSB x7, x7, x7, lsl #8;
+ SUB src1, src1, x7, asr #7; /* add 0xFF to the negative byte, add back carry */
+ EOR src1, src1, x7, asr #7; /* take absolute value of negative byte */
+ }
+
+ return src1;
+ }
+
+#define sum_accumulate __asm{ SBC x5, x5, x10; /* accumulate low bytes */ \
+ BIC x10, x6, x10; /* x10 & 0xFF00FF00 */ \
+ ADD x4, x4, x10,lsr #8; /* accumulate high bytes */ \
+ SBC x5, x5, x11; /* accumulate low bytes */ \
+ BIC x11, x6, x11; /* x11 & 0xFF00FF00 */ \
+ ADD x4, x4, x11,lsr #8; } /* accumulate high bytes */
+
+
+#define NUMBER 3
+#define SHIFT 24
+#define INC_X8 0x08000001
+
+#include "sad_mb_offset.h"
+
+#undef NUMBER
+#define NUMBER 2
+#undef SHIFT
+#define SHIFT 16
+#undef INC_X8
+#define INC_X8 0x10000001
+#include "sad_mb_offset.h"
+
+#undef NUMBER
+#define NUMBER 1
+#undef SHIFT
+#define SHIFT 8
+#undef INC_X8
+#define INC_X8 0x08000001
+#include "sad_mb_offset.h"
+
+
+ __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)
+ {
+ int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
+
+ x9 = 0x80808080; /* const. */
+ x4 = x5 = 0;
+
+ __asm
+ {
+ MOVS x8, ref, lsl #31 ;
+ BHI SadMBOffset3;
+ BCS SadMBOffset2;
+ BMI SadMBOffset1;
+
+ MVN x6, #0xFF00;
+ }
+LOOP_SAD0:
+ /****** process 8 pixels ******/
+ x11 = *((int32*)(ref + 12));
+ x10 = *((int32*)(ref + 8));
+ x14 = *((int32*)(blk + 12));
+ x12 = *((int32*)(blk + 8));
+
+ /* process x11 & x14 */
+ x11 = sad_4pixel(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixel(x10, x12, x9);
+
+ x5 = x5 + x10; /* accumulate low bytes */
+ x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
+ x5 = x5 + x11; /* accumulate low bytes */
+ x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
+
+ __asm
+ {
+ /****** process 8 pixels ******/
+ LDR x11, [ref, #4];
+ LDR x10, [ref], lx ;
+ LDR x14, [blk, #4];
+ LDR x12, [blk], #16 ;
+ }
+
+ /* process x11 & x14 */
+ x11 = sad_4pixel(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixel(x10, x12, x9);
+
+ x5 = x5 + x10; /* accumulate low bytes */
+ x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
+ x5 = x5 + x11; /* accumulate low bytes */
+ x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
+
+ /****************/
+ x10 = x5 - (x4 << 8); /* extract low bytes */
+ x10 = x10 + x4; /* add with high bytes */
+ x10 = x10 + (x10 << 16); /* add with lower half word */
+
+ __asm
+ {
+ /****************/
+ RSBS x11, dmin, x10, lsr #16;
+ ADDLSS x8, x8, #0x10000001;
+ BLS LOOP_SAD0;
+ }
+
+ return ((uint32)x10 >> 16);
+
+SadMBOffset3:
+
+ return sad_mb_offset3(ref, blk, lx, dmin, x8);
+
+SadMBOffset2:
+
+ return sad_mb_offset2(ref, blk, lx, dmin, x8);
+
+SadMBOffset1:
+
+ return sad_mb_offset1(ref, blk, lx, dmin, x8);
+ }
+
+
+#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER */
+
+ __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)
+ {
+__asm__ volatile("rsbs %1, %1, %2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad): "r"(tmp), "r"(tmp2));
+ return sad;
+ }
+
+ __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)
+ {
+ int32 x7;
+
+__asm__ volatile("EOR %1, %2, %0\n\tSUBS %0, %2, %0\n\tEOR %1, %1, %0\n\tAND %1, %3, %1, lsr #1\n\tORRCC %1, %1, #0x80000000\n\tRSB %1, %1, %1, lsl #8\n\tADD %0, %0, %1, asr #7\n\tEOR %0, %0, %1, asr #7": "=r"(src1), "=&r"(x7): "r"(src2), "r"(mask));
+
+ return src1;
+ }
+
+ __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)
+ {
+ int32 x7;
+
+__asm__ volatile("EOR %1, %2, %0\n\tADDS %0, %2, %0\n\tEOR %1, %1, %0\n\tANDS %1, %3, %1, rrx\n\tRSB %1, %1, %1, lsl #8\n\tSUB %0, %0, %1, asr #7\n\tEOR %0, %0, %1, asr #7": "=r"(src1), "=&r"(x7): "r"(src2), "r"(mask));
+
+ return src1;
+ }
+
+#define sum_accumulate __asm__ volatile("SBC %0, %0, %1\n\tBIC %1, %4, %1\n\tADD %2, %2, %1, lsr #8\n\tSBC %0, %0, %3\n\tBIC %3, %4, %3\n\tADD %2, %2, %3, lsr #8": "=&r" (x5), "=&r" (x10), "=&r" (x4), "=&r" (x11): "r" (x6));
+
+#define NUMBER 3
+#define SHIFT 24
+#define INC_X8 0x08000001
+
+#include "sad_mb_offset.h"
+
+#undef NUMBER
+#define NUMBER 2
+#undef SHIFT
+#define SHIFT 16
+#undef INC_X8
+#define INC_X8 0x10000001
+#include "sad_mb_offset.h"
+
+#undef NUMBER
+#define NUMBER 1
+#undef SHIFT
+#define SHIFT 8
+#undef INC_X8
+#define INC_X8 0x08000001
+#include "sad_mb_offset.h"
+
+
+ __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)
+ {
+ int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
+
+ x9 = 0x80808080; /* const. */
+ x4 = x5 = 0;
+
+ x8 = (uint32)ref & 0x3;
+ if (x8 == 3)
+ goto SadMBOffset3;
+ if (x8 == 2)
+ goto SadMBOffset2;
+ if (x8 == 1)
+ goto SadMBOffset1;
+
+ x8 = 16;
+///
+__asm__ volatile("MVN %0, #0xFF00": "=r"(x6));
+
+LOOP_SAD0:
+ /****** process 8 pixels ******/
+ x11 = *((int32*)(ref + 12));
+ x10 = *((int32*)(ref + 8));
+ x14 = *((int32*)(blk + 12));
+ x12 = *((int32*)(blk + 8));
+
+ /* process x11 & x14 */
+ x11 = sad_4pixel(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixel(x10, x12, x9);
+
+ x5 = x5 + x10; /* accumulate low bytes */
+ x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
+ x5 = x5 + x11; /* accumulate low bytes */
+ x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
+
+ /****** process 8 pixels ******/
+ x11 = *((int32*)(ref + 4));
+__asm__ volatile("LDR %0, [%1], %2": "=&r"(x10), "=r"(ref): "r"(lx));
+ //x10 = *((int32*)ref); ref+=lx;
+ x14 = *((int32*)(blk + 4));
+__asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));
+
+ /* process x11 & x14 */
+ x11 = sad_4pixel(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixel(x10, x12, x9);
+
+ x5 = x5 + x10; /* accumulate low bytes */
+ x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
+ x5 = x5 + x11; /* accumulate low bytes */
+ x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
+
+ /****************/
+ x10 = x5 - (x4 << 8); /* extract low bytes */
+ x10 = x10 + x4; /* add with high bytes */
+ x10 = x10 + (x10 << 16); /* add with lower half word */
+
+ /****************/
+
+ if (((uint32)x10 >> 16) <= dmin) /* compare with dmin */
+ {
+ if (--x8)
+ {
+ goto LOOP_SAD0;
+ }
+
+ }
+
+ return ((uint32)x10 >> 16);
+
+SadMBOffset3:
+
+ return sad_mb_offset3(ref, blk, lx, dmin);
+
+SadMBOffset2:
+
+ return sad_mb_offset2(ref, blk, lx, dmin);
+
+SadMBOffset1:
+
+ return sad_mb_offset1(ref, blk, lx, dmin);
+ }
+
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // _SAD_INLINE_H_
+
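
sad_4pixel() in the C branch above is a SIMD-within-a-register trick: it returns the four per-byte absolute differences of two 32-bit words packed into a single word, which the caller then splits into low-byte and high-byte accumulators so sixteen rows can be summed without overflow. A stand-alone transcription of that branch plus a naive per-byte reference it can be checked against on any host (sketch only, not part of the build):

#include <stdint.h>
#include <stdlib.h>

static uint32_t packed_absdiff_swar(uint32_t src1, uint32_t src2)
{
    uint32_t x7 = src2 ^ src1;                    /* record differing bits          */
    src1 = (src2 >= src1) ? (src2 - src1) : (src1 - src2);
    x7 ^= src1;                                   /* borrow-in bits of the subtract */
    x7 = 0x80808080u & (x7 >> 1);                 /* flag bytes that borrowed       */
    x7 = (x7 << 8) - x7;                          /* flag * 255                     */
    src1 += (x7 >> 7);                            /* add 0xFF to negative bytes     */
    src1 ^= (x7 >> 7);                            /* ...and complement them         */
    return src1;                                  /* byte i == |a_i - b_i|          */
}

static uint32_t packed_absdiff_naive(uint32_t a, uint32_t b)
{
    uint32_t out = 0;
    for (int i = 0; i < 4; i++)
    {
        int da = (int)((a >> (8 * i)) & 0xFF);
        int db = (int)((b >> (8 * i)) & 0xFF);
        out |= (uint32_t)abs(da - db) << (8 * i);
    }
    return out;
}

/* e.g. packed_absdiff_swar(0x10FF3C80u, 0x7F003D7Eu)
        == packed_absdiff_naive(0x10FF3C80u, 0x7F003D7Eu) == 0x6FFF0102 */
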
diff --git a/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h b/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h
new file mode 100644
index 0000000..d5d4a42
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h
@@ -0,0 +1,311 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+
+#if defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER: portable C versions (this test is identical to the inline-asm branch below, so that branch is never compiled) */
+
+#if (NUMBER==3)
+__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin)
+#elif (NUMBER==2)
+__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin)
+#elif (NUMBER==1)
+__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin)
+#endif
+{
+ int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
+
+ // x5 = (x4<<8) - x4;
+ x4 = x5 = 0;
+ x6 = 0xFFFF00FF;
+ x9 = 0x80808080; /* const. */
+ ref -= NUMBER; /* bic ref, ref, #3 */
+ ref -= lx;
+ blk -= 16;
+ x8 = 16;
+
+#if (NUMBER==3)
+LOOP_SAD3:
+#elif (NUMBER==2)
+LOOP_SAD2:
+#elif (NUMBER==1)
+LOOP_SAD1:
+#endif
+ /****** process 8 pixels ******/
+ x10 = *((uint32*)(ref += lx)); /* D C B A */
+ x11 = *((uint32*)(ref + 4)); /* H G F E */
+ x12 = *((uint32*)(ref + 8)); /* L K J I */
+
+ x10 = ((uint32)x10 >> SHIFT); /* 0 0 0 D */
+ x10 = x10 | (x11 << (32 - SHIFT)); /* G F E D */
+ x11 = ((uint32)x11 >> SHIFT); /* 0 0 0 H */
+ x11 = x11 | (x12 << (32 - SHIFT)); /* K J I H */
+
+ x12 = *((uint32*)(blk += 16));
+ x14 = *((uint32*)(blk + 4));
+
+ /* process x11 & x14 */
+ x11 = sad_4pixel(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixel(x10, x12, x9);
+
+ x5 = x5 + x10; /* accumulate low bytes */
+ x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
+ x5 = x5 + x11; /* accumulate low bytes */
+ x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
+
+ /****** process 8 pixels ******/
+ x10 = *((uint32*)(ref + 8)); /* D C B A */
+ x11 = *((uint32*)(ref + 12)); /* H G F E */
+ x12 = *((uint32*)(ref + 16)); /* L K J I */
+
+    x10 = ((uint32)x10 >> SHIFT); /* 0 0 0 D */
+    x10 = x10 | (x11 << (32 - SHIFT)); /* G F E D */
+    x11 = ((uint32)x11 >> SHIFT); /* 0 0 0 H */
+    x11 = x11 | (x12 << (32 - SHIFT)); /* K J I H */
+
+ x12 = *((uint32*)(blk + 8));
+ x14 = *((uint32*)(blk + 12));
+
+ /* process x11 & x14 */
+ x11 = sad_4pixel(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixel(x10, x12, x9);
+
+ x5 = x5 + x10; /* accumulate low bytes */
+ x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */
+ x5 = x5 + x11; /* accumulate low bytes */
+ x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */
+ x4 = x4 + ((uint32)x11 >> 8); /* accumulate high bytes */
+
+ /****************/
+ x10 = x5 - (x4 << 8); /* extract low bytes */
+ x10 = x10 + x4; /* add with high bytes */
+ x10 = x10 + (x10 << 16); /* add with lower half word */
+
+ if ((int)((uint32)x10 >> 16) <= dmin) /* compare with dmin */
+ {
+ if (--x8)
+ {
+#if (NUMBER==3)
+ goto LOOP_SAD3;
+#elif (NUMBER==2)
+ goto LOOP_SAD2;
+#elif (NUMBER==1)
+ goto LOOP_SAD1;
+#endif
+ }
+
+ }
+
+ return ((uint32)x10 >> 16);
+}
+
+#elif defined(__CC_ARM) /* only work with arm v5 */
+
+#if (NUMBER==3)
+__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)
+#elif (NUMBER==2)
+__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)
+#elif (NUMBER==1)
+__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)
+#endif
+{
+ int32 x4, x5, x6, x9, x10, x11, x12, x14;
+
+ x9 = 0x80808080; /* const. */
+ x4 = x5 = 0;
+
+ __asm{
+ MVN x6, #0xff0000;
+#if (NUMBER==3)
+LOOP_SAD3:
+#elif (NUMBER==2)
+LOOP_SAD2:
+#elif (NUMBER==1)
+LOOP_SAD1:
+#endif
+ BIC ref, ref, #3;
+ }
+ /****** process 8 pixels ******/
+ x11 = *((int32*)(ref + 12));
+ x12 = *((int32*)(ref + 16));
+ x10 = *((int32*)(ref + 8));
+ x14 = *((int32*)(blk + 12));
+
+ __asm{
+ MVN x10, x10, lsr #SHIFT;
+ BIC x10, x10, x11, lsl #(32-SHIFT);
+ MVN x11, x11, lsr #SHIFT;
+ BIC x11, x11, x12, lsl #(32-SHIFT);
+
+ LDR x12, [blk, #8];
+ }
+
+ /* process x11 & x14 */
+ x11 = sad_4pixelN(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixelN(x10, x12, x9);
+
+ sum_accumulate;
+
+ __asm{
+ /****** process 8 pixels ******/
+ LDR x11, [ref, #4];
+ LDR x12, [ref, #8];
+ LDR x10, [ref], lx ;
+ LDR x14, [blk, #4];
+
+ MVN x10, x10, lsr #SHIFT;
+ BIC x10, x10, x11, lsl #(32-SHIFT);
+ MVN x11, x11, lsr #SHIFT;
+ BIC x11, x11, x12, lsl #(32-SHIFT);
+
+ LDR x12, [blk], #16;
+ }
+
+ /* process x11 & x14 */
+ x11 = sad_4pixelN(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixelN(x10, x12, x9);
+
+ sum_accumulate;
+
+ /****************/
+ x10 = x5 - (x4 << 8); /* extract low bytes */
+ x10 = x10 + x4; /* add with high bytes */
+ x10 = x10 + (x10 << 16); /* add with lower half word */
+
+ __asm{
+ RSBS x11, dmin, x10, lsr #16
+ ADDLSS x8, x8, #INC_X8
+#if (NUMBER==3)
+ BLS LOOP_SAD3;
+#elif (NUMBER==2)
+        BLS  LOOP_SAD2;
+#elif (NUMBER==1)
+        BLS  LOOP_SAD1;
+#endif
+ }
+
+ return ((uint32)x10 >> 16);
+}
+
+#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER */
+
+#if (NUMBER==3)
+__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin)
+#elif (NUMBER==2)
+__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin)
+#elif (NUMBER==1)
+__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin)
+#endif
+{
+ int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;
+
+ x9 = 0x80808080; /* const. */
+ x4 = x5 = 0;
+ x8 = 16; //<<===========*******
+
+__asm__ volatile("MVN %0, #0xFF0000": "=r"(x6));
+
+#if (NUMBER==3)
+LOOP_SAD3:
+#elif (NUMBER==2)
+LOOP_SAD2:
+#elif (NUMBER==1)
+LOOP_SAD1:
+#endif
+__asm__ volatile("BIC %0, %0, #3": "=r"(ref));
+ /****** process 8 pixels ******/
+ x11 = *((int32*)(ref + 12));
+ x12 = *((int32*)(ref + 16));
+ x10 = *((int32*)(ref + 8));
+ x14 = *((int32*)(blk + 12));
+
+#if (SHIFT==8)
+__asm__ volatile("MVN %0, %0, lsr #8\n\tBIC %0, %0, %1,lsl #24\n\tMVN %1, %1,lsr #8\n\tBIC %1, %1, %2,lsl #24": "=&r"(x10), "=&r"(x11): "r"(x12));
+#elif (SHIFT==16)
+__asm__ volatile("MVN %0, %0, lsr #16\n\tBIC %0, %0, %1,lsl #16\n\tMVN %1, %1,lsr #16\n\tBIC %1, %1, %2,lsl #16": "=&r"(x10), "=&r"(x11): "r"(x12));
+#elif (SHIFT==24)
+__asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, %1,lsr #24\n\tBIC %1, %1, %2,lsl #8": "=&r"(x10), "=&r"(x11): "r"(x12));
+#endif
+
+ x12 = *((int32*)(blk + 8));
+
+ /* process x11 & x14 */
+ x11 = sad_4pixelN(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixelN(x10, x12, x9);
+
+ sum_accumulate;
+
+ /****** process 8 pixels ******/
+ x11 = *((int32*)(ref + 4));
+ x12 = *((int32*)(ref + 8));
+ x10 = *((int32*)ref); ref += lx;
+ x14 = *((int32*)(blk + 4));
+
+#if (SHIFT==8)
+__asm__ volatile("MVN %0, %0, lsr #8\n\tBIC %0, %0, %1,lsl #24\n\tMVN %1, %1,lsr #8\n\tBIC %1, %1, %2,lsl #24": "=&r"(x10), "=&r"(x11): "r"(x12));
+#elif (SHIFT==16)
+__asm__ volatile("MVN %0, %0, lsr #16\n\tBIC %0, %0, %1,lsl #16\n\tMVN %1, %1,lsr #16\n\tBIC %1, %1, %2,lsl #16": "=&r"(x10), "=&r"(x11): "r"(x12));
+#elif (SHIFT==24)
+__asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, %1,lsr #24\n\tBIC %1, %1, %2,lsl #8": "=&r"(x10), "=&r"(x11): "r"(x12));
+#endif
+__asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk));
+
+ /* process x11 & x14 */
+ x11 = sad_4pixelN(x11, x14, x9);
+
+ /* process x12 & x10 */
+ x10 = sad_4pixelN(x10, x12, x9);
+
+ sum_accumulate;
+
+ /****************/
+ x10 = x5 - (x4 << 8); /* extract low bytes */
+ x10 = x10 + x4; /* add with high bytes */
+ x10 = x10 + (x10 << 16); /* add with lower half word */
+
+ if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */
+ {
+ if (--x8)
+ {
+#if (NUMBER==3)
+ goto LOOP_SAD3;
+#elif (NUMBER==2)
+            goto LOOP_SAD2;
+#elif (NUMBER==1)
+            goto LOOP_SAD1;
+#endif
+ }
+
+ }
+
+ return ((uint32)x10 >> 16);
+}
+
+#endif
+
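
The offset variants in this header exist because ref is not word aligned for three of the four possible byte offsets: the pointer is rounded down to a word boundary and the wanted four bytes are stitched back together from two aligned loads, with SHIFT equal to eight times the byte offset. A generic little-endian sketch of that stitch (illustrative name, not part of the codec):

#include <stdint.h>

static uint32_t load32_unaligned_le(const uint8_t *p)
{
    uintptr_t off     = (uintptr_t)p & 3;               /* 1, 2 or 3 in these files */
    const uint32_t *w = (const uint32_t *)(p - off);    /* aligned base word        */
    uint32_t lo = w[0], hi = w[1];
    uint32_t shift = 8u * (uint32_t)off;

    if (shift == 0)
        return lo;
    /* e.g. for SHIFT == 24: (lo >> 24) | (hi << 8) reassembles "G F E D" */
    return (lo >> shift) | (hi << (32u - shift));
}

The __CC_ARM and inline-assembly branches perform the same stitch with MVN/BIC, which leaves the recombined bytes complemented (the "~G ~F ~E ~D" in the comments); that is presumably why those paths accumulate through the addition-based sad_4pixelN() instead of sad_4pixel().
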
diff --git a/media/libstagefright/codecs/avc/enc/src/slice.cpp b/media/libstagefright/codecs/avc/enc/src/slice.cpp
new file mode 100644
index 0000000..f6d066e
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/slice.cpp
@@ -0,0 +1,1025 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+
+
+AVCEnc_Status AVCEncodeSlice(AVCEncObject *encvid)
+{
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ AVCCommonObj *video = encvid->common;
+ AVCPicParamSet *pps = video->currPicParams;
+ AVCSliceHeader *sliceHdr = video->sliceHdr;
+ AVCMacroblock *currMB ;
+ AVCEncBitstream *stream = encvid->bitstream;
+ uint slice_group_id;
+ int CurrMbAddr, slice_type;
+
+ slice_type = video->slice_type;
+
+ /* set the first mb in slice */
+ video->mbNum = CurrMbAddr = sliceHdr->first_mb_in_slice;// * (1+video->MbaffFrameFlag);
+ slice_group_id = video->MbToSliceGroupMap[CurrMbAddr];
+
+ video->mb_skip_run = 0;
+
+ /* while loop , see subclause 7.3.4 */
+ while (1)
+ {
+ video->mbNum = CurrMbAddr;
+ currMB = video->currMB = &(video->mblock[CurrMbAddr]);
+ currMB->slice_id = video->slice_id; // for deblocking
+
+ video->mb_x = CurrMbAddr % video->PicWidthInMbs;
+ video->mb_y = CurrMbAddr / video->PicWidthInMbs;
+
+ /* initialize QP for this MB here*/
+ /* calculate currMB->QPy */
+ RCInitMBQP(encvid);
+
+ /* check the availability of neighboring macroblocks */
+ InitNeighborAvailability(video, CurrMbAddr);
+
+ /* Assuming that InitNeighborAvailability has been called prior to this function */
+ video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;
+ /* this is necessary for all subsequent intra search */
+
+ if (!video->currPicParams->constrained_intra_pred_flag)
+ {
+ video->intraAvailA = video->mbAvailA;
+ video->intraAvailB = video->mbAvailB;
+ video->intraAvailC = video->mbAvailC;
+ video->intraAvailD = video->mbAvailD;
+ }
+ else
+ {
+ if (video->mbAvailA)
+ {
+ video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;
+ }
+ if (video->mbAvailB)
+ {
+ video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;
+ }
+ if (video->mbAvailC)
+ {
+ video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;
+ }
+ if (video->mbAvailD)
+ {
+ video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;
+ }
+ }
+
+ /* encode_one_macroblock() */
+ status = EncodeMB(encvid);
+ if (status != AVCENC_SUCCESS)
+ {
+ break;
+ }
+
+ /* go to next MB */
+ CurrMbAddr++;
+
+ while ((uint)video->MbToSliceGroupMap[CurrMbAddr] != slice_group_id &&
+ (uint)CurrMbAddr < video->PicSizeInMbs)
+ {
+ CurrMbAddr++;
+ }
+
+ if ((uint)CurrMbAddr >= video->PicSizeInMbs)
+ {
+ /* end of slice, return, but before that check to see if there are other slices
+ to be encoded. */
+ encvid->currSliceGroup++;
+ if (encvid->currSliceGroup > (int)pps->num_slice_groups_minus1) /* no more slice group */
+ {
+ status = AVCENC_PICTURE_READY;
+ break;
+ }
+ else
+ {
+ /* find first_mb_num for the next slice */
+ CurrMbAddr = 0;
+ while (video->MbToSliceGroupMap[CurrMbAddr] != encvid->currSliceGroup &&
+ (uint)CurrMbAddr < video->PicSizeInMbs)
+ {
+ CurrMbAddr++;
+ }
+ if ((uint)CurrMbAddr >= video->PicSizeInMbs)
+ {
+ status = AVCENC_SLICE_EMPTY; /* error, one slice group has no MBs in it */
+ }
+
+ video->mbNum = CurrMbAddr;
+ status = AVCENC_SUCCESS;
+ break;
+ }
+ }
+ }
+
+ if (video->mb_skip_run > 0)
+ {
+ /* write skip_run */
+ if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)
+ {
+ ue_v(stream, video->mb_skip_run);
+ video->mb_skip_run = 0;
+ }
+ else /* shouldn't happen */
+ {
+ status = AVCENC_FAIL;
+ }
+ }
+
+ return status;
+}
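
AVCEncodeSlice() above walks macroblock addresses in raster order but only encodes those mapped to the current slice group; the two inner while-loops implement that scan. The same idea separated out for clarity (illustrative signature, the encoder keeps this logic inline):

static int next_mb_in_slice_group(const int *MbToSliceGroupMap,
                                  unsigned PicSizeInMbs,
                                  unsigned currMbAddr, int sliceGroup)
{
    unsigned i = currMbAddr + 1;
    while (i < PicSizeInMbs && MbToSliceGroupMap[i] != sliceGroup)
        i++;                                   /* skip MBs of other slice groups */
    return (i < PicSizeInMbs) ? (int)i : -1;   /* -1: no more MBs in this group  */
}
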
+
+
+AVCEnc_Status EncodeMB(AVCEncObject *encvid)
+{
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ AVCCommonObj *video = encvid->common;
+ AVCPictureData *currPic = video->currPic;
+ AVCFrameIO *currInput = encvid->currInput;
+ AVCMacroblock *currMB = video->currMB;
+ AVCMacroblock *MB_A, *MB_B;
+ AVCEncBitstream *stream = encvid->bitstream;
+ AVCRateControl *rateCtrl = encvid->rateCtrl;
+ uint8 *cur, *curL, *curCb, *curCr;
+ uint8 *orgL, *orgCb, *orgCr, *org4;
+ int CurrMbAddr = video->mbNum;
+ int picPitch = currPic->pitch;
+ int orgPitch = currInput->pitch;
+ int x_position = (video->mb_x << 4);
+ int y_position = (video->mb_y << 4);
+ int offset;
+ int b8, b4, blkidx;
+ AVCResidualType resType;
+ int slice_type;
+ int numcoeff; /* output from residual_block_cavlc */
+ int cost16, cost8;
+
+ int num_bits, start_mb_bits, start_text_bits;
+
+ slice_type = video->slice_type;
+
+ /* now, point to the reconstructed frame */
+ offset = y_position * picPitch + x_position;
+ curL = currPic->Sl + offset;
+ orgL = currInput->YCbCr[0] + offset;
+ offset = (offset + x_position) >> 2;
+ curCb = currPic->Scb + offset;
+ curCr = currPic->Scr + offset;
+ orgCb = currInput->YCbCr[1] + offset;
+ orgCr = currInput->YCbCr[2] + offset;
+
+ if (orgPitch != picPitch)
+ {
+ offset = y_position * (orgPitch - picPitch);
+ orgL += offset;
+ offset >>= 2;
+ orgCb += offset;
+ orgCr += offset;
+ }
+
+ /******* determine MB prediction mode *******/
+ if (encvid->intraSearch[CurrMbAddr])
+ {
+ MBIntraSearch(encvid, CurrMbAddr, curL, picPitch);
+ }
+ /******* This part should be determined somehow ***************/
+ if (currMB->mbMode == AVC_I_PCM)
+ {
+ /* write down mb_type and PCM data */
+ /* and copy from currInput to currPic */
+ status = EncodeIntraPCM(encvid);
+
+
+ return status;
+ }
+
+ /****** for intra prediction, pred is already done *******/
+ /****** for I4, the recon is ready and Xfrm coefs are ready to be encoded *****/
+
+ //RCCalculateMAD(encvid,currMB,orgL,orgPitch); // no need to re-calculate MAD for Intra
+ // not used since totalSAD is used instead
+
+ /* compute the prediction */
+ /* output is video->pred_block */
+ if (!currMB->mb_intra)
+ {
+ AVCMBMotionComp(encvid, video); /* perform prediction and residue calculation */
+ /* we can do the loop here and call dct_luma */
+ video->pred_pitch = picPitch;
+ currMB->CBP = 0;
+ cost16 = 0;
+ cur = curL;
+ org4 = orgL;
+
+ for (b8 = 0; b8 < 4; b8++)
+ {
+ cost8 = 0;
+
+ for (b4 = 0; b4 < 4; b4++)
+ {
+ blkidx = blkIdx2blkXY[b8][b4];
+ video->pred_block = cur;
+ numcoeff = dct_luma(encvid, blkidx, cur, org4, &cost8);
+ currMB->nz_coeff[blkidx] = numcoeff;
+ if (numcoeff)
+ {
+ video->cbp4x4 |= (1 << blkidx);
+ currMB->CBP |= (1 << b8);
+ }
+
+ if (b4&1)
+ {
+ cur += ((picPitch << 2) - 4);
+ org4 += ((orgPitch << 2) - 4);
+ }
+ else
+ {
+ cur += 4;
+ org4 += 4;
+ }
+ }
+
+ /* move the IDCT part out of dct_luma to accommodate the check
+ for coeff_cost. */
+
+ if ((currMB->CBP&(1 << b8)) && (cost8 <= _LUMA_COEFF_COST_))
+ {
+ cost8 = 0; // reset it
+
+ currMB->CBP ^= (1 << b8);
+ blkidx = blkIdx2blkXY[b8][0];
+
+ currMB->nz_coeff[blkidx] = 0;
+ currMB->nz_coeff[blkidx+1] = 0;
+ currMB->nz_coeff[blkidx+4] = 0;
+ currMB->nz_coeff[blkidx+5] = 0;
+ }
+
+ cost16 += cost8;
+
+ if (b8&1)
+ {
+ cur -= 8;
+ org4 -= 8;
+ }
+ else
+ {
+ cur += (8 - (picPitch << 3));
+ org4 += (8 - (orgPitch << 3));
+ }
+ }
+
+ /* after the whole MB, we do another check for coeff_cost */
+ if ((currMB->CBP&0xF) && (cost16 <= _LUMA_MB_COEFF_COST_))
+ {
+ currMB->CBP = 0; // reset it to zero
+ memset(currMB->nz_coeff, 0, sizeof(uint8)*16);
+ }
+
+ // now we do IDCT
+ MBInterIdct(video, curL, currMB, picPitch);
+
+// video->pred_block = video->pred + 256;
+ }
+ else /* Intra prediction */
+ {
+ encvid->numIntraMB++;
+
+ if (currMB->mbMode == AVC_I16) /* do prediction for the whole macroblock */
+ {
+ currMB->CBP = 0;
+ /* get the prediction from encvid->pred_i16 */
+ dct_luma_16x16(encvid, curL, orgL);
+ }
+ video->pred_block = encvid->pred_ic[currMB->intra_chroma_pred_mode];
+ }
+
+ /* chrominance */
+    /* nothing to do here; the result is already in encvid->pred_ic.
+       The chroma dct must be aware that the prediction block can come from either intra or inter. */
+
+ dct_chroma(encvid, curCb, orgCb, 0);
+
+ dct_chroma(encvid, curCr, orgCr, 1);
+
+
+ /* 4.1 if there's nothing in there, video->mb_skip_run++ */
+ /* 4.2 if coded, check if there is a run of skipped MB, encodes it,
+ set video->QPyprev = currMB->QPy; */
+
+ /* 5. vlc encode */
+
+ /* check for skipped macroblock, INTER only */
+ if (!currMB->mb_intra)
+ {
+ /* decide whether this MB (for inter MB) should be skipped if there's nothing left. */
+ if (!currMB->CBP && currMB->NumMbPart == 1 && currMB->QPy == video->QPy)
+ {
+ if (currMB->MBPartPredMode[0][0] == AVC_Pred_L0 && currMB->ref_idx_L0[0] == 0)
+ {
+ MB_A = &video->mblock[video->mbAddrA];
+ MB_B = &video->mblock[video->mbAddrB];
+
+ if (!video->mbAvailA || !video->mbAvailB)
+ {
+ if (currMB->mvL0[0] == 0) /* both mv components are zeros.*/
+ {
+ currMB->mbMode = AVC_SKIP;
+ video->mvd_l0[0][0][0] = 0;
+ video->mvd_l0[0][0][1] = 0;
+ }
+ }
+ else
+ {
+ if ((MB_A->ref_idx_L0[1] == 0 && MB_A->mvL0[3] == 0) ||
+ (MB_B->ref_idx_L0[2] == 0 && MB_B->mvL0[12] == 0))
+ {
+ if (currMB->mvL0[0] == 0) /* both mv components are zeros.*/
+ {
+ currMB->mbMode = AVC_SKIP;
+ video->mvd_l0[0][0][0] = 0;
+ video->mvd_l0[0][0][1] = 0;
+ }
+ }
+ else if (video->mvd_l0[0][0][0] == 0 && video->mvd_l0[0][0][1] == 0)
+ {
+ currMB->mbMode = AVC_SKIP;
+ }
+ }
+ }
+
+ if (currMB->mbMode == AVC_SKIP)
+ {
+ video->mb_skip_run++;
+
+ /* set parameters */
+                /* not sure whether we need the following */
+ if (slice_type == AVC_P_SLICE)
+ {
+ currMB->mbMode = AVC_SKIP;
+ currMB->MbPartWidth = currMB->MbPartHeight = 16;
+ currMB->MBPartPredMode[0][0] = AVC_Pred_L0;
+ currMB->NumMbPart = 1;
+ currMB->NumSubMbPart[0] = currMB->NumSubMbPart[1] =
+ currMB->NumSubMbPart[2] = currMB->NumSubMbPart[3] = 1;
+ currMB->SubMbPartWidth[0] = currMB->SubMbPartWidth[1] =
+ currMB->SubMbPartWidth[2] = currMB->SubMbPartWidth[3] = currMB->MbPartWidth;
+ currMB->SubMbPartHeight[0] = currMB->SubMbPartHeight[1] =
+ currMB->SubMbPartHeight[2] = currMB->SubMbPartHeight[3] = currMB->MbPartHeight;
+
+ }
+ else if (slice_type == AVC_B_SLICE)
+ {
+ currMB->mbMode = AVC_SKIP;
+ currMB->MbPartWidth = currMB->MbPartHeight = 8;
+ currMB->MBPartPredMode[0][0] = AVC_Direct;
+ currMB->NumMbPart = -1;
+ }
+
+ /* for skipped MB, always look at the first entry in RefPicList */
+ currMB->RefIdx[0] = currMB->RefIdx[1] =
+ currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[0]->RefIdx;
+
+ /* do not return yet, need to do some copies */
+ }
+ }
+ }
+ /* non-skipped MB */
+
+
+ /************* START ENTROPY CODING *************************/
+
+ start_mb_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;
+
+ /* encode mb_type, mb_pred, sub_mb_pred, CBP */
+ if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE && currMB->mbMode != AVC_SKIP)
+ {
+ //if(!pps->entropy_coding_mode_flag) ALWAYS true
+ {
+ ue_v(stream, video->mb_skip_run);
+ video->mb_skip_run = 0;
+ }
+ }
+
+ if (currMB->mbMode != AVC_SKIP)
+ {
+ status = EncodeMBHeader(currMB, encvid);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+ }
+
+ start_text_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;
+
+ /**** now decoding part *******/
+ resType = AVC_Luma;
+
+ /* DC transform for luma I16 mode */
+ if (currMB->mbMode == AVC_I16)
+ {
+ /* vlc encode level/run */
+ status = enc_residual_block(encvid, AVC_Intra16DC, encvid->numcoefdc, currMB);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+ resType = AVC_Intra16AC;
+ }
+
+ /* VLC encoding for luma */
+ for (b8 = 0; b8 < 4; b8++)
+ {
+ if (currMB->CBP&(1 << b8))
+ {
+ for (b4 = 0; b4 < 4; b4++)
+ {
+ /* vlc encode level/run */
+ status = enc_residual_block(encvid, resType, (b8 << 2) + b4, currMB);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+ }
+ }
+ }
+
+ /* chroma */
+ if (currMB->CBP & (3 << 4)) /* chroma DC residual present */
+ {
+ for (b8 = 0; b8 < 2; b8++) /* for iCbCr */
+ {
+ /* vlc encode level/run */
+ status = enc_residual_block(encvid, AVC_ChromaDC, encvid->numcoefcdc[b8] + (b8 << 3), currMB);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+ }
+ }
+
+ if (currMB->CBP & (2 << 4))
+ {
+ /* AC part */
+ for (b8 = 0; b8 < 2; b8++) /* for iCbCr */
+ {
+ for (b4 = 0; b4 < 4; b4++) /* for each block inside Cb or Cr */
+ {
+ /* vlc encode level/run */
+ status = enc_residual_block(encvid, AVC_ChromaAC, 16 + (b8 << 2) + b4, currMB);
+ if (status != AVCENC_SUCCESS)
+ {
+ return status;
+ }
+ }
+ }
+ }
+
+
+ num_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;
+
+ RCPostMB(video, rateCtrl, start_text_bits - start_mb_bits,
+ num_bits - start_text_bits);
+
+// num_bits -= start_mb_bits;
+// fprintf(fdebug,"MB #%d: %d bits\n",CurrMbAddr,num_bits);
+// fclose(fdebug);
+ return status;
+}
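
EncodeMB() relies on the standard H.264 coded_block_pattern layout for 4:2:0: bits 0..3 flag the four luma 8x8 blocks and bits 4..5 hold the chroma pattern (0 = no chroma residual, 1 = DC only, 2 = DC and AC), which is what the (1 << b8), (3 << 4) and (2 << 4) tests above select. A small sketch of that layout with hypothetical helper names:

/* bit 0..3: luma 8x8 block b8 has coefficients; bit 4..5: chroma pattern 0..2 */
static int cbp_has_luma_8x8(int cbp, int b8)  { return (cbp >> b8) & 1; }
static int cbp_has_chroma_dc(int cbp)         { return (cbp & (3 << 4)) != 0; }
static int cbp_has_chroma_ac(int cbp)         { return (cbp & (2 << 4)) != 0; }
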
+
+/* copy the content from predBlock back to the reconstructed YUV frame */
+void Copy_MB(uint8 *curL, uint8 *curCb, uint8 *curCr, uint8 *predBlock, int picPitch)
+{
+ int j, offset;
+ uint32 *dst, *dst2, *src;
+
+ dst = (uint32*)curL;
+ src = (uint32*)predBlock;
+
+ offset = (picPitch - 16) >> 2;
+
+ for (j = 0; j < 16; j++)
+ {
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+
+ dst += offset;
+ }
+
+ dst = (uint32*)curCb;
+ dst2 = (uint32*)curCr;
+ offset >>= 1;
+
+ for (j = 0; j < 8; j++)
+ {
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst2++ = *src++;
+ *dst2++ = *src++;
+
+ dst += offset;
+ dst2 += offset;
+ }
+ return ;
+}
+
+/* encode mb_type, mb_pred, sub_mb_pred, CBP */
+/* decide whether this MB (for inter MB) should be skipped */
+AVCEnc_Status EncodeMBHeader(AVCMacroblock *currMB, AVCEncObject *encvid)
+{
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ uint mb_type;
+ AVCCommonObj *video = encvid->common;
+ AVCEncBitstream *stream = encvid->bitstream;
+
+ if (currMB->CBP > 47) /* chroma CBP is 11 */
+ {
+ currMB->CBP -= 16; /* remove the 5th bit from the right */
+ }
+
+ mb_type = InterpretMBType(currMB, video->slice_type);
+
+ status = ue_v(stream, mb_type);
+
+ if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0)
+ {
+ status = sub_mb_pred(video, currMB, stream);
+ }
+ else
+ {
+ status = mb_pred(video, currMB, stream) ;
+ }
+
+ if (currMB->mbMode != AVC_I16)
+ {
+ /* decode coded_block_pattern */
+ status = EncodeCBP(currMB, stream);
+ }
+
+ /* calculate currMB->mb_qp_delta = currMB->QPy - video->QPyprev */
+ if (currMB->CBP > 0 || currMB->mbMode == AVC_I16)
+ {
+ status = se_v(stream, currMB->QPy - video->QPy);
+ video->QPy = currMB->QPy; /* = (video->QPyprev + currMB->mb_qp_delta + 52)%52; */
+ // no need video->QPc = currMB->QPc;
+ }
+ else
+ {
+ if (currMB->QPy != video->QPy) // current QP is not the same as previous QP
+ {
+ /* restore these values */
+ RCRestoreQP(currMB, video, encvid);
+ }
+ }
+
+ return status;
+}
+
+
+/* inputs are mbMode, mb_intra, i16Mode, CBP, NumMbPart, MbPartWidth, MbPartHeight */
+uint InterpretMBType(AVCMacroblock *currMB, int slice_type)
+{
+ int CBP_chrom;
+ int mb_type;// part1, part2, part3;
+// const static int MapParts2Type[2][3][3]={{{4,8,12},{10,6,14},{16,18,20}},
+// {{5,9,13},{11,7,15},{17,19,21}}};
+
+ if (currMB->mb_intra)
+ {
+ if (currMB->mbMode == AVC_I4)
+ {
+ mb_type = 0;
+ }
+ else if (currMB->mbMode == AVC_I16)
+ {
+ CBP_chrom = (currMB->CBP & 0x30);
+ if (currMB->CBP&0xF)
+ {
+ currMB->CBP |= 0xF; /* either 0x0 or 0xF */
+ mb_type = 13;
+ }
+ else
+ {
+ mb_type = 1;
+ }
+ mb_type += (CBP_chrom >> 2) + currMB->i16Mode;
+ }
+ else /* if(currMB->mbMode == AVC_I_PCM) */
+ {
+ mb_type = 25;
+ }
+ }
+ else
+ { /* P-MB *//* note that the order of the enum AVCMBMode cannot be changed
+ since we use it here. */
+ mb_type = currMB->mbMode - AVC_P16;
+ }
+
+ if (slice_type == AVC_P_SLICE)
+ {
+ if (currMB->mb_intra)
+ {
+ mb_type += 5;
+ }
+ }
+    // The following code has not been tested yet; it is not needed for now.
+ /* else if(slice_type == AVC_B_SLICE)
+ {
+ if(currMB->mbMode == AVC_BDirect16)
+ {
+ mb_type = 0;
+ }
+ else if(currMB->mbMode == AVC_P16)
+ {
+ mb_type = currMB->MBPartPredMode[0][0] + 1; // 1 or 2
+ }
+ else if(currMB->mbMode == AVC_P8)
+ {
+ mb_type = 26;
+ }
+ else if(currMB->mbMode == AVC_P8ref0)
+ {
+ mb_type = 27;
+ }
+ else
+ {
+ part1 = currMB->mbMode - AVC_P16x8;
+ part2 = currMB->MBPartPredMode[0][0];
+ part3 = currMB->MBPartPredMode[1][0];
+ mb_type = MapParts2Type[part1][part2][part3];
+ }
+ }
+
+ if(slice_type == AVC_SI_SLICE)
+ {
+ mb_type++;
+ }
+ */
+ return (uint)mb_type;
+}
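
For the Intra_16x16 case, InterpretMBType() folds the 16x16 prediction mode, the chroma CBP and an "any luma coefficients" flag into a single code, matching the I-slice mb_type table of the H.264 spec (types 1..24), and then offsets intra types by 5 in P slices because the five inter types come first there. The same mapping written out directly (sketch, hypothetical function name):

static unsigned i16_mb_type(int i16PredMode,      /* 0..3                        */
                            int cbpChroma,        /* 0..2                        */
                            int cbpLumaNonZero,   /* 0 or 1: any luma 8x8 coded  */
                            int isPSlice)
{
    unsigned mb_type = 1u + (unsigned)i16PredMode
                     + 4u  * (unsigned)cbpChroma
                     + 12u * (unsigned)cbpLumaNonZero;
    return isPSlice ? mb_type + 5u : mb_type;     /* intra follows the 5 P types */
}
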
+
+//const static int mbPart2raster[3][4] = {{0,0,0,0},{1,1,0,0},{1,0,1,0}};
+
+/* see subclause 7.3.5.1 */
+AVCEnc_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)
+{
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ int mbPartIdx;
+ AVCSliceHeader *sliceHdr = video->sliceHdr;
+ int max_ref_idx;
+ uint code;
+
+ if (currMB->mbMode == AVC_I4 || currMB->mbMode == AVC_I16)
+ {
+ if (currMB->mbMode == AVC_I4)
+ {
+ /* perform prediction to get the actual intra 4x4 pred mode */
+ EncodeIntra4x4Mode(video, currMB, stream);
+ /* output will be in currMB->i4Mode[4][4] */
+ }
+
+ /* assume already set from MBPrediction() */
+ status = ue_v(stream, currMB->intra_chroma_pred_mode);
+ }
+ else if (currMB->MBPartPredMode[0][0] != AVC_Direct)
+ {
+
+ memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);
+
+ /* see subclause 7.4.5.1 for the range of ref_idx_lX */
+ max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;
+ /* if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
+ max_ref_idx = 2*sliceHdr->num_ref_idx_l0_active_minus1 + 1;
+ */
+ /* decode ref index for L0 */
+ if (sliceHdr->num_ref_idx_l0_active_minus1 > 0)
+ {
+ for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
+ {
+ if (/*(sliceHdr->num_ref_idx_l0_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/
+ currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
+ {
+ code = currMB->ref_idx_L0[mbPartIdx];
+ status = te_v(stream, code, max_ref_idx);
+ }
+ }
+ }
+
+ /* see subclause 7.4.5.1 for the range of ref_idx_lX */
+ max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;
+ /* if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
+ max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;
+ */
+ /* decode ref index for L1 */
+ if (sliceHdr->num_ref_idx_l1_active_minus1 > 0)
+ {
+ for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
+ {
+ if (/*(sliceHdr->num_ref_idx_l1_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/
+ currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
+ {
+ status = te_v(stream, currMB->ref_idx_L1[mbPartIdx], max_ref_idx);
+ }
+ }
+ }
+
+ /* encode mvd_l0 */
+ for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
+ {
+ if (currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
+ {
+ status = se_v(stream, video->mvd_l0[mbPartIdx][0][0]);
+ status = se_v(stream, video->mvd_l0[mbPartIdx][0][1]);
+ }
+ }
+ /* encode mvd_l1 */
+ for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)
+ {
+ if (currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
+ {
+ status = se_v(stream, video->mvd_l1[mbPartIdx][0][0]);
+ status = se_v(stream, video->mvd_l1[mbPartIdx][0][1]);
+ }
+ }
+ }
+
+ return status;
+}
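
mb_pred() and the routines around it emit every syntax element through ue_v(), se_v() and te_v(), whose bodies are outside this hunk; they are assumed to implement the standard Exp-Golomb codes of H.264 subclause 9.1. A compact reference for the three mappings, returning the code word and its bit length instead of writing to a bitstream (illustrative types and names, small values assumed):

#include <stdint.h>

typedef struct { uint32_t bits; int len; } ExpGolomb;

static ExpGolomb ue_code(uint32_t codeNum)            /* unsigned: ue(v)            */
{
    ExpGolomb c;
    int m = 0;
    while ((codeNum + 1) >> (m + 1))                  /* m = floor(log2(codeNum+1)) */
        m++;
    c.len  = 2 * m + 1;                               /* m zeros + (m+1) info bits  */
    c.bits = codeNum + 1;                             /* emitted MSB-first          */
    return c;
}

static ExpGolomb se_code(int32_t v)                   /* signed: se(v)              */
{
    uint32_t codeNum = (v > 0) ? (uint32_t)(2 * v - 1) : (uint32_t)(-2 * v);
    return ue_code(codeNum);
}

static ExpGolomb te_code(uint32_t v, uint32_t maxVal) /* truncated: te(v)           */
{
    if (maxVal == 1)                                  /* single complemented bit    */
    {
        ExpGolomb c = { !v, 1 };
        return c;
    }
    return ue_code(v);
}

/* e.g. ue_code(0) -> "1", ue_code(1) -> "010", ue_code(4) -> "00101";
   se_code(-1) -> ue_code(2) -> "011" */
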
+
+/* see subclause 7.3.5.2 */
+AVCEnc_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)
+{
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ int mbPartIdx, subMbPartIdx;
+ AVCSliceHeader *sliceHdr = video->sliceHdr;
+ uint max_ref_idx;
+ uint slice_type = video->slice_type;
+ uint sub_mb_type[4];
+
+ /* this should move somewhere else where we don't have to make this check */
+ if (currMB->mbMode == AVC_P8ref0)
+ {
+ memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);
+ }
+
+ /* we have to check the values to make sure they are valid */
+ /* assign values to currMB->sub_mb_type[] */
+ if (slice_type == AVC_P_SLICE)
+ {
+ InterpretSubMBTypeP(currMB, sub_mb_type);
+ }
+ /* no need to check for B-slice
+ else if(slice_type == AVC_B_SLICE)
+ {
+ InterpretSubMBTypeB(currMB,sub_mb_type);
+ }*/
+
+ for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
+ {
+ status = ue_v(stream, sub_mb_type[mbPartIdx]);
+ }
+
+ /* see subclause 7.4.5.1 for the range of ref_idx_lX */
+ max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;
+ /* if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
+ max_ref_idx = 2*sliceHdr->num_ref_idx_l0_active_minus1 + 1; */
+
+ for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
+ {
+ if ((sliceHdr->num_ref_idx_l0_active_minus1 > 0 /*|| currMB->mb_field_decoding_flag*/) &&
+ currMB->mbMode != AVC_P8ref0 && /*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
+ currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
+ {
+ status = te_v(stream, currMB->ref_idx_L0[mbPartIdx], max_ref_idx);
+ }
+ /* used in deblocking */
+ currMB->RefIdx[mbPartIdx] = video->RefPicList0[currMB->ref_idx_L0[mbPartIdx]]->RefIdx;
+ }
+ /* see subclause 7.4.5.1 for the range of ref_idx_lX */
+ max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;
+ /* if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)
+ max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;*/
+
+ if (sliceHdr->num_ref_idx_l1_active_minus1 > 0)
+ {
+ for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
+ {
+ if (/*(sliceHdr->num_ref_idx_l1_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/
+ /*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
+ currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
+ {
+ status = te_v(stream, currMB->ref_idx_L1[mbPartIdx], max_ref_idx);
+ }
+ }
+ }
+
+ for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
+ {
+ if (/*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
+ currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)
+ {
+ for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
+ {
+ status = se_v(stream, video->mvd_l0[mbPartIdx][subMbPartIdx][0]);
+ status = se_v(stream, video->mvd_l0[mbPartIdx][subMbPartIdx][1]);
+ }
+ }
+ }
+
+ for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)
+ {
+ if (/*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/
+ currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)
+ {
+ for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)
+ {
+ status = se_v(stream, video->mvd_l1[mbPartIdx][subMbPartIdx][0]);
+ status = se_v(stream, video->mvd_l1[mbPartIdx][subMbPartIdx][1]);
+ }
+ }
+ }
+
+ return status;
+}
+
+/* derives the sub_mb_type[] syntax elements to be coded from mblock->subMbMode[] */
+void InterpretSubMBTypeP(AVCMacroblock *mblock, uint *sub_mb_type)
+{
+ int i;
+ /* see enum AVCMBType declaration */
+ /*const static AVCSubMBMode map2subMbMode[4] = {AVC_8x8,AVC_8x4,AVC_4x8,AVC_4x4};
+ const static int map2subPartWidth[4] = {8,8,4,4};
+ const static int map2subPartHeight[4] = {8,4,8,4};
+ const static int map2numSubPart[4] = {1,2,2,4};*/
+
+ for (i = 0; i < 4 ; i++)
+ {
+ sub_mb_type[i] = mblock->subMbMode[i] - AVC_8x8;
+ }
+
+ return ;
+}
+
+void InterpretSubMBTypeB(AVCMacroblock *mblock, uint *sub_mb_type)
+{
+ int i;
+ /* see enum AVCMBType declaration */
+ /* const static AVCSubMBMode map2subMbMode[13] = {AVC_BDirect8,AVC_8x8,AVC_8x8,
+ AVC_8x8,AVC_8x4,AVC_4x8,AVC_8x4,AVC_4x8,AVC_8x4,AVC_4x8,AVC_4x4,AVC_4x4,AVC_4x4};
+ const static int map2subPartWidth[13] = {4,8,8,8,8,4,8,4,8,4,4,4,4};
+ const static int map2subPartHeight[13] = {4,8,8,8,4,8,4,8,4,8,4,4,4};
+ const static int map2numSubPart[13] = {4,1,1,1,2,2,2,2,2,2,4,4,4};
+ const static int map2predMode[13] = {3,0,1,2,0,0,1,1,2,2,0,1,2};*/
+
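+    /* Illustrative check of the mapping below (assuming AVC_Pred_L0/L1/BI are 0/1/2):
+       an 8x4 sub-partition predicted from list 1 gives 4 + (1 << 1) + 0 = 6,
+       i.e. B_L1_8x4 in Table 7-18. */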
+ for (i = 0; i < 4 ; i++)
+ {
+ if (mblock->subMbMode[i] == AVC_BDirect8)
+ {
+ sub_mb_type[i] = 0;
+ }
+ else if (mblock->subMbMode[i] == AVC_8x8)
+ {
+ sub_mb_type[i] = 1 + mblock->MBPartPredMode[i][0];
+ }
+ else if (mblock->subMbMode[i] == AVC_4x4)
+ {
+ sub_mb_type[i] = 10 + mblock->MBPartPredMode[i][0];
+ }
+ else
+ {
+ sub_mb_type[i] = 4 + (mblock->MBPartPredMode[i][0] << 1) + (mblock->subMbMode[i] - AVC_8x4);
+ }
+ }
+
+ return ;
+}
+
+/* see subclause 8.3.1 */
+AVCEnc_Status EncodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)
+{
+ int intra4x4PredModeA = 0;
+ int intra4x4PredModeB, predIntra4x4PredMode;
+ int component, SubBlock_indx, block_x, block_y;
+ int dcOnlyPredictionFlag;
+ uint flag;
+ int rem = 0;
+ int mode;
+ int bindx = 0;
+
+ for (component = 0; component < 4; component++) /* partition index */
+ {
+ block_x = ((component & 1) << 1);
+ block_y = ((component >> 1) << 1);
+
+ for (SubBlock_indx = 0; SubBlock_indx < 4; SubBlock_indx++) /* sub-partition index */
+ {
+ dcOnlyPredictionFlag = 0;
+ if (block_x > 0)
+ {
+ intra4x4PredModeA = currMB->i4Mode[(block_y << 2) + block_x - 1 ];
+ }
+ else
+ {
+ if (video->intraAvailA)
+ {
+ if (video->mblock[video->mbAddrA].mbMode == AVC_I4)
+ {
+ intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[(block_y << 2) + 3];
+ }
+ else
+ {
+ intra4x4PredModeA = AVC_I4_DC;
+ }
+ }
+ else
+ {
+ dcOnlyPredictionFlag = 1;
+ }
+ }
+
+ if (block_y > 0)
+ {
+ intra4x4PredModeB = currMB->i4Mode[((block_y-1) << 2) + block_x];
+ }
+ else
+ {
+ if (video->intraAvailB)
+ {
+ if (video->mblock[video->mbAddrB].mbMode == AVC_I4)
+ {
+ intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[(3 << 2) + block_x];
+ }
+ else
+ {
+ intra4x4PredModeB = AVC_I4_DC;
+ }
+ }
+ else
+ {
+ dcOnlyPredictionFlag = 1;
+ }
+ }
+
+ if (dcOnlyPredictionFlag)
+ {
+ intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC;
+ }
+
+ predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB);
+
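+            /* Signal prev_intra4x4_pred_mode_flag; when the chosen mode differs from the
+               predicted mode, rem_intra4x4_pred_mode is written as a 3-bit code that
+               skips the predicted mode, as the branches below implement. */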
+ flag = 0;
+ mode = currMB->i4Mode[(block_y<<2)+block_x];
+
+ if (mode == (AVCIntra4x4PredMode)predIntra4x4PredMode)
+ {
+ flag = 1;
+ }
+ else if (mode < predIntra4x4PredMode)
+ {
+ rem = mode;
+ }
+ else
+ {
+ rem = mode - 1;
+ }
+
+ BitstreamWrite1Bit(stream, flag);
+
+ if (!flag)
+ {
+ BitstreamWriteBits(stream, 3, rem);
+ }
+
+ bindx++;
+ block_y += (SubBlock_indx & 1) ;
+ block_x += (1 - 2 * (SubBlock_indx & 1)) ;
+ }
+ }
+
+ return AVCENC_SUCCESS;
+}
+
+
+
diff --git a/media/libstagefright/codecs/avc/enc/src/vlc_encode.cpp b/media/libstagefright/codecs/avc/enc/src/vlc_encode.cpp
new file mode 100644
index 0000000..222e709
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/src/vlc_encode.cpp
@@ -0,0 +1,336 @@
+/* ------------------------------------------------------------------
+ * Copyright (C) 1998-2009 PacketVideo
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ * -------------------------------------------------------------------
+ */
+#include "avcenc_lib.h"
+
+/**
+See algorithm in subclause 9.1, Table 9-1, Table 9-2. */
+AVCEnc_Status ue_v(AVCEncBitstream *bitstream, uint codeNum)
+{
+ if (AVCENC_SUCCESS != SetEGBitstring(bitstream, codeNum))
+ return AVCENC_FAIL;
+
+ return AVCENC_SUCCESS;
+}
+
+/**
+See subclause 9.1.1, Table 9-3 */
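+/* Illustrative mapping, consistent with Table 9-3: 0 -> 0, 1 -> 1, -1 -> 2, 2 -> 3, -2 -> 4, ... */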
+AVCEnc_Status se_v(AVCEncBitstream *bitstream, int value)
+{
+ uint codeNum;
+ AVCEnc_Status status;
+
+ if (value <= 0)
+ {
+ codeNum = -value * 2;
+ }
+ else
+ {
+ codeNum = value * 2 - 1;
+ }
+
+ status = ue_v(bitstream, codeNum);
+
+ return status;
+}
+
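+/* Truncated Exp-Golomb te(v): for range > 1 this reduces to ue(v); for range == 1,
+   as used here, a single bit equal to !value is written (so value 0 codes as bit 1). */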
+AVCEnc_Status te_v(AVCEncBitstream *bitstream, uint value, uint range)
+{
+ AVCEnc_Status status;
+
+ if (range > 1)
+ {
+ return ue_v(bitstream, value);
+ }
+ else
+ {
+ status = BitstreamWrite1Bit(bitstream, 1 - value);
+ return status;
+ }
+}
+
+/**
+See subclause 9.1, Table 9-1, 9-2. */
+// compute leadingZeros and infobits
+// codeNum = (1 << leadingZeros) - 1 + infobits;
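+// Worked example (illustrative, not from the original comments): codeNum = 4 gives
+// leadingZeros = 2 and infobits = 1, so the emitted bits are "00" + "101" = 00101.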
+AVCEnc_Status SetEGBitstring(AVCEncBitstream *bitstream, uint codeNum)
+{
+ AVCEnc_Status status;
+ int leadingZeros;
+ int infobits;
+
+ if (!codeNum)
+ {
+ status = BitstreamWrite1Bit(bitstream, 1);
+ return status;
+ }
+
+ /* calculate leadingZeros and infobits */
+ leadingZeros = 1;
+ while ((uint)(1 << leadingZeros) < codeNum + 2)
+ {
+ leadingZeros++;
+ }
+ leadingZeros--;
+ infobits = codeNum - (1 << leadingZeros) + 1;
+
+ status = BitstreamWriteBits(bitstream, leadingZeros, 0);
+ infobits |= (1 << leadingZeros);
+ status = BitstreamWriteBits(bitstream, leadingZeros + 1, infobits);
+ return status;
+}
+
+/* see Table 9-4 assignment of codeNum to values of coded_block_pattern. */
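+/* Column 0 holds the Intra_4x4 mapping and column 1 the Inter mapping used by
+   EncodeCBP below; e.g. coded_block_pattern 0 maps to codeNum 3 (intra) or 0 (inter). */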
+const static uint8 MapCBP2code[48][2] =
+{
+ {3, 0}, {29, 2}, {30, 3}, {17, 7}, {31, 4}, {18, 8}, {37, 17}, {8, 13}, {32, 5}, {38, 18}, {19, 9}, {9, 14},
+ {20, 10}, {10, 15}, {11, 16}, {2, 11}, {16, 1}, {33, 32}, {34, 33}, {21, 36}, {35, 34}, {22, 37}, {39, 44}, {4, 40},
+ {36, 35}, {40, 45}, {23, 38}, {5, 41}, {24, 39}, {6, 42}, {7, 43}, {1, 19}, {41, 6}, {42, 24}, {43, 25}, {25, 20},
+ {44, 26}, {26, 21}, {46, 46}, {12, 28}, {45, 27}, {47, 47}, {27, 22}, {13, 29}, {28, 23}, {14, 30}, {15, 31}, {0, 12}
+};
+
+AVCEnc_Status EncodeCBP(AVCMacroblock *currMB, AVCEncBitstream *stream)
+{
+ AVCEnc_Status status;
+ uint codeNum;
+
+ if (currMB->mbMode == AVC_I4)
+ {
+ codeNum = MapCBP2code[currMB->CBP][0];
+ }
+ else
+ {
+ codeNum = MapCBP2code[currMB->CBP][1];
+ }
+
+ status = ue_v(stream, codeNum);
+
+ return status;
+}
+
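+/* coeff_token coding (subclause 9.2.1): for nC >= 8 a 6-bit fixed-length code is
+   written, ((TotalCoeff - 1) << 2) | TrailingOnes, or 3 when TotalCoeff is 0;
+   otherwise one of three VLC tables is selected by nC (< 2, < 4, else). */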
+AVCEnc_Status ce_TotalCoeffTrailingOnes(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff, int nC)
+{
+ const static uint8 totCoeffTrailOne[3][4][17][2] =
+ {
+ { // 0702
+ {{1, 1}, {6, 5}, {8, 7}, {9, 7}, {10, 7}, {11, 7}, {13, 15}, {13, 11}, {13, 8}, {14, 15}, {14, 11}, {15, 15}, {15, 11}, {16, 15}, {16, 11}, {16, 7}, {16, 4}},
+ {{0, 0}, {2, 1}, {6, 4}, {8, 6}, {9, 6}, {10, 6}, {11, 6}, {13, 14}, {13, 10}, {14, 14}, {14, 10}, {15, 14}, {15, 10}, {15, 1}, {16, 14}, {16, 10}, {16, 6}},
+ {{0, 0}, {0, 0}, {3, 1}, {7, 5}, {8, 5}, {9, 5}, {10, 5}, {11, 5}, {13, 13}, {13, 9}, {14, 13}, {14, 9}, {15, 13}, {15, 9}, {16, 13}, {16, 9}, {16, 5}},
+ {{0, 0}, {0, 0}, {0, 0}, {5, 3}, {6, 3}, {7, 4}, {8, 4}, {9, 4}, {10, 4}, {11, 4}, {13, 12}, {14, 12}, {14, 8}, {15, 12}, {15, 8}, {16, 12}, {16, 8}},
+ },
+ {
+ {{2, 3}, {6, 11}, {6, 7}, {7, 7}, {8, 7}, {8, 4}, {9, 7}, {11, 15}, {11, 11}, {12, 15}, {12, 11}, {12, 8}, {13, 15}, {13, 11}, {13, 7}, {14, 9}, {14, 7}},
+ {{0, 0}, {2, 2}, {5, 7}, {6, 10}, {6, 6}, {7, 6}, {8, 6}, {9, 6}, {11, 14}, {11, 10}, {12, 14}, {12, 10}, {13, 14}, {13, 10}, {14, 11}, {14, 8}, {14, 6}},
+ {{0, 0}, {0, 0}, {3, 3}, {6, 9}, {6, 5}, {7, 5}, {8, 5}, {9, 5}, {11, 13}, {11, 9}, {12, 13}, {12, 9}, {13, 13}, {13, 9}, {13, 6}, {14, 10}, {14, 5}},
+ {{0, 0}, {0, 0}, {0, 0}, {4, 5}, {4, 4}, {5, 6}, {6, 8}, {6, 4}, {7, 4}, {9, 4}, {11, 12}, {11, 8}, {12, 12}, {13, 12}, {13, 8}, {13, 1}, {14, 4}},
+ },
+ {
+ {{4, 15}, {6, 15}, {6, 11}, {6, 8}, {7, 15}, {7, 11}, {7, 9}, {7, 8}, {8, 15}, {8, 11}, {9, 15}, {9, 11}, {9, 8}, {10, 13}, {10, 9}, {10, 5}, {10, 1}},
+ {{0, 0}, {4, 14}, {5, 15}, {5, 12}, {5, 10}, {5, 8}, {6, 14}, {6, 10}, {7, 14}, {8, 14}, {8, 10}, {9, 14}, {9, 10}, {9, 7}, {10, 12}, {10, 8}, {10, 4}},
+ {{0, 0}, {0, 0}, {4, 13}, {5, 14}, {5, 11}, {5, 9}, {6, 13}, {6, 9}, {7, 13}, {7, 10}, {8, 13}, {8, 9}, {9, 13}, {9, 9}, {10, 11}, {10, 7}, {10, 3}},
+ {{0, 0}, {0, 0}, {0, 0}, {4, 12}, {4, 11}, {4, 10}, {4, 9}, {4, 8}, {5, 13}, {6, 12}, {7, 12}, {8, 12}, {8, 8}, {9, 12}, {10, 10}, {10, 6}, {10, 2}}
+ }
+ };
+
+
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ uint code, len;
+ int vlcnum;
+
+ if (TrailingOnes > 3)
+ {
+ return AVCENC_TRAILINGONES_FAIL;
+ }
+
+ if (nC >= 8)
+ {
+ if (TotalCoeff)
+ {
+ code = ((TotalCoeff - 1) << 2) | (TrailingOnes);
+ }
+ else
+ {
+ code = 3;
+ }
+ status = BitstreamWriteBits(stream, 6, code);
+ }
+ else
+ {
+ if (nC < 2)
+ {
+ vlcnum = 0;
+ }
+ else if (nC < 4)
+ {
+ vlcnum = 1;
+ }
+ else
+ {
+ vlcnum = 2;
+ }
+
+ len = totCoeffTrailOne[vlcnum][TrailingOnes][TotalCoeff][0];
+ code = totCoeffTrailOne[vlcnum][TrailingOnes][TotalCoeff][1];
+ status = BitstreamWriteBits(stream, len, code);
+ }
+
+ return status;
+}
+
+AVCEnc_Status ce_TotalCoeffTrailingOnesChromaDC(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff)
+{
+ const static uint8 totCoeffTrailOneChrom[4][5][2] =
+ {
+ { {2, 1}, {6, 7}, {6, 4}, {6, 3}, {6, 2}},
+ { {0, 0}, {1, 1}, {6, 6}, {7, 3}, {8, 3}},
+ { {0, 0}, {0, 0}, {3, 1}, {7, 2}, {8, 2}},
+ { {0, 0}, {0, 0}, {0, 0}, {6, 5}, {7, 0}},
+ };
+
+ AVCEnc_Status status = AVCENC_SUCCESS;
+ uint code, len;
+
+ len = totCoeffTrailOneChrom[TrailingOnes][TotalCoeff][0];
+ code = totCoeffTrailOneChrom[TrailingOnes][TotalCoeff][1];
+ status = BitstreamWriteBits(stream, len, code);
+
+ return status;
+}
+
+/* see Table 9-7 and 9-8 */
+AVCEnc_Status ce_TotalZeros(AVCEncBitstream *stream, int total_zeros, int TotalCoeff)
+{
+ const static uint8 lenTotalZeros[15][16] =
+ {
+ { 1, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9},
+ { 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6},
+ { 4, 3, 3, 3, 4, 4, 3, 3, 4, 5, 5, 6, 5, 6},
+ { 5, 3, 4, 4, 3, 3, 3, 4, 3, 4, 5, 5, 5},
+ { 4, 4, 4, 3, 3, 3, 3, 3, 4, 5, 4, 5},
+ { 6, 5, 3, 3, 3, 3, 3, 3, 4, 3, 6},
+ { 6, 5, 3, 3, 3, 2, 3, 4, 3, 6},
+ { 6, 4, 5, 3, 2, 2, 3, 3, 6},
+ { 6, 6, 4, 2, 2, 3, 2, 5},
+ { 5, 5, 3, 2, 2, 2, 4},
+ { 4, 4, 3, 3, 1, 3},
+ { 4, 4, 2, 1, 3},
+ { 3, 3, 1, 2},
+ { 2, 2, 1},
+ { 1, 1},
+ };
+
+ const static uint8 codTotalZeros[15][16] =
+ {
+ {1, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 1},
+ {7, 6, 5, 4, 3, 5, 4, 3, 2, 3, 2, 3, 2, 1, 0},
+ {5, 7, 6, 5, 4, 3, 4, 3, 2, 3, 2, 1, 1, 0},
+ {3, 7, 5, 4, 6, 5, 4, 3, 3, 2, 2, 1, 0},
+ {5, 4, 3, 7, 6, 5, 4, 3, 2, 1, 1, 0},
+ {1, 1, 7, 6, 5, 4, 3, 2, 1, 1, 0},
+ {1, 1, 5, 4, 3, 3, 2, 1, 1, 0},
+ {1, 1, 1, 3, 3, 2, 2, 1, 0},
+ {1, 0, 1, 3, 2, 1, 1, 1, },
+ {1, 0, 1, 3, 2, 1, 1, },
+ {0, 1, 1, 2, 1, 3},
+ {0, 1, 1, 1, 1},
+ {0, 1, 1, 1},
+ {0, 1, 1},
+ {0, 1},
+ };
+ int len, code;
+ AVCEnc_Status status;
+
+ len = lenTotalZeros[TotalCoeff-1][total_zeros];
+ code = codTotalZeros[TotalCoeff-1][total_zeros];
+
+ status = BitstreamWriteBits(stream, len, code);
+
+ return status;
+}
+
+/* see Table 9-9 */
+AVCEnc_Status ce_TotalZerosChromaDC(AVCEncBitstream *stream, int total_zeros, int TotalCoeff)
+{
+ const static uint8 lenTotalZerosChromaDC[3][4] =
+ {
+ { 1, 2, 3, 3, },
+ { 1, 2, 2, 0, },
+ { 1, 1, 0, 0, },
+ };
+
+ const static uint8 codTotalZerosChromaDC[3][4] =
+ {
+ { 1, 1, 1, 0, },
+ { 1, 1, 0, 0, },
+ { 1, 0, 0, 0, },
+ };
+
+ int len, code;
+ AVCEnc_Status status;
+
+ len = lenTotalZerosChromaDC[TotalCoeff-1][total_zeros];
+ code = codTotalZerosChromaDC[TotalCoeff-1][total_zeros];
+
+ status = BitstreamWriteBits(stream, len, code);
+
+ return status;
+}
+
+/* see Table 9-10 */
+AVCEnc_Status ce_RunBefore(AVCEncBitstream *stream, int run_before, int zerosLeft)
+{
+ const static uint8 lenRunBefore[7][16] =
+ {
+ {1, 1},
+ {1, 2, 2},
+ {2, 2, 2, 2},
+ {2, 2, 2, 3, 3},
+ {2, 2, 3, 3, 3, 3},
+ {2, 3, 3, 3, 3, 3, 3},
+ {3, 3, 3, 3, 3, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+ };
+
+ const static uint8 codRunBefore[7][16] =
+ {
+ {1, 0},
+ {1, 1, 0},
+ {3, 2, 1, 0},
+ {3, 2, 1, 1, 0},
+ {3, 2, 3, 2, 1, 0},
+ {3, 0, 1, 3, 2, 5, 4},
+ {7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ };
+
+ int len, code;
+ AVCEnc_Status status;
+
+ if (zerosLeft <= 6)
+ {
+ len = lenRunBefore[zerosLeft-1][run_before];
+ code = codRunBefore[zerosLeft-1][run_before];
+ }
+ else
+ {
+ len = lenRunBefore[6][run_before];
+ code = codRunBefore[6][run_before];
+ }
+
+ status = BitstreamWriteBits(stream, len, code);
+
+
+ return status;
+}
diff --git a/media/libstagefright/include/AVCEncoder.h b/media/libstagefright/include/AVCEncoder.h
new file mode 100644
index 0000000..4fe2e30
--- /dev/null
+++ b/media/libstagefright/include/AVCEncoder.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AVC_ENCODER_H_
+
+#define AVC_ENCODER_H_
+
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaSource.h>
+#include <utils/Vector.h>
+
+struct tagAVCHandle;
+struct tagAVCEncParam;
+
+namespace android {
+
+struct MediaBuffer;
+struct MediaBufferGroup;
+
+struct AVCEncoder : public MediaSource,
+ public MediaBufferObserver {
+ AVCEncoder(const sp<MediaSource> &source,
+ const sp<MetaData>& meta);
+
+ virtual status_t start(MetaData *params);
+ virtual status_t stop();
+
+ virtual sp<MetaData> getFormat();
+
+ virtual status_t read(
+ MediaBuffer **buffer, const ReadOptions *options);
+
+ virtual void signalBufferReturned(MediaBuffer *buffer);
+
+ // Callbacks required by the encoder
+ int32_t allocOutputBuffers(unsigned int sizeInMbs, unsigned int numBuffers);
+ void unbindOutputBuffer(int32_t index);
+ int32_t bindOutputBuffer(int32_t index, uint8_t **yuv);
+
+protected:
+ virtual ~AVCEncoder();
+
+private:
+ sp<MediaSource> mSource;
+ sp<MetaData> mFormat;
+ sp<MetaData> mMeta;
+
+ int32_t mVideoWidth;
+ int32_t mVideoHeight;
+ int32_t mVideoFrameRate;
+ int32_t mVideoBitRate;
+ int32_t mVideoColorFormat;
+ int64_t mNumInputFrames;
+ status_t mInitCheck;
+ bool mStarted;
+ bool mSpsPpsHeaderReceived;
+ bool mReadyForNextFrame;
+    int32_t mIsIDRFrame;  // used to set kKeyIsSyncFrame
+
+ tagAVCHandle *mHandle;
+ tagAVCEncParam *mEncParams;
+ MediaBuffer *mInputBuffer;
+ uint8_t *mInputFrameData;
+ MediaBufferGroup *mGroup;
+ Vector<MediaBuffer *> mOutputBuffers;
+
+
+ status_t initCheck(const sp<MetaData>& meta);
+ void releaseOutputBuffers();
+
+ AVCEncoder(const AVCEncoder &);
+ AVCEncoder &operator=(const AVCEncoder &);
+};
+
+} // namespace android
+
+#endif // AVC_ENCODER_H_
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index a92cea8..0559812 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -14,8 +14,8 @@
base := $(LOCAL_PATH)/../..
LOCAL_C_INCLUDES := \
- $(base)/libs/audioflinger \
- $(base)/camera/libcameraservice \
+ $(base)/services/audioflinger \
+ $(base)/services/camera/libcameraservice \
$(base)/media/libmediaplayerservice
LOCAL_MODULE:= mediaserver
diff --git a/media/mtp/Android.mk b/media/mtp/Android.mk
index 174ea36..4659709 100644
--- a/media/mtp/Android.mk
+++ b/media/mtp/Android.mk
@@ -23,6 +23,7 @@
MtpDataPacket.cpp \
MtpDebug.cpp \
MtpDevice.cpp \
+ MtpEventPacket.cpp \
MtpDeviceInfo.cpp \
MtpObjectInfo.cpp \
MtpPacket.cpp \
diff --git a/media/mtp/MtpDatabase.h b/media/mtp/MtpDatabase.h
index 1566a11..7feb3dc 100644
--- a/media/mtp/MtpDatabase.h
+++ b/media/mtp/MtpDatabase.h
@@ -27,16 +27,25 @@
public:
virtual ~MtpDatabase() {}
- virtual MtpObjectHandle addFile(const char* path,
+ // called from SendObjectInfo to reserve a database entry for the incoming file
+ virtual MtpObjectHandle beginSendObject(const char* path,
MtpObjectFormat format,
MtpObjectHandle parent,
MtpStorageID storage,
uint64_t size,
time_t modified) = 0;
+    // called to report success or failure of the SendObject file transfer.
+    // On success the implementation should send a notification of the new object's
+    // creation; on failure it should remove the database entry created in beginSendObject.
+ virtual void endSendObject(const char* path,
+ MtpObjectHandle handle,
+ MtpObjectFormat format,
+ bool succeeded) = 0;
+
virtual MtpObjectHandleList* getObjectList(MtpStorageID storageID,
- MtpObjectFormat format,
- MtpObjectHandle parent) = 0;
+ MtpObjectFormat format,
+ MtpObjectHandle parent) = 0;
virtual MtpResponseCode getObjectProperty(MtpObjectHandle handle,
MtpObjectProperty property,
diff --git a/media/mtp/MtpEventPacket.cpp b/media/mtp/MtpEventPacket.cpp
new file mode 100644
index 0000000..089278e
--- /dev/null
+++ b/media/mtp/MtpEventPacket.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MtpEventPacket"
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+
+#include <linux/usb/f_mtp.h>
+
+#include "MtpEventPacket.h"
+
+namespace android {
+
+MtpEventPacket::MtpEventPacket()
+ : MtpPacket(512)
+{
+}
+
+MtpEventPacket::~MtpEventPacket() {
+}
+
+#ifdef MTP_DEVICE
+int MtpEventPacket::write(int fd) {
+ struct mtp_event event;
+
+ putUInt32(MTP_CONTAINER_LENGTH_OFFSET, mPacketSize);
+ putUInt16(MTP_CONTAINER_TYPE_OFFSET, MTP_CONTAINER_TYPE_EVENT);
+
+ event.data = mBuffer;
+ event.length = mPacketSize;
+ int ret = ::ioctl(fd, MTP_SEND_EVENT, (unsigned long)&event);
+ return (ret < 0 ? ret : 0);
+}
+#endif
+
+#ifdef MTP_HOST
+// read our buffer from the given endpoint
+int MtpEventPacket::read(struct usb_endpoint *ep) {
+ int ret = transfer(ep, mBuffer, mBufferSize);
+ if (ret >= 0)
+ mPacketSize = ret;
+ else
+ mPacketSize = 0;
+ return ret;
+}
+#endif
+
+} // namespace android
+
diff --git a/media/mtp/MtpEventPacket.h b/media/mtp/MtpEventPacket.h
new file mode 100644
index 0000000..30ae869
--- /dev/null
+++ b/media/mtp/MtpEventPacket.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _MTP_EVENT_PACKET_H
+#define _MTP_EVENT_PACKET_H
+
+#include "MtpPacket.h"
+#include "mtp.h"
+
+namespace android {
+
+class MtpEventPacket : public MtpPacket {
+
+public:
+ MtpEventPacket();
+ virtual ~MtpEventPacket();
+
+#ifdef MTP_DEVICE
+ // write our data to the given file descriptor
+ int write(int fd);
+#endif
+
+#ifdef MTP_HOST
+ // read our buffer from the given endpoint
+ int read(struct usb_endpoint *ep);
+#endif
+
+ inline MtpEventCode getEventCode() const { return getContainerCode(); }
+ inline void setEventCode(MtpEventCode code)
+ { return setContainerCode(code); }
+};
+
+}; // namespace android
+
+#endif // _MTP_EVENT_PACKET_H
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 5a16a03..163c05b 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -31,7 +31,7 @@
#include "MtpStorage.h"
#include "MtpStringBuffer.h"
-#include "f_mtp.h"
+#include <linux/usb/f_mtp.h>
namespace android {
@@ -73,6 +73,11 @@
// MTP_OPERATION_SKIP,
};
+static const MtpEventCode kSupportedEventCodes[] = {
+ MTP_EVENT_OBJECT_ADDED,
+ MTP_EVENT_OBJECT_REMOVED,
+};
+
static const MtpObjectProperty kSupportedObjectProperties[] = {
MTP_PROPERTY_STORAGE_ID,
MTP_PROPERTY_OBJECT_FORMAT,
@@ -123,6 +128,7 @@
mSessionID(0),
mSessionOpen(false),
mSendObjectHandle(kInvalidObjectHandle),
+ mSendObjectFormat(0),
mSendObjectFileSize(0)
{
initObjectProperties();
@@ -238,6 +244,24 @@
return NULL;
}
+void MtpServer::sendObjectAdded(MtpObjectHandle handle) {
+ LOGD("sendObjectAdded %d\n", handle);
+ mEvent.setEventCode(MTP_EVENT_OBJECT_ADDED);
+ mEvent.setTransactionID(mRequest.getTransactionID());
+ mEvent.setParameter(1, handle);
+ int ret = mEvent.write(mFD);
+ LOGD("mEvent.write returned %d\n", ret);
+}
+
+void MtpServer::sendObjectRemoved(MtpObjectHandle handle) {
+ LOGD("sendObjectRemoved %d\n", handle);
+ mEvent.setEventCode(MTP_EVENT_OBJECT_REMOVED);
+ mEvent.setTransactionID(mRequest.getTransactionID());
+ mEvent.setParameter(1, handle);
+ int ret = mEvent.write(mFD);
+ LOGD("mEvent.write returned %d\n", ret);
+}
+
void MtpServer::initObjectProperties() {
mObjectProperties.push(new MtpProperty(MTP_PROPERTY_STORAGE_ID, MTP_TYPE_UINT16));
mObjectProperties.push(new MtpProperty(MTP_PROPERTY_OBJECT_FORMAT, MTP_TYPE_UINT16));
@@ -325,7 +349,8 @@
mData.putUInt16(0); //Functional Mode
mData.putAUInt16(kSupportedOperationCodes,
sizeof(kSupportedOperationCodes) / sizeof(uint16_t)); // Operations Supported
- mData.putEmptyArray(); // Events Supported
+ mData.putAUInt16(kSupportedEventCodes,
+ sizeof(kSupportedEventCodes) / sizeof(uint16_t)); // Events Supported
mData.putEmptyArray(); // Device Properties Supported
mData.putEmptyArray(); // Capture Formats
mData.putAUInt16(kSupportedPlaybackFormats,
@@ -519,8 +544,8 @@
path += (const char *)name;
mDatabase->beginTransaction();
- MtpObjectHandle handle = mDatabase->addFile((const char*)path, format, parent, storageID,
- mSendObjectFileSize, modifiedTime);
+ MtpObjectHandle handle = mDatabase->beginSendObject((const char*)path,
+ format, parent, storageID, mSendObjectFileSize, modifiedTime);
if (handle == kInvalidObjectHandle) {
mDatabase->rollbackTransaction();
return MTP_RESPONSE_GENERAL_ERROR;
@@ -538,6 +563,7 @@
mSendObjectFilePath = path;
// save the handle for the SendObject call, which should follow
mSendObjectHandle = handle;
+ mSendObjectFormat = format;
}
mResponse.setParameter(1, storageID);
@@ -548,13 +574,18 @@
}
MtpResponseCode MtpServer::doSendObject() {
+ MtpResponseCode result = MTP_RESPONSE_OK;
+ mode_t mask;
+ int ret;
+
if (mSendObjectHandle == kInvalidObjectHandle) {
LOGE("Expected SendObjectInfo before SendObject");
- return MTP_RESPONSE_NO_VALID_OBJECT_INFO;
+ result = MTP_RESPONSE_NO_VALID_OBJECT_INFO;
+ goto done;
}
// read the header
- int ret = mData.readDataHeader(mFD);
+ ret = mData.readDataHeader(mFD);
// FIXME - check for errors here.
// reset so we don't attempt to send this back
@@ -563,11 +594,12 @@
mtp_file_range mfr;
mfr.fd = open(mSendObjectFilePath, O_RDWR | O_CREAT | O_TRUNC);
if (mfr.fd < 0) {
- return MTP_RESPONSE_GENERAL_ERROR;
+ result = MTP_RESPONSE_GENERAL_ERROR;
+ goto done;
}
fchown(mfr.fd, getuid(), mFileGroup);
// set permissions
- mode_t mask = umask(0);
+ mask = umask(0);
fchmod(mfr.fd, mFilePermission);
umask(mask);
@@ -578,18 +610,22 @@
ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
close(mfr.fd);
- // FIXME - we need to delete mSendObjectHandle from the database if this fails.
LOGV("MTP_RECEIVE_FILE returned %d", ret);
- mSendObjectHandle = kInvalidObjectHandle;
if (ret < 0) {
unlink(mSendObjectFilePath);
if (errno == ECANCELED)
- return MTP_RESPONSE_TRANSACTION_CANCELLED;
+ result = MTP_RESPONSE_TRANSACTION_CANCELLED;
else
- return MTP_RESPONSE_GENERAL_ERROR;
+ result = MTP_RESPONSE_GENERAL_ERROR;
}
- return MTP_RESPONSE_OK;
+
+done:
+ mDatabase->endSendObject(mSendObjectFilePath, mSendObjectHandle, mSendObjectFormat,
+ result == MTP_RESPONSE_OK);
+ mSendObjectHandle = kInvalidObjectHandle;
+ mSendObjectFormat = 0;
+ return result;
}
MtpResponseCode MtpServer::doDeleteObject() {
diff --git a/media/mtp/MtpServer.h b/media/mtp/MtpServer.h
index afba846..aff973a 100644
--- a/media/mtp/MtpServer.h
+++ b/media/mtp/MtpServer.h
@@ -20,6 +20,7 @@
#include "MtpRequestPacket.h"
#include "MtpDataPacket.h"
#include "MtpResponsePacket.h"
+#include "MtpEventPacket.h"
#include "mtp.h"
#include "MtpUtils.h"
@@ -52,6 +53,7 @@
MtpRequestPacket mRequest;
MtpDataPacket mData;
MtpResponsePacket mResponse;
+ MtpEventPacket mEvent;
MtpStorageList mStorages;
@@ -60,6 +62,7 @@
// handle for new object, set by SendObjectInfo and used by SendObject
MtpObjectHandle mSendObjectHandle;
+ MtpObjectFormat mSendObjectFormat;
MtpString mSendObjectFilePath;
size_t mSendObjectFileSize;
@@ -76,6 +79,9 @@
MtpProperty* getObjectProperty(MtpPropertyCode propCode);
MtpProperty* getDeviceProperty(MtpPropertyCode propCode);
+ void sendObjectAdded(MtpObjectHandle handle);
+ void sendObjectRemoved(MtpObjectHandle handle);
+
private:
void initObjectProperties();
diff --git a/media/mtp/MtpTypes.h b/media/mtp/MtpTypes.h
index ec0f867..2a895a7 100644
--- a/media/mtp/MtpTypes.h
+++ b/media/mtp/MtpTypes.h
@@ -28,6 +28,7 @@
typedef uint16_t MtpOperationCode;
typedef uint16_t MtpResponseCode;
+typedef uint16_t MtpEventCode;
typedef uint32_t MtpSessionID;
typedef uint32_t MtpStorageID;
typedef uint32_t MtpTransactionID;
diff --git a/media/mtp/f_mtp.h b/media/mtp/f_mtp.h
deleted file mode 100644
index c1c9aef..0000000
--- a/media/mtp/f_mtp.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Gadget Function Driver for MTP
- *
- * Copyright (C) 2010 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __LINUX_USB_F_MTP_H
-#define __LINUX_USB_F_MTP_H
-
-/* Constants for MTP_SET_INTERFACE_MODE */
-#define MTP_INTERFACE_MODE_MTP 0
-#define MTP_INTERFACE_MODE_PTP 1
-
-
-struct mtp_file_range {
- /* file descriptor for file to transfer */
- int fd;
- /* offset in file for start of transfer */
- loff_t offset;
- /* number of bytes to transfer */
- size_t length;
-};
-
-/* Sends the specified file range to the host */
-#define MTP_SEND_FILE _IOW('M', 0, struct mtp_file_range)
-/* Receives data from the host and writes it to a file.
- * The file is created if it does not exist.
- */
-#define MTP_RECEIVE_FILE _IOW('M', 1, struct mtp_file_range)
-/* Sets the driver mode to either MTP or PTP */
-#define MTP_SET_INTERFACE_MODE _IOW('M', 2, int)
-
-#endif /* __LINUX_USB_F_MTP_H */
diff --git a/native/android/Android.mk b/native/android/Android.mk
index 509a379..2fe0679 100644
--- a/native/android/Android.mk
+++ b/native/android/Android.mk
@@ -9,13 +9,15 @@
input.cpp \
looper.cpp \
native_activity.cpp \
- native_window.cpp
+ native_window.cpp \
+ sensor.cpp
LOCAL_SHARED_LIBRARIES := \
libcutils \
libutils \
libbinder \
libui \
+ libgui \
libsurfaceflinger_client \
libandroid_runtime
diff --git a/native/android/input.cpp b/native/android/input.cpp
index 89d53e2..a4dde51 100644
--- a/native/android/input.cpp
+++ b/native/android/input.cpp
@@ -22,6 +22,8 @@
#include <ui/InputTransport.h>
#include <utils/PollLoop.h>
+#include <android_runtime/android_app_NativeActivity.h>
+
#include <poll.h>
using android::InputEvent;
@@ -187,65 +189,21 @@
void AInputQueue_attachLooper(AInputQueue* queue, ALooper* looper,
ALooper_callbackFunc* callback, void* data) {
- queue->setPollLoop(static_cast<android::PollLoop*>(looper));
- ALooper_addFd(looper, queue->getConsumer().getChannel()->getReceivePipeFd(),
- POLLIN, callback, data);
+ queue->attachLooper(looper, callback, data);
}
void AInputQueue_detachLooper(AInputQueue* queue) {
- queue->getPollLoop()->removeCallback(
- queue->getConsumer().getChannel()->getReceivePipeFd());
+ queue->detachLooper();
}
int AInputQueue_hasEvents(AInputQueue* queue) {
- struct pollfd pfd;
-
- pfd.fd = queue->getConsumer().getChannel()->getReceivePipeFd();
- pfd.events = POLLIN;
- pfd.revents = 0;
-
- int nfd = poll(&pfd, 1, 0);
- if (nfd <= 0) return nfd;
- return pfd.revents == POLLIN ? 1 : -1;
+ return queue->hasEvents();
}
int32_t AInputQueue_getEvent(AInputQueue* queue, AInputEvent** outEvent) {
- *outEvent = NULL;
-
- int32_t res = queue->getConsumer().receiveDispatchSignal();
- if (res != android::OK) {
- LOGE("channel '%s' ~ Failed to receive dispatch signal. status=%d",
- queue->getConsumer().getChannel()->getName().string(), res);
- return -1;
- }
-
- InputEvent* myEvent = NULL;
- res = queue->consume(&myEvent);
- if (res != android::OK) {
- LOGW("channel '%s' ~ Failed to consume input event. status=%d",
- queue->getConsumer().getChannel()->getName().string(), res);
- queue->getConsumer().sendFinishedSignal();
- return -1;
- }
-
- *outEvent = myEvent;
- return 0;
+ return queue->getEvent(outEvent);
}
-void AInputQueue_finishEvent(AInputQueue* queue, AInputEvent* event,
- int handled) {
- if (!handled && ((InputEvent*)event)->getType() == INPUT_EVENT_TYPE_KEY
- && ((KeyEvent*)event)->hasDefaultAction()) {
- // The app didn't handle this, but it may have a default action
- // associated with it. We need to hand this back to Java to be
- // executed.
- queue->doDefaultKey((KeyEvent*)event);
- return;
- }
-
- int32_t res = queue->getConsumer().sendFinishedSignal();
- if (res != android::OK) {
- LOGW("Failed to send finished signal on channel '%s'. status=%d",
- queue->getConsumer().getChannel()->getName().string(), res);
- }
+void AInputQueue_finishEvent(AInputQueue* queue, AInputEvent* event, int handled) {
+ queue->finishEvent(event, handled != 0);
}
diff --git a/native/android/native_activity.cpp b/native/android/native_activity.cpp
index 509cc33..0c6823a 100644
--- a/native/android/native_activity.cpp
+++ b/native/android/native_activity.cpp
@@ -29,3 +29,11 @@
uint32_t addFlags, uint32_t removeFlags) {
android_NativeActivity_setWindowFlags(activity, addFlags, addFlags|removeFlags);
}
+
+void ANativeActivity_showSoftInput(ANativeActivity* activity, uint32_t flags) {
+ android_NativeActivity_showSoftInput(activity, flags);
+}
+
+void ANativeActivity_hideSoftInput(ANativeActivity* activity, uint32_t flags) {
+ android_NativeActivity_hideSoftInput(activity, flags);
+}
diff --git a/native/android/sensor.cpp b/native/android/sensor.cpp
new file mode 100644
index 0000000..7a3907e
--- /dev/null
+++ b/native/android/sensor.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "sensor"
+#include <utils/Log.h>
+
+#include <android/looper.h>
+#include <android/sensor.h>
+
+#include <utils/RefBase.h>
+#include <utils/PollLoop.h>
+#include <utils/Timers.h>
+
+#include <gui/Sensor.h>
+#include <gui/SensorManager.h>
+#include <gui/SensorEventQueue.h>
+
+#include <poll.h>
+
+using android::sp;
+using android::Sensor;
+using android::SensorManager;
+using android::SensorEventQueue;
+using android::String8;
+
+/*****************************************************************************/
+
+ASensorManager* ASensorManager_getInstance()
+{
+ return &SensorManager::getInstance();
+}
+
+int ASensorManager_getSensorList(ASensorManager* manager, ASensor** list)
+{
+ Sensor* l;
+ int c = static_cast<SensorManager*>(manager)->getSensorList(&l);
+ if (list) {
+ *list = l;
+ }
+ return c;
+}
+
+ASensor* ASensorManager_getDefaultSensor(ASensorManager* manager, int type)
+{
+ return static_cast<SensorManager*>(manager)->getDefaultSensor(type);
+}
+
+ASensorEventQueue* ASensorManager_createEventQueue(ASensorManager* manager,
+ ALooper* looper, ALooper_callbackFunc* callback, void* data)
+{
+ sp<SensorEventQueue> queue =
+ static_cast<SensorManager*>(manager)->createEventQueue();
+ if (queue != 0) {
+ ALooper_addFd(looper, queue->getFd(), POLLIN, callback, data);
+ queue->looper = looper;
+ queue->incStrong(manager);
+ }
+ return static_cast<ASensorEventQueue*>(queue.get());
+}
+
+int ASensorManager_destroyEventQueue(ASensorManager* manager,
+ ASensorEventQueue* inQueue)
+{
+ sp<SensorEventQueue> queue = static_cast<SensorEventQueue*>(inQueue);
+ ALooper_removeFd(queue->looper, queue->getFd());
+ queue->decStrong(manager);
+ return 0;
+}
+
+/*****************************************************************************/
+
+int ASensorEventQueue_enableSensor(ASensorEventQueue* queue, ASensor* sensor)
+{
+ return static_cast<SensorEventQueue*>(queue)->enableSensor(
+ static_cast<Sensor*>(sensor));
+}
+
+int ASensorEventQueue_disableSensor(ASensorEventQueue* queue, ASensor* sensor)
+{
+ return static_cast<SensorEventQueue*>(queue)->disableSensor(
+ static_cast<Sensor*>(sensor));
+}
+
+int ASensorEventQueue_setEventRate(ASensorEventQueue* queue, ASensor* sensor,
+ int32_t usec)
+{
+ return static_cast<SensorEventQueue*>(queue)->setEventRate(
+ static_cast<Sensor*>(sensor), us2ns(usec));
+}
+
+int ASensorEventQueue_hasEvents(ASensorEventQueue* queue)
+{
+ struct pollfd pfd;
+ pfd.fd = static_cast<SensorEventQueue*>(queue)->getFd();
+ pfd.events = POLLIN;
+ pfd.revents = 0;
+
+ int nfd = poll(&pfd, 1, 0);
+
+ if (nfd < 0)
+ return -errno;
+
+ if (pfd.revents != POLLIN)
+ return -1;
+
+ return (nfd == 0) ? 0 : 1;
+}
+
+ssize_t ASensorEventQueue_getEvents(ASensorEventQueue* queue,
+ ASensorEvent* events, size_t count)
+{
+ return static_cast<SensorEventQueue*>(queue)->read(events, count);
+}
+
+
+/*****************************************************************************/
+
+const char* ASensor_getName(ASensor* sensor)
+{
+ return static_cast<Sensor*>(sensor)->getName().string();
+}
+
+const char* ASensor_getVendor(ASensor* sensor)
+{
+ return static_cast<Sensor*>(sensor)->getVendor().string();
+}
+
+int ASensor_getType(ASensor* sensor)
+{
+ return static_cast<Sensor*>(sensor)->getType();
+}
+
+float ASensor_getResolution(ASensor* sensor)
+{
+ return static_cast<Sensor*>(sensor)->getResolution();
+}
+
diff --git a/native/glue/threaded_app/Android.mk b/native/glue/threaded_app/Android.mk
deleted file mode 100644
index cfc9b2a..0000000
--- a/native/glue/threaded_app/Android.mk
+++ /dev/null
@@ -1,18 +0,0 @@
-BASE_PATH := $(call my-dir)
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-# our source files
-#
-LOCAL_SRC_FILES:= \
- threaded_app.c
-
-LOCAL_C_INCLUDES += \
- frameworks/base/native/include \
- frameworks/base/core/jni/android \
- dalvik/libnativehelper/include/nativehelper
-
-LOCAL_MODULE:= libthreaded_app
-
-include $(BUILD_STATIC_LIBRARY)
diff --git a/native/glue/threaded_app/threaded_app.c b/native/glue/threaded_app/threaded_app.c
deleted file mode 100644
index 2411e93..0000000
--- a/native/glue/threaded_app/threaded_app.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <jni.h>
-
-#include <errno.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/resource.h>
-
-#include <android_glue/threaded_app.h>
-
-#include <android/log.h>
-
-#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "threaded_app", __VA_ARGS__))
-#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "threaded_app", __VA_ARGS__))
-
-int8_t android_app_read_cmd(struct android_app* android_app) {
- int8_t cmd;
- if (read(android_app->msgread, &cmd, sizeof(cmd)) == sizeof(cmd)) {
- return cmd;
- } else {
- LOGW("No data on command pipe!");
- }
- return -1;
-}
-
-int32_t android_app_exec_cmd(struct android_app* android_app, int8_t cmd) {
- switch (cmd) {
- case APP_CMD_INPUT_CHANGED:
- LOGI("APP_CMD_INPUT_CHANGED\n");
- pthread_mutex_lock(&android_app->mutex);
- if (android_app->inputQueue != NULL) {
- AInputQueue_detachLooper(android_app->inputQueue);
- }
- android_app->inputQueue = android_app->pendingInputQueue;
- if (android_app->inputQueue != NULL) {
- LOGI("Attaching input queue to looper");
- AInputQueue_attachLooper(android_app->inputQueue,
- android_app->looper, NULL, (void*)LOOPER_ID_EVENT);
- }
- pthread_cond_broadcast(&android_app->cond);
- pthread_mutex_unlock(&android_app->mutex);
- break;
-
- case APP_CMD_WINDOW_CHANGED:
- LOGI("APP_CMD_WINDOW_CHANGED\n");
- pthread_mutex_lock(&android_app->mutex);
- android_app->window = android_app->pendingWindow;
- pthread_cond_broadcast(&android_app->cond);
- pthread_mutex_unlock(&android_app->mutex);
- break;
-
- case APP_CMD_START:
- case APP_CMD_RESUME:
- case APP_CMD_PAUSE:
- case APP_CMD_STOP:
- LOGI("activityState=%d\n", cmd);
- pthread_mutex_lock(&android_app->mutex);
- android_app->activityState = cmd;
- pthread_cond_broadcast(&android_app->cond);
- pthread_mutex_unlock(&android_app->mutex);
- break;
-
- case APP_CMD_DESTROY:
- LOGI("APP_CMD_DESTROY\n");
- android_app->destroyRequested = 1;
- break;
- }
-
- return android_app->destroyRequested ? 0 : 1;
-}
-
-static void android_app_destroy(struct android_app* android_app) {
- LOGI("android_app_destroy!");
- pthread_mutex_lock(&android_app->mutex);
- if (android_app->inputQueue != NULL) {
- AInputQueue_detachLooper(android_app->inputQueue);
- }
- android_app->destroyed = 1;
- pthread_cond_broadcast(&android_app->cond);
- pthread_mutex_unlock(&android_app->mutex);
- // Can't touch android_app object after this.
-}
-
-static void* android_app_entry(void* param) {
- struct android_app* android_app = (struct android_app*)param;
-
- ALooper* looper = ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
- ALooper_addFd(looper, android_app->msgread, POLLIN, NULL, (void*)LOOPER_ID_MAIN);
- android_app->looper = looper;
-
- pthread_mutex_lock(&android_app->mutex);
- android_app->running = 1;
- pthread_cond_broadcast(&android_app->cond);
- pthread_mutex_unlock(&android_app->mutex);
-
- android_main(android_app);
-
- android_app_destroy(android_app);
- return NULL;
-}
-
-// --------------------------------------------------------------------
-// Native activity interaction (called from main thread)
-// --------------------------------------------------------------------
-
-static struct android_app* android_app_create(ANativeActivity* activity) {
- struct android_app* android_app = (struct android_app*)malloc(sizeof(struct android_app));
- memset(android_app, 0, sizeof(struct android_app));
- android_app->activity = activity;
-
- pthread_mutex_init(&android_app->mutex, NULL);
- pthread_cond_init(&android_app->cond, NULL);
-
- int msgpipe[2];
- if (pipe(msgpipe)) {
- LOGI("could not create pipe: %s", strerror(errno));
- }
- android_app->msgread = msgpipe[0];
- android_app->msgwrite = msgpipe[1];
-
- pthread_attr_t attr;
- pthread_attr_init(&attr);
- pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
- pthread_create(&android_app->thread, &attr, android_app_entry, android_app);
-
- // Wait for thread to start.
- pthread_mutex_lock(&android_app->mutex);
- while (!android_app->running) {
- pthread_cond_wait(&android_app->cond, &android_app->mutex);
- }
- pthread_mutex_unlock(&android_app->mutex);
-
- return android_app;
-}
-
-static void android_app_write_cmd(struct android_app* android_app, int8_t cmd) {
- if (write(android_app->msgwrite, &cmd, sizeof(cmd)) != sizeof(cmd)) {
- LOGI("Failure writing android_app cmd: %s\n", strerror(errno));
- }
-}
-
-static void android_app_set_input(struct android_app* android_app, AInputQueue* inputQueue) {
- pthread_mutex_lock(&android_app->mutex);
- android_app->pendingInputQueue = inputQueue;
- android_app_write_cmd(android_app, APP_CMD_INPUT_CHANGED);
- while (android_app->inputQueue != android_app->pendingInputQueue) {
- pthread_cond_wait(&android_app->cond, &android_app->mutex);
- }
- pthread_mutex_unlock(&android_app->mutex);
-}
-
-static void android_app_set_window(struct android_app* android_app, ANativeWindow* window) {
- pthread_mutex_lock(&android_app->mutex);
- android_app->pendingWindow = window;
- android_app_write_cmd(android_app, APP_CMD_WINDOW_CHANGED);
- while (android_app->window != android_app->pendingWindow) {
- pthread_cond_wait(&android_app->cond, &android_app->mutex);
- }
- pthread_mutex_unlock(&android_app->mutex);
-}
-
-static void android_app_set_activity_state(struct android_app* android_app, int8_t cmd) {
- pthread_mutex_lock(&android_app->mutex);
- android_app_write_cmd(android_app, cmd);
- while (android_app->activityState != cmd) {
- pthread_cond_wait(&android_app->cond, &android_app->mutex);
- }
- pthread_mutex_unlock(&android_app->mutex);
-}
-
-static void android_app_free(struct android_app* android_app) {
- pthread_mutex_lock(&android_app->mutex);
- android_app_write_cmd(android_app, APP_CMD_DESTROY);
- while (!android_app->destroyed) {
- pthread_cond_wait(&android_app->cond, &android_app->mutex);
- }
- pthread_mutex_unlock(&android_app->mutex);
-
- close(android_app->msgread);
- close(android_app->msgwrite);
- pthread_cond_destroy(&android_app->cond);
- pthread_mutex_destroy(&android_app->mutex);
- free(android_app);
-}
-
-static void onDestroy(ANativeActivity* activity) {
- LOGI("Destroy: %p\n", activity);
- android_app_free((struct android_app*)activity->instance);
-}
-
-static void onStart(ANativeActivity* activity) {
- LOGI("Start: %p\n", activity);
- android_app_set_activity_state((struct android_app*)activity->instance, APP_CMD_START);
-}
-
-static void onResume(ANativeActivity* activity) {
- LOGI("Resume: %p\n", activity);
- android_app_set_activity_state((struct android_app*)activity->instance, APP_CMD_RESUME);
-}
-
-static void* onSaveInstanceState(ANativeActivity* activity, size_t* outLen) {
- LOGI("SaveInstanceState: %p\n", activity);
- return NULL;
-}
-
-static void onPause(ANativeActivity* activity) {
- LOGI("Pause: %p\n", activity);
- android_app_set_activity_state((struct android_app*)activity->instance, APP_CMD_PAUSE);
-}
-
-static void onStop(ANativeActivity* activity) {
- LOGI("Stop: %p\n", activity);
- android_app_set_activity_state((struct android_app*)activity->instance, APP_CMD_STOP);
-}
-
-static void onLowMemory(ANativeActivity* activity) {
- LOGI("LowMemory: %p\n", activity);
-}
-
-static void onWindowFocusChanged(ANativeActivity* activity, int focused) {
- LOGI("WindowFocusChanged: %p -- %d\n", activity, focused);
- android_app_write_cmd((struct android_app*)activity->instance,
- focused ? APP_CMD_GAINED_FOCUS : APP_CMD_LOST_FOCUS);
-}
-
-static void onNativeWindowCreated(ANativeActivity* activity, ANativeWindow* window) {
- LOGI("NativeWindowCreated: %p -- %p\n", activity, window);
- android_app_set_window((struct android_app*)activity->instance, window);
-}
-
-static void onNativeWindowDestroyed(ANativeActivity* activity, ANativeWindow* window) {
- LOGI("NativeWindowDestroyed: %p -- %p\n", activity, window);
- android_app_set_window((struct android_app*)activity->instance, NULL);
-}
-
-static void onInputQueueCreated(ANativeActivity* activity, AInputQueue* queue) {
- LOGI("InputQueueCreated: %p -- %p\n", activity, queue);
- android_app_set_input((struct android_app*)activity->instance, queue);
-}
-
-static void onInputQueueDestroyed(ANativeActivity* activity, AInputQueue* queue) {
- LOGI("InputQueueDestroyed: %p -- %p\n", activity, queue);
- android_app_set_input((struct android_app*)activity->instance, NULL);
-}
-
-void ANativeActivity_onCreate(ANativeActivity* activity,
- void* savedState, size_t savedStateSize) {
- LOGI("Creating: %p\n", activity);
- activity->callbacks->onDestroy = onDestroy;
- activity->callbacks->onStart = onStart;
- activity->callbacks->onResume = onResume;
- activity->callbacks->onSaveInstanceState = onSaveInstanceState;
- activity->callbacks->onPause = onPause;
- activity->callbacks->onStop = onStop;
- activity->callbacks->onLowMemory = onLowMemory;
- activity->callbacks->onWindowFocusChanged = onWindowFocusChanged;
- activity->callbacks->onNativeWindowCreated = onNativeWindowCreated;
- activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyed;
- activity->callbacks->onInputQueueCreated = onInputQueueCreated;
- activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyed;
-
- activity->instance = android_app_create(activity);
-}
diff --git a/native/include/android/input.h b/native/include/android/input.h
index 014b6a3..25dd68e 100644
--- a/native/include/android/input.h
+++ b/native/include/android/input.h
@@ -68,7 +68,10 @@
INPUT_DEVICE_CLASS_TOUCHSCREEN_MT= 0x00000010,
/* The input device is a directional pad. */
- INPUT_DEVICE_CLASS_DPAD = 0x00000020
+ INPUT_DEVICE_CLASS_DPAD = 0x00000020,
+
+ /* The input device is a gamepad (implies keyboard). */
+ INPUT_DEVICE_CLASS_GAMEPAD = 0x00000040
};
/*
diff --git a/native/include/android/keycodes.h b/native/include/android/keycodes.h
index 36855c5..496eccc0 100644
--- a/native/include/android/keycodes.h
+++ b/native/include/android/keycodes.h
@@ -41,114 +41,122 @@
/*
* Key codes.
- *
- * XXX: The declarations in <ui/KeycodeLabel.h> should be updated to use these instead.
- * We should probably move this into android/keycodes.h and add some new API for
- * getting labels so that we can remove the other tables also in KeycodeLabel.h.
*/
enum {
- KEYCODE_UNKNOWN = 0,
- KEYCODE_SOFT_LEFT = 1,
- KEYCODE_SOFT_RIGHT = 2,
- KEYCODE_HOME = 3,
- KEYCODE_BACK = 4,
- KEYCODE_CALL = 5,
- KEYCODE_ENDCALL = 6,
- KEYCODE_0 = 7,
- KEYCODE_1 = 8,
- KEYCODE_2 = 9,
- KEYCODE_3 = 10,
- KEYCODE_4 = 11,
- KEYCODE_5 = 12,
- KEYCODE_6 = 13,
- KEYCODE_7 = 14,
- KEYCODE_8 = 15,
- KEYCODE_9 = 16,
- KEYCODE_STAR = 17,
- KEYCODE_POUND = 18,
- KEYCODE_DPAD_UP = 19,
- KEYCODE_DPAD_DOWN = 20,
- KEYCODE_DPAD_LEFT = 21,
- KEYCODE_DPAD_RIGHT = 22,
- KEYCODE_DPAD_CENTER = 23,
- KEYCODE_VOLUME_UP = 24,
- KEYCODE_VOLUME_DOWN = 25,
- KEYCODE_POWER = 26,
- KEYCODE_CAMERA = 27,
- KEYCODE_CLEAR = 28,
- KEYCODE_A = 29,
- KEYCODE_B = 30,
- KEYCODE_C = 31,
- KEYCODE_D = 32,
- KEYCODE_E = 33,
- KEYCODE_F = 34,
- KEYCODE_G = 35,
- KEYCODE_H = 36,
- KEYCODE_I = 37,
- KEYCODE_J = 38,
- KEYCODE_K = 39,
- KEYCODE_L = 40,
- KEYCODE_M = 41,
- KEYCODE_N = 42,
- KEYCODE_O = 43,
- KEYCODE_P = 44,
- KEYCODE_Q = 45,
- KEYCODE_R = 46,
- KEYCODE_S = 47,
- KEYCODE_T = 48,
- KEYCODE_U = 49,
- KEYCODE_V = 50,
- KEYCODE_W = 51,
- KEYCODE_X = 52,
- KEYCODE_Y = 53,
- KEYCODE_Z = 54,
- KEYCODE_COMMA = 55,
- KEYCODE_PERIOD = 56,
- KEYCODE_ALT_LEFT = 57,
- KEYCODE_ALT_RIGHT = 58,
- KEYCODE_SHIFT_LEFT = 59,
- KEYCODE_SHIFT_RIGHT = 60,
- KEYCODE_TAB = 61,
- KEYCODE_SPACE = 62,
- KEYCODE_SYM = 63,
- KEYCODE_EXPLORER = 64,
- KEYCODE_ENVELOPE = 65,
- KEYCODE_ENTER = 66,
- KEYCODE_DEL = 67,
- KEYCODE_GRAVE = 68,
- KEYCODE_MINUS = 69,
- KEYCODE_EQUALS = 70,
- KEYCODE_LEFT_BRACKET = 71,
- KEYCODE_RIGHT_BRACKET = 72,
- KEYCODE_BACKSLASH = 73,
- KEYCODE_SEMICOLON = 74,
- KEYCODE_APOSTROPHE = 75,
- KEYCODE_SLASH = 76,
- KEYCODE_AT = 77,
- KEYCODE_NUM = 78,
- KEYCODE_HEADSETHOOK = 79,
- KEYCODE_FOCUS = 80, // *Camera* focus
- KEYCODE_PLUS = 81,
- KEYCODE_MENU = 82,
- KEYCODE_NOTIFICATION = 83,
- KEYCODE_SEARCH = 84,
- KEYCODE_MEDIA_PLAY_PAUSE= 85,
- KEYCODE_MEDIA_STOP = 86,
- KEYCODE_MEDIA_NEXT = 87,
- KEYCODE_MEDIA_PREVIOUS = 88,
- KEYCODE_MEDIA_REWIND = 89,
- KEYCODE_MEDIA_FAST_FORWARD = 90,
- KEYCODE_MUTE = 91,
- KEYCODE_PAGE_UP = 92,
- KEYCODE_PAGE_DOWN = 93
+ AKEYCODE_UNKNOWN = 0,
+ AKEYCODE_SOFT_LEFT = 1,
+ AKEYCODE_SOFT_RIGHT = 2,
+ AKEYCODE_HOME = 3,
+ AKEYCODE_BACK = 4,
+ AKEYCODE_CALL = 5,
+ AKEYCODE_ENDCALL = 6,
+ AKEYCODE_0 = 7,
+ AKEYCODE_1 = 8,
+ AKEYCODE_2 = 9,
+ AKEYCODE_3 = 10,
+ AKEYCODE_4 = 11,
+ AKEYCODE_5 = 12,
+ AKEYCODE_6 = 13,
+ AKEYCODE_7 = 14,
+ AKEYCODE_8 = 15,
+ AKEYCODE_9 = 16,
+ AKEYCODE_STAR = 17,
+ AKEYCODE_POUND = 18,
+ AKEYCODE_DPAD_UP = 19,
+ AKEYCODE_DPAD_DOWN = 20,
+ AKEYCODE_DPAD_LEFT = 21,
+ AKEYCODE_DPAD_RIGHT = 22,
+ AKEYCODE_DPAD_CENTER = 23,
+ AKEYCODE_VOLUME_UP = 24,
+ AKEYCODE_VOLUME_DOWN = 25,
+ AKEYCODE_POWER = 26,
+ AKEYCODE_CAMERA = 27,
+ AKEYCODE_CLEAR = 28,
+ AKEYCODE_A = 29,
+ AKEYCODE_B = 30,
+ AKEYCODE_C = 31,
+ AKEYCODE_D = 32,
+ AKEYCODE_E = 33,
+ AKEYCODE_F = 34,
+ AKEYCODE_G = 35,
+ AKEYCODE_H = 36,
+ AKEYCODE_I = 37,
+ AKEYCODE_J = 38,
+ AKEYCODE_K = 39,
+ AKEYCODE_L = 40,
+ AKEYCODE_M = 41,
+ AKEYCODE_N = 42,
+ AKEYCODE_O = 43,
+ AKEYCODE_P = 44,
+ AKEYCODE_Q = 45,
+ AKEYCODE_R = 46,
+ AKEYCODE_S = 47,
+ AKEYCODE_T = 48,
+ AKEYCODE_U = 49,
+ AKEYCODE_V = 50,
+ AKEYCODE_W = 51,
+ AKEYCODE_X = 52,
+ AKEYCODE_Y = 53,
+ AKEYCODE_Z = 54,
+ AKEYCODE_COMMA = 55,
+ AKEYCODE_PERIOD = 56,
+ AKEYCODE_ALT_LEFT = 57,
+ AKEYCODE_ALT_RIGHT = 58,
+ AKEYCODE_SHIFT_LEFT = 59,
+ AKEYCODE_SHIFT_RIGHT = 60,
+ AKEYCODE_TAB = 61,
+ AKEYCODE_SPACE = 62,
+ AKEYCODE_SYM = 63,
+ AKEYCODE_EXPLORER = 64,
+ AKEYCODE_ENVELOPE = 65,
+ AKEYCODE_ENTER = 66,
+ AKEYCODE_DEL = 67,
+ AKEYCODE_GRAVE = 68,
+ AKEYCODE_MINUS = 69,
+ AKEYCODE_EQUALS = 70,
+ AKEYCODE_LEFT_BRACKET = 71,
+ AKEYCODE_RIGHT_BRACKET = 72,
+ AKEYCODE_BACKSLASH = 73,
+ AKEYCODE_SEMICOLON = 74,
+ AKEYCODE_APOSTROPHE = 75,
+ AKEYCODE_SLASH = 76,
+ AKEYCODE_AT = 77,
+ AKEYCODE_NUM = 78,
+ AKEYCODE_HEADSETHOOK = 79,
+ AKEYCODE_FOCUS = 80, // *Camera* focus
+ AKEYCODE_PLUS = 81,
+ AKEYCODE_MENU = 82,
+ AKEYCODE_NOTIFICATION = 83,
+ AKEYCODE_SEARCH = 84,
+ AKEYCODE_MEDIA_PLAY_PAUSE= 85,
+ AKEYCODE_MEDIA_STOP = 86,
+ AKEYCODE_MEDIA_NEXT = 87,
+ AKEYCODE_MEDIA_PREVIOUS = 88,
+ AKEYCODE_MEDIA_REWIND = 89,
+ AKEYCODE_MEDIA_FAST_FORWARD = 90,
+ AKEYCODE_MUTE = 91,
+ AKEYCODE_PAGE_UP = 92,
+ AKEYCODE_PAGE_DOWN = 93,
+ AKEYCODE_PICTSYMBOLS = 94,
+ AKEYCODE_SWITCH_CHARSET = 95,
+ AKEYCODE_BUTTON_A = 96,
+ AKEYCODE_BUTTON_B = 97,
+ AKEYCODE_BUTTON_C = 98,
+ AKEYCODE_BUTTON_X = 99,
+ AKEYCODE_BUTTON_Y = 100,
+ AKEYCODE_BUTTON_Z = 101,
+ AKEYCODE_BUTTON_L1 = 102,
+ AKEYCODE_BUTTON_R1 = 103,
+ AKEYCODE_BUTTON_L2 = 104,
+ AKEYCODE_BUTTON_R2 = 105,
+ AKEYCODE_BUTTON_THUMBL = 106,
+ AKEYCODE_BUTTON_THUMBR = 107,
+ AKEYCODE_BUTTON_START = 108,
+ AKEYCODE_BUTTON_SELECT = 109,
+ AKEYCODE_BUTTON_MODE = 110,
- /* NOTE: If you add a new keycode here you must also add it to:
- * native/include/android/keycodes.h
- * frameworks/base/include/ui/KeycodeLabels.h
- * frameworks/base/core/java/android/view/KeyEvent.java
- * tools/puppet_master/PuppetMaster.nav_keys.py
- * frameworks/base/core/res/res/values/attrs.xml
- */
+ // NOTE: If you add a new keycode here you must also add it to several other files.
+ // Refer to frameworks/base/core/java/android/view/KeyEvent.java for the full list.
};
#ifdef __cplusplus
diff --git a/native/include/android/native_activity.h b/native/include/android/native_activity.h
index d0ff052..ea6f05f 100644
--- a/native/include/android/native_activity.h
+++ b/native/include/android/native_activity.h
@@ -147,6 +147,21 @@
void (*onNativeWindowCreated)(ANativeActivity* activity, ANativeWindow* window);
/**
+ * The drawing window for this native activity has been resized. You should
+ * retrieve the new size from the window and ensure that your rendering in
+ * it now matches.
+ */
+ void (*onNativeWindowResized)(ANativeActivity* activity, ANativeWindow* window);
+
+ /**
+ * The drawing window for this native activity needs to be redrawn. To avoid
+ * transient artifacts during screen changes (such as resizing after rotation),
+ * applications should not return from this function until they have finished
+ * drawing their window in its current state.
+ */
+ void (*onNativeWindowRedrawNeeded)(ANativeActivity* activity, ANativeWindow* window);
+
+ /**
* The drawing window for this native activity is going to be destroyed.
* You MUST ensure that you do not touch the window object after returning
* from this function: in the common case of drawing to the window from
@@ -170,6 +185,11 @@
void (*onInputQueueDestroyed)(ANativeActivity* activity, AInputQueue* queue);
/**
+ * The rectangle in the window in which content should be placed has changed.
+ */
+ void (*onContentRectChanged)(ANativeActivity* activity, const ARect* rect);
+
+ /**
* The system is running low on memory. Use this callback to release
* resources you do not need, to help the system avoid killing more
* important processes.
@@ -197,6 +217,28 @@
void ANativeActivity_setWindowFlags(ANativeActivity* activity,
uint32_t addFlags, uint32_t removeFlags);
+/**
+ * Flags for ANativeActivity_showSoftInput; see the Java InputMethodManager
+ * API for documentation.
+ */
+enum {
+ ANATIVEACTIVITY_SHOW_SOFT_INPUT_IMPLICIT = 0x0001,
+ ANATIVEACTIVITY_SHOW_SOFT_INPUT_FORCED = 0x0002,
+};
+
+void ANativeActivity_showSoftInput(ANativeActivity* activity, uint32_t flags);
+
+/**
+ * Flags for ANativeActivity_hideSoftInput; see the Java InputMethodManager
+ * API for documentation.
+ */
+enum {
+ ANATIVEACTIVITY_HIDE_SOFT_INPUT_IMPLICIT_ONLY = 0x0001,
+ ANATIVEACTIVITY_HIDE_SOFT_INPUT_NOT_ALWAYS = 0x0002,
+};
+
+void ANativeActivity_hideSoftInput(ANativeActivity* activity, uint32_t flags);
+
#ifdef __cplusplus
};
#endif
diff --git a/native/include/android/sensor.h b/native/include/android/sensor.h
new file mode 100644
index 0000000..4291d3e
--- /dev/null
+++ b/native/include/android/sensor.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_SENSOR_H
+#define ANDROID_SENSOR_H
+
+/******************************************************************
+ *
+ * IMPORTANT NOTICE:
+ *
+ * This file is part of Android's set of stable system headers
+ * exposed by the Android NDK (Native Development Kit).
+ *
+ * Third-party source AND binary code relies on the definitions
+ * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
+ *
+ * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
+ * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
+ * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
+ * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
+ */
+
+/*
+ * Structures and functions to receive and process sensor events in
+ * native code.
+ *
+ */
+
+#include <sys/types.h>
+
+#include <android/looper.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Sensor types
+ * (keep in sync with hardware/sensor.h)
+ */
+
+enum {
+ ASENSOR_TYPE_ACCELEROMETER = 1,
+ ASENSOR_TYPE_MAGNETIC_FIELD = 2,
+ ASENSOR_TYPE_GYROSCOPE = 4,
+ ASENSOR_TYPE_LIGHT = 5,
+ ASENSOR_TYPE_PROXIMITY = 8
+};
+
+/*
+ * Sensor accuracy measure
+ */
+enum {
+ ASENSOR_STATUS_UNRELIABLE = 0,
+ ASENSOR_STATUS_ACCURACY_LOW = 1,
+ ASENSOR_STATUS_ACCURACY_MEDIUM = 2,
+ ASENSOR_STATUS_ACCURACY_HIGH = 3
+};
+
+/*
+ * A few useful constants
+ */
+
+/* Earth's gravity in m/s^2 */
+#define ASENSOR_STANDARD_GRAVITY (9.80665f)
+/* Maximum magnetic field on Earth's surface in uT */
+#define ASENSOR_MAGNETIC_FIELD_EARTH_MAX (60.0f)
+/* Minimum magnetic field on Earth's surface in uT */
+#define ASENSOR_MAGNETIC_FIELD_EARTH_MIN (30.0f)
+
+/*
+ * A sensor event.
+ */
+
+typedef struct ASensorVector {
+ union {
+ float v[3];
+ struct {
+ float x;
+ float y;
+ float z;
+ };
+ };
+ int8_t status;
+ uint8_t reserved[3];
+} ASensorVector;
+
+typedef struct ASensorEvent {
+ int sensor;
+ int32_t reserved0;
+ union {
+ float data[16];
+ ASensorVector acceleration;
+ ASensorVector magnetic;
+ float temperature;
+ float distance;
+ float light;
+ };
+ int64_t timestamp;
+ int32_t reserved1[4];
+} ASensorEvent;
+
+
+struct ASensorManager;
+typedef struct ASensorManager ASensorManager;
+
+struct ASensorEventQueue;
+typedef struct ASensorEventQueue ASensorEventQueue;
+
+struct ASensor;
+typedef struct ASensor ASensor;
+
+/*****************************************************************************/
+
+/*
+ * Get a reference to the sensor manager. ASensorManager is a singleton.
+ *
+ * Example:
+ *
+ * ASensorManager* sensorManager = ASensorManager_getInstance();
+ *
+ */
+ASensorManager* ASensorManager_getInstance();
+
+
+/*
+ * Returns the list of available sensors.
+ */
+int ASensorManager_getSensorList(ASensorManager* manager, ASensor** list);
+
+/*
+ * Returns the default sensor for the given type, or NULL if no sensor
+ * of that type exists.
+ */
+ASensor* ASensorManager_getDefaultSensor(ASensorManager* manager, int type);
+
+/*
+ * Creates a new sensor event queue and associates it with a looper.
+ */
+ASensorEventQueue* ASensorManager_createEventQueue(ASensorManager* manager,
+ ALooper* looper, ALooper_callbackFunc* callback, void* data);
+
+/*
+ * Destroys the event queue and frees all resources associated with it.
+ */
+int ASensorManager_destroyEventQueue(ASensorManager* manager, ASensorEventQueue* queue);
+
+
+/*****************************************************************************/
+
+/*
+ * Enable the selected sensor. Returns a negative error code on failure.
+ */
+int ASensorEventQueue_enableSensor(ASensorEventQueue* queue, ASensor* sensor);
+
+/*
+ * Disable the selected sensor. Returns a negative error code on failure.
+ */
+int ASensorEventQueue_disableSensor(ASensorEventQueue* queue, ASensor* sensor);
+
+/*
+ * Sets the delivery rate of events in microseconds for the given sensor.
+ * Note that this is a hint only; generally events will arrive at a higher
+ * rate.
+ * Returns a negative error code on failure.
+ */
+int ASensorEventQueue_setEventRate(ASensorEventQueue* queue, ASensor* sensor, int32_t usec);
+
+/*
+ * Returns 1 if the sensor queue has one or more events
+ * available, 0 if it does not have events, and a negative
+ * value if there is an error.
+ */
+int ASensorEventQueue_hasEvents(ASensorEventQueue* queue);
+
+/*
+ * Returns the next available events from the queue. Returns a negative
+ * value if no events are available or an error has occurred; otherwise,
+ * returns the number of events read.
+ *
+ * Examples:
+ * ASensorEvent event;
+ * ssize_t numEvent = ASensorEventQueue_getEvents(queue, &event, 1);
+ *
+ * ASensorEvent eventBuffer[8];
+ * ssize_t numEvent = ASensorEventQueue_getEvents(queue, eventBuffer, 8);
+ *
+ */
+ssize_t ASensorEventQueue_getEvents(ASensorEventQueue* queue,
+ ASensorEvent* events, size_t count);
+
+
+/*****************************************************************************/
+
+/*
+ * Returns this sensor's name (non-localized)
+ */
+const char* ASensor_getName(ASensor* sensor);
+
+/*
+ * Returns this sensor's vendor's name (non-localized)
+ */
+const char* ASensor_getVendor(ASensor* sensor);
+
+/*
+ * Returns this sensor's type
+ */
+int ASensor_getType(ASensor* sensor);
+
+/*
+ * Returns this sensor's resolution
+ */
+float ASensor_getResolution(ASensor* sensor);
+
+
+#ifdef __cplusplus
+};
+#endif
+
+#endif // ANDROID_SENSOR_H
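
For context, a rough usage sketch of the sensor API declared above: grab the manager singleton, pick the default accelerometer, attach an event queue to the calling thread's looper, and drain events. ALooper_prepare() and ALOOPER_PREPARE_ALLOW_NON_CALLBACKS are assumed from <android/looper.h>; the event rate and loop bounds are arbitrary choices for illustration.

    #include <android/looper.h>
    #include <android/sensor.h>
    #include <stdio.h>
    #include <unistd.h>

    void dump_accelerometer_samples(void) {
        ASensorManager* mgr = ASensorManager_getInstance();
        ASensor* accel = ASensorManager_getDefaultSensor(mgr, ASENSOR_TYPE_ACCELEROMETER);
        if (accel == NULL) return;  /* no accelerometer on this device */

        /* A looper is required by createEventQueue; no callback is installed here. */
        ALooper* looper = ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
        ASensorEventQueue* queue = ASensorManager_createEventQueue(mgr, looper, NULL, NULL);

        ASensorEventQueue_enableSensor(queue, accel);
        ASensorEventQueue_setEventRate(queue, accel, 20000 /* usec; a hint only */);

        for (int i = 0; i < 50; i++) {
            if (ASensorEventQueue_hasEvents(queue) > 0) {
                ASensorEvent events[8];
                ssize_t n = ASensorEventQueue_getEvents(queue, events, 8);
                for (ssize_t j = 0; j < n; j++) {
                    printf("%s: x=%f y=%f z=%f\n", ASensor_getName(accel),
                           events[j].acceleration.x,
                           events[j].acceleration.y,
                           events[j].acceleration.z);
                }
            }
            usleep(20000);
        }

        ASensorEventQueue_disableSensor(queue, accel);
        ASensorManager_destroyEventQueue(mgr, queue);
    }
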
diff --git a/native/include/android/tts.h b/native/include/android/tts.h
index e5c99f7..fb15108 100644
--- a/native/include/android/tts.h
+++ b/native/include/android/tts.h
@@ -87,6 +87,11 @@
*/
extern android_tts_engine_t *android_getTtsEngine();
+/* Including the old version for legacy support (Froyo compatibility).
+ * This should return the same thing as android_getTtsEngine.
+ */
+extern "C" android_tts_engine_t *getTtsEngine();
+
// A callback type used to notify the framework of new synthesized
// audio samples; status will be SYNTH_DONE for the last sample of
// the last request, or SYNTH_PENDING otherwise.
diff --git a/native/include/android_glue/threaded_app.h b/native/include/android_glue/threaded_app.h
deleted file mode 100644
index adfdbea..0000000
--- a/native/include/android_glue/threaded_app.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <poll.h>
-#include <pthread.h>
-#include <sched.h>
-
-#include <android/native_activity.h>
-#include <android/looper.h>
-
-/**
- * This is the interface for the standard glue code of a threaded
- * application. In this model, the application's code is running
- * in its own thread separate from the main thread of the process.
- * It is not required that this thread be associated with the Java
- * VM, although it will need to be in order to make JNI calls any
- * Java objects.
- */
-struct android_app {
- // The application can place a pointer to its own state object
- // here if it likes.
- void* userData;
-
- // The ANativeActivity object instance that this app is running in.
- ANativeActivity* activity;
-
- // The ALooper associated with the app's thread.
- ALooper* looper;
-
- // When non-NULL, this is the input queue from which the app will
- // receive user input events.
- AInputQueue* inputQueue;
-
- // When non-NULL, this is the window surface that the app can draw in.
- ANativeWindow* window;
-
- // Current state of the app's activity. May be either APP_CMD_START,
- // APP_CMD_RESUME, APP_CMD_PAUSE, or APP_CMD_STOP; see below.
- int activityState;
-
- // -------------------------------------------------
- // Below are "private" implementation of the glue code.
-
- pthread_mutex_t mutex;
- pthread_cond_t cond;
-
- int msgread;
- int msgwrite;
-
- pthread_t thread;
-
- // This is non-zero when the application's NativeActivity is being
- // destroyed and waiting for the app thread to complete.
- int destroyRequested;
-
- int running;
- int destroyed;
- AInputQueue* pendingInputQueue;
- ANativeWindow* pendingWindow;
-};
-
-enum {
- /**
- * Looper data ID of commands coming from the app's main thread.
- * These can be retrieved and processed with android_app_read_cmd()
- * and android_app_exec_cmd().
- */
- LOOPER_ID_MAIN = 1,
-
- /**
- * Looper data ID of events coming from the AInputQueue of the
- * application's window. These can be read via the inputQueue
- * object of android_app.
- */
- LOOPER_ID_EVENT = 2
-};
-
-enum {
- /**
- * Command from main thread: the AInputQueue has changed. Upon processing
- * this command, android_app->inputQueue will be updated to the new queue
- * (or NULL).
- */
- APP_CMD_INPUT_CHANGED,
-
- /**
- * Command from main thread: the ANativeWindow has changed. Upon processing
- * this command, android_app->window will be updated to the new window surface
- * (or NULL).
- */
- APP_CMD_WINDOW_CHANGED,
-
- /**
- * Command from main thread: the app's activity window has gained
- * input focus.
- */
- APP_CMD_GAINED_FOCUS,
-
- /**
- * Command from main thread: the app's activity window has lost
- * input focus.
- */
- APP_CMD_LOST_FOCUS,
-
- /**
- * Command from main thread: the app's activity has been started.
- */
- APP_CMD_START,
-
- /**
- * Command from main thread: the app's activity has been resumed.
- */
- APP_CMD_RESUME,
-
- /**
- * Command from main thread: the app's activity has been paused.
- */
- APP_CMD_PAUSE,
-
- /**
- * Command from main thread: the app's activity has been stopped.
- */
- APP_CMD_STOP,
-
- /**
- * Command from main thread: the app's activity is being destroyed,
- * and waiting for the app thread to clean up and exit before proceeding.
- */
- APP_CMD_DESTROY,
-};
-
-/**
- * Call when ALooper_pollAll() returns LOOPER_ID_MAIN, reading the next
- * app command message.
- */
-int8_t android_app_read_cmd(struct android_app* android_app);
-
-/**
- * Call with the command returned by android_app_read_cmd() to do the
- * default processing of the given command.
- *
- * Important: returns 0 if the app should exit. You must ALWAYS check for
- * a zero return and, if found, exit your android_main() function.
- */
-int32_t android_app_exec_cmd(struct android_app* android_app, int8_t cmd);
-
-/**
- * This is the function that application code must implement, representing
- * the main entry to the app.
- */
-extern void android_main(struct android_app* app);
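
For context, a sketch of an android_main() written against the glue interface removed above. The ALooper_pollAll() call and its arguments are an assumption about the looper API in this snapshot; the command-handling pattern follows the deleted comments (read commands with android_app_read_cmd(), run android_app_exec_cmd(), and return when it yields 0).

    #include <stdint.h>
    #include <android/looper.h>

    /* struct android_app, LOOPER_ID_MAIN/LOOPER_ID_EVENT, and the read/exec
     * helpers come from the glue header deleted above. */
    void android_main(struct android_app* app) {
        for (;;) {
            /* Assumed polling signature; treat this line as illustrative only. */
            int32_t ident = ALooper_pollAll(-1, NULL, NULL, NULL);

            if (ident == LOOPER_ID_MAIN) {
                int8_t cmd = android_app_read_cmd(app);
                /* A zero return means the activity is being destroyed;
                 * android_main() must exit in that case. */
                if (android_app_exec_cmd(app, cmd) == 0) {
                    return;
                }
            } else if (ident == LOOPER_ID_EVENT && app->inputQueue != NULL) {
                /* Input events would be drained from app->inputQueue here. */
            }
        }
    }
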
diff --git a/opengl/java/android/opengl/GLSurfaceView.java b/opengl/java/android/opengl/GLSurfaceView.java
index 2ff231d..41207f7 100644
--- a/opengl/java/android/opengl/GLSurfaceView.java
+++ b/opengl/java/android/opengl/GLSurfaceView.java
@@ -222,6 +222,10 @@
// underlying surface is created and destroyed
SurfaceHolder holder = getHolder();
holder.addCallback(this);
+ // setFormat is done by SurfaceView in SDK 2.3 and newer. Uncomment
+ // this statement if back-porting to 2.2 or older:
+ // holder.setFormat(PixelFormat.RGB_565);
+ //
// setType is not needed for SDK 2.0 or newer. Uncomment this
// statement if back-porting this code to older SDKs.
// holder.setType(SurfaceHolder.SURFACE_TYPE_GPU);
@@ -1103,7 +1107,6 @@
mRenderer = renderer;
}
-
@Override
public void run() {
setName("GLThread " + getId());
@@ -1154,6 +1157,7 @@
boolean sizeChanged = false;
boolean wantRenderNotification = false;
boolean doRenderNotification = false;
+ boolean askedToReleaseEglContext = false;
int w = 0;
int h = 0;
Runnable event = null;
@@ -1179,6 +1183,17 @@
}
}
+ // Do we need to give up the EGL context?
+ if (mShouldReleaseEglContext) {
+ if (LOG_SURFACE) {
+ Log.i("GLThread", "releasing EGL context because asked to tid=" + getId());
+ }
+ stopEglSurfaceLocked();
+ stopEglContextLocked();
+ mShouldReleaseEglContext = false;
+ askedToReleaseEglContext = true;
+ }
+
// Have we lost the EGL context?
if (lostEglContext) {
stopEglSurfaceLocked();
@@ -1228,6 +1243,9 @@
}
if (doRenderNotification) {
+ if (LOG_SURFACE) {
+ Log.i("GLThread", "sending render notification tid=" + getId());
+ }
wantRenderNotification = false;
doRenderNotification = false;
mRenderComplete = true;
@@ -1235,22 +1253,24 @@
}
// Ready to draw?
- if ((!mPaused) && mHasSurface
- && (mWidth > 0) && (mHeight > 0)
- && (mRequestRender || (mRenderMode == RENDERMODE_CONTINUOUSLY))) {
+ if (readyToDraw()) {
// If we don't have an EGL context, try to acquire one.
- if ((! mHaveEglContext) && sGLThreadManager.tryAcquireEglContextLocked(this)) {
- try {
- mEglHelper.start();
- } catch (RuntimeException t) {
- sGLThreadManager.releaseEglContextLocked(this);
- throw t;
- }
- mHaveEglContext = true;
- createEglContext = true;
+ if (! mHaveEglContext) {
+ if (askedToReleaseEglContext) {
+ askedToReleaseEglContext = false;
+ } else if (sGLThreadManager.tryAcquireEglContextLocked(this)) {
+ try {
+ mEglHelper.start();
+ } catch (RuntimeException t) {
+ sGLThreadManager.releaseEglContextLocked(this);
+ throw t;
+ }
+ mHaveEglContext = true;
+ createEglContext = true;
- sGLThreadManager.notifyAll();
+ sGLThreadManager.notifyAll();
+ }
}
if (mHaveEglContext && !mHaveEglSurface) {
@@ -1265,6 +1285,9 @@
w = mWidth;
h = mHeight;
wantRenderNotification = true;
+ if (LOG_SURFACE) {
+ Log.i("GLThread", "noticing that we want render notification tid=" + getId());
+ }
if (DRAW_TWICE_AFTER_SIZE_CHANGED) {
// We keep mRequestRender true so that we draw twice after the size changes.
@@ -1284,7 +1307,16 @@
// By design, this is the only place in a GLThread thread where we wait().
if (LOG_THREADS) {
- Log.i("GLThread", "waiting tid=" + getId());
+ Log.i("GLThread", "waiting tid=" + getId()
+ + " mHaveEglContext: " + mHaveEglContext
+ + " mHaveEglSurface: " + mHaveEglSurface
+ + " mPaused: " + mPaused
+ + " mHasSurface: " + mHasSurface
+ + " mWaitingForSurface: " + mWaitingForSurface
+ + " mWidth: " + mWidth
+ + " mHeight: " + mHeight
+ + " mRequestRender: " + mRequestRender
+ + " mRenderMode: " + mRenderMode);
}
sGLThreadManager.wait();
}
@@ -1326,7 +1358,7 @@
}
if (LOG_RENDERER_DRAW_FRAME) {
- Log.w("GLThread", "onDrawFrame");
+ Log.w("GLThread", "onDrawFrame tid=" + getId());
}
mRenderer.onDrawFrame(gl);
if (!mEglHelper.swap()) {
@@ -1352,6 +1384,16 @@
}
}
+ public boolean ableToDraw() {
+ return mHaveEglContext && mHaveEglSurface && readyToDraw();
+ }
+
+ private boolean readyToDraw() {
+ return (!mPaused) && mHasSurface
+ && (mWidth > 0) && (mHeight > 0)
+ && (mRequestRender || (mRenderMode == RENDERMODE_CONTINUOUSLY));
+ }
+
public void setRenderMode(int renderMode) {
if ( !((RENDERMODE_WHEN_DIRTY <= renderMode) && (renderMode <= RENDERMODE_CONTINUOUSLY)) ) {
throw new IllegalArgumentException("renderMode");
@@ -1461,9 +1503,10 @@
sGLThreadManager.notifyAll();
// Wait for thread to react to resize and render a frame
- while (! mExited && !mPaused && !mRenderComplete ) {
+ while (! mExited && !mPaused && !mRenderComplete
+ && (mGLThread != null && mGLThread.ableToDraw())) {
if (LOG_SURFACE) {
- Log.i("Main thread", "onWindowResize waiting for render complete.");
+ Log.i("Main thread", "onWindowResize waiting for render complete from tid=" + mGLThread.getId());
}
try {
sGLThreadManager.wait();
@@ -1490,6 +1533,11 @@
}
}
+ public void requestReleaseEglContextLocked() {
+ mShouldReleaseEglContext = true;
+ sGLThreadManager.notifyAll();
+ }
+
/**
* Queue an "event" to be run on the GL rendering thread.
* @param r the runnable to be run on the GL rendering thread.
@@ -1514,6 +1562,7 @@
private boolean mWaitingForSurface;
private boolean mHaveEglContext;
private boolean mHaveEglSurface;
+ private boolean mShouldReleaseEglContext;
private int mWidth;
private int mHeight;
private int mRenderMode;
@@ -1598,6 +1647,13 @@
if (mMultipleGLESContextsAllowed) {
return true;
}
+ // Notify the owning thread that it should release the context.
+ // TODO: implement a fairness policy. Currently
+ // if the owning thread is drawing continuously it will just
+ // reacquire the EGL context.
+ if (mEglOwner != null) {
+ mEglOwner.requestReleaseEglContextLocked();
+ }
return false;
}
diff --git a/opengl/tests/gl_perf/filltest.cpp b/opengl/tests/gl_perf/filltest.cpp
index ef85807..eb398ec 100644
--- a/opengl/tests/gl_perf/filltest.cpp
+++ b/opengl/tests/gl_perf/filltest.cpp
@@ -19,6 +19,7 @@
#include <time.h>
#include <sched.h>
#include <sys/resource.h>
+#include <string.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
@@ -131,7 +132,7 @@
double mpps = pixels / delta / 1000000;
double dc60 = pixels / delta / (w * h) / 60;
- printf("test %s, Mpps %f, dc = %f\n", str, mpps, dc60);
+ printf("%s, %f, %f\n", str, mpps, dc60);
}
static const char gVertexShader[] =
@@ -318,12 +319,12 @@
glBlendFunc(GL_ONE, GL_ONE);
glDisable(GL_BLEND);
- sprintf(str2, "Test varColor=%i, texCount=%i, modulate=%i, extraMath=%i, texSize=%i, blend=0",
+ sprintf(str2, "%i, %i, %i, %i, %i, 0",
useVarColor, texCount, modulateFirstTex, extraMath, tex0);
doLoop(w, h, str2);
glEnable(GL_BLEND);
- sprintf(str2, "Test varColor=%i, texCount=%i, modulate=%i, extraMath=%i, texSize=%i, blend=1",
+ sprintf(str2, "%i, %i, %i, %i, %i, 1",
useVarColor, texCount, modulateFirstTex, extraMath, tex0);
doLoop(w, h, str2);
}
@@ -360,6 +361,8 @@
setupVA();
genTextures();
+ printf("\nvarColor, texCount, modulate, extraMath, texSize, blend, Mpps, DC60\n");
+
for (int texCount = 0; texCount < 3; texCount++) {
for (int extraMath = 0; extraMath < 5; extraMath++) {
diff --git a/opengl/tests/gl_perf/gl2_perf.cpp b/opengl/tests/gl_perf/gl2_perf.cpp
index ef30289..9dfcf1c 100644
--- a/opengl/tests/gl_perf/gl2_perf.cpp
+++ b/opengl/tests/gl_perf/gl2_perf.cpp
@@ -31,18 +31,6 @@
using namespace android;
-static void printGLString(const char *name, GLenum s) {
- // fprintf(stderr, "printGLString %s, %d\n", name, s);
- const char *v = (const char *) glGetString(s);
- // int error = glGetError();
- // fprintf(stderr, "glGetError() = %d, result of glGetString = %x\n", error,
- // (unsigned int) v);
- // if ((v < (const char*) 0) || (v > (const char*) 0x10000))
- // fprintf(stderr, "GL %s = %s\n", name, v);
- // else
- // fprintf(stderr, "GL %s = (null) 0x%08x\n", name, (unsigned int) v);
- fprintf(stderr, "GL %s = %s\n", name, v);
-}
static void checkEglError(const char* op, EGLBoolean returnVal = EGL_TRUE) {
if (returnVal != EGL_TRUE) {
@@ -63,89 +51,6 @@
}
}
-void printEGLConfiguration(EGLDisplay dpy, EGLConfig config) {
-
-#define X(VAL) {VAL, #VAL}
- struct {EGLint attribute; const char* name;} names[] = {
- X(EGL_BUFFER_SIZE),
- X(EGL_ALPHA_SIZE),
- X(EGL_BLUE_SIZE),
- X(EGL_GREEN_SIZE),
- X(EGL_RED_SIZE),
- X(EGL_DEPTH_SIZE),
- X(EGL_STENCIL_SIZE),
- X(EGL_CONFIG_CAVEAT),
- X(EGL_CONFIG_ID),
- X(EGL_LEVEL),
- X(EGL_MAX_PBUFFER_HEIGHT),
- X(EGL_MAX_PBUFFER_PIXELS),
- X(EGL_MAX_PBUFFER_WIDTH),
- X(EGL_NATIVE_RENDERABLE),
- X(EGL_NATIVE_VISUAL_ID),
- X(EGL_NATIVE_VISUAL_TYPE),
- X(EGL_SAMPLES),
- X(EGL_SAMPLE_BUFFERS),
- X(EGL_SURFACE_TYPE),
- X(EGL_TRANSPARENT_TYPE),
- X(EGL_TRANSPARENT_RED_VALUE),
- X(EGL_TRANSPARENT_GREEN_VALUE),
- X(EGL_TRANSPARENT_BLUE_VALUE),
- X(EGL_BIND_TO_TEXTURE_RGB),
- X(EGL_BIND_TO_TEXTURE_RGBA),
- X(EGL_MIN_SWAP_INTERVAL),
- X(EGL_MAX_SWAP_INTERVAL),
- X(EGL_LUMINANCE_SIZE),
- X(EGL_ALPHA_MASK_SIZE),
- X(EGL_COLOR_BUFFER_TYPE),
- X(EGL_RENDERABLE_TYPE),
- X(EGL_CONFORMANT),
- };
-#undef X
-
- for (size_t j = 0; j < sizeof(names) / sizeof(names[0]); j++) {
- EGLint value = -1;
- EGLint returnVal = eglGetConfigAttrib(dpy, config, names[j].attribute, &value);
- EGLint error = eglGetError();
- if (returnVal && error == EGL_SUCCESS) {
- printf(" %s: ", names[j].name);
- printf("%d (0x%x)", value, value);
- }
- }
- printf("\n");
-}
-
-int printEGLConfigurations(EGLDisplay dpy) {
- EGLint numConfig = 0;
- EGLint returnVal = eglGetConfigs(dpy, NULL, 0, &numConfig);
- checkEglError("eglGetConfigs", returnVal);
- if (!returnVal) {
- return false;
- }
-
- printf("Number of EGL configuration: %d\n", numConfig);
-
- EGLConfig* configs = (EGLConfig*) malloc(sizeof(EGLConfig) * numConfig);
- if (! configs) {
- printf("Could not allocate configs.\n");
- return false;
- }
-
- returnVal = eglGetConfigs(dpy, configs, numConfig, &numConfig);
- checkEglError("eglGetConfigs", returnVal);
- if (!returnVal) {
- free(configs);
- return false;
- }
-
- for(int i = 0; i < numConfig; i++) {
- printf("Configuration %d\n", i);
- printEGLConfiguration(dpy, configs[i]);
- }
-
- free(configs);
- return true;
-}
-
bool doTest(uint32_t w, uint32_t h);
static EGLDisplay dpy;
@@ -176,7 +81,6 @@
returnValue = eglInitialize(dpy, &majorVersion, &minorVersion);
checkEglError("eglInitialize", returnValue);
- fprintf(stderr, "EGL version %d.%d\n", majorVersion, minorVersion);
if (returnValue != EGL_TRUE) {
printf("eglInitialize failed\n");
return 0;
@@ -191,9 +95,6 @@
checkEglError("EGLUtils::selectConfigForNativeWindow");
- printf("Chose this configuration:\n");
- printEGLConfiguration(dpy, myConfig);
-
surface = eglCreateWindowSurface(dpy, myConfig, window, NULL);
checkEglError("eglCreateWindowSurface");
if (surface == EGL_NO_SURFACE) {
@@ -218,13 +119,6 @@
checkEglError("eglQuerySurface");
GLint dim = w < h ? w : h;
- fprintf(stderr, "Window dimensions: %d x %d\n", w, h);
-
- printGLString("Version", GL_VERSION);
- printGLString("Vendor", GL_VENDOR);
- printGLString("Renderer", GL_RENDERER);
- printGLString("Extensions", GL_EXTENSIONS);
-
glViewport(0, 0, w, h);
for (;;) {
diff --git a/packages/SettingsProvider/src/com/android/providers/settings/DatabaseHelper.java b/packages/SettingsProvider/src/com/android/providers/settings/DatabaseHelper.java
index cd4f96d..7395233 100644
--- a/packages/SettingsProvider/src/com/android/providers/settings/DatabaseHelper.java
+++ b/packages/SettingsProvider/src/com/android/providers/settings/DatabaseHelper.java
@@ -49,6 +49,7 @@
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
+import java.util.HashSet;
import java.util.List;
/**
@@ -67,11 +68,29 @@
private Context mContext;
+ private static final HashSet<String> mValidTables = new HashSet<String>();
+
+ static {
+ mValidTables.add("system");
+ mValidTables.add("secure");
+ mValidTables.add("bluetooth_devices");
+ mValidTables.add("bookmarks");
+
+ // These are old.
+ mValidTables.add("favorites");
+ mValidTables.add("gservices");
+ mValidTables.add("old_favorites");
+ }
+
public DatabaseHelper(Context context) {
super(context, DATABASE_NAME, null, DATABASE_VERSION);
mContext = context;
}
+ public static boolean isValidTable(String name) {
+ return mValidTables.contains(name);
+ }
+
private void createSecureTable(SQLiteDatabase db) {
db.execSQL("CREATE TABLE secure (" +
"_id INTEGER PRIMARY KEY AUTOINCREMENT," +
diff --git a/packages/SettingsProvider/src/com/android/providers/settings/SettingsProvider.java b/packages/SettingsProvider/src/com/android/providers/settings/SettingsProvider.java
index 1019fa8..6a5290e 100644
--- a/packages/SettingsProvider/src/com/android/providers/settings/SettingsProvider.java
+++ b/packages/SettingsProvider/src/com/android/providers/settings/SettingsProvider.java
@@ -83,6 +83,9 @@
SqlArguments(Uri url, String where, String[] args) {
if (url.getPathSegments().size() == 1) {
this.table = url.getPathSegments().get(0);
+ if (!DatabaseHelper.isValidTable(this.table)) {
+ throw new IllegalArgumentException("Bad root path: " + this.table);
+ }
this.where = where;
this.args = args;
} else if (url.getPathSegments().size() != 2) {
@@ -91,6 +94,9 @@
throw new UnsupportedOperationException("WHERE clause not supported: " + url);
} else {
this.table = url.getPathSegments().get(0);
+ if (!DatabaseHelper.isValidTable(this.table)) {
+ throw new IllegalArgumentException("Bad root path: " + this.table);
+ }
if ("system".equals(this.table) || "secure".equals(this.table)) {
this.where = Settings.NameValueTable.NAME + "=?";
this.args = new String[] { url.getPathSegments().get(1) };
@@ -105,6 +111,9 @@
SqlArguments(Uri url) {
if (url.getPathSegments().size() == 1) {
this.table = url.getPathSegments().get(0);
+ if (!DatabaseHelper.isValidTable(this.table)) {
+ throw new IllegalArgumentException("Bad root path: " + this.table);
+ }
this.where = null;
this.args = null;
} else {
diff --git a/policy/src/com/android/internal/policy/impl/PhoneWindow.java b/policy/src/com/android/internal/policy/impl/PhoneWindow.java
index 5c56d3c..070d1e8 100644
--- a/policy/src/com/android/internal/policy/impl/PhoneWindow.java
+++ b/policy/src/com/android/internal/policy/impl/PhoneWindow.java
@@ -27,6 +27,7 @@
import com.android.internal.view.menu.ContextMenuBuilder;
import com.android.internal.view.menu.MenuBuilder;
import com.android.internal.view.menu.MenuDialogHelper;
+import com.android.internal.view.menu.MenuPopupHelper;
import com.android.internal.view.menu.MenuView;
import com.android.internal.view.menu.SubMenuBuilder;
import com.android.internal.widget.ActionBarView;
@@ -77,9 +78,12 @@
import android.view.inputmethod.InputMethodManager;
import android.widget.FrameLayout;
import android.widget.ImageView;
+import android.widget.ListPopupWindow;
import android.widget.ProgressBar;
import android.widget.TextView;
+import java.lang.ref.WeakReference;
+
/**
* Android-specific Window.
* <p>
@@ -96,7 +100,7 @@
* Simple callback used by the context menu and its submenus. The options
* menu submenus do not use this (their behavior is more complex).
*/
- ContextMenuCallback mContextMenuCallback = new ContextMenuCallback(FEATURE_CONTEXT_MENU);
+ DialogMenuCallback mContextMenuCallback = new DialogMenuCallback(FEATURE_CONTEXT_MENU);
// This is the top-level view of the window, containing the window decor.
private DecorView mDecor;
@@ -105,7 +109,7 @@
// mDecor itself, or a child of mDecor where the contents go.
private ViewGroup mContentParent;
- SurfaceHolder.Callback mTakeSurfaceCallback;
+ SurfaceHolder.Callback2 mTakeSurfaceCallback;
BaseSurfaceHolder mSurfaceHolder;
InputQueue.Callback mTakeInputQueueCallback;
@@ -251,7 +255,7 @@
}
@Override
- public void takeSurface(SurfaceHolder.Callback callback) {
+ public void takeSurface(SurfaceHolder.Callback2 callback) {
mTakeSurfaceCallback = callback;
}
@@ -280,7 +284,7 @@
if (mTitleView != null) {
mTitleView.setText(title);
} else if (mActionBar != null) {
- mActionBar.setTitle(title);
+ mActionBar.setWindowTitle(title);
}
mTitle = title;
}
@@ -808,8 +812,20 @@
return true;
}
- // The window manager will give us a valid window token
- new MenuDialogHelper(subMenu).show(null);
+ final Menu parentMenu = subMenu.getRootMenu();
+ final PanelFeatureState panel = findMenuPanel(parentMenu);
+
+ /*
+ * Use the panel open state to determine whether this is coming from an open panel
+ * or an action button. If it's an open panel we want to use MenuDialogHelper.
+ * If it's closed we want to grab the relevant view and create a popup anchored to it.
+ */
+ if (panel.isOpen) {
+ // The window manager will give us a valid window token
+ new MenuDialogHelper(subMenu).show(null);
+ } else {
+ new MenuPopupHelper(getContext(), subMenu).show();
+ }
return true;
}
@@ -2069,7 +2085,7 @@
}
}
- public android.view.SurfaceHolder.Callback willYouTakeTheSurface() {
+ public android.view.SurfaceHolder.Callback2 willYouTakeTheSurface() {
return mFeatureId < 0 ? mTakeSurfaceCallback : null;
}
@@ -2318,7 +2334,7 @@
} else {
mActionBar = (ActionBarView) findViewById(com.android.internal.R.id.action_bar);
if (mActionBar != null && mActionBar.getTitle() == null) {
- mActionBar.setTitle(mTitle);
+ mActionBar.setWindowTitle(mTitle);
}
}
}
@@ -2797,11 +2813,11 @@
* <li> Calls back to the callback's onMenuItemSelected when an item is
* selected.
*/
- private final class ContextMenuCallback implements MenuBuilder.Callback {
+ private final class DialogMenuCallback implements MenuBuilder.Callback {
private int mFeatureId;
private MenuDialogHelper mSubMenuHelper;
- public ContextMenuCallback(int featureId) {
+ public DialogMenuCallback(int featureId) {
mFeatureId = featureId;
}
diff --git a/libs/audioflinger/A2dpAudioInterface.cpp b/services/audioflinger/A2dpAudioInterface.cpp
similarity index 100%
rename from libs/audioflinger/A2dpAudioInterface.cpp
rename to services/audioflinger/A2dpAudioInterface.cpp
diff --git a/libs/audioflinger/A2dpAudioInterface.h b/services/audioflinger/A2dpAudioInterface.h
similarity index 100%
rename from libs/audioflinger/A2dpAudioInterface.h
rename to services/audioflinger/A2dpAudioInterface.h
diff --git a/libs/audioflinger/Android.mk b/services/audioflinger/Android.mk
similarity index 100%
rename from libs/audioflinger/Android.mk
rename to services/audioflinger/Android.mk
diff --git a/libs/audioflinger/AudioBufferProvider.h b/services/audioflinger/AudioBufferProvider.h
similarity index 100%
rename from libs/audioflinger/AudioBufferProvider.h
rename to services/audioflinger/AudioBufferProvider.h
diff --git a/libs/audioflinger/AudioDumpInterface.cpp b/services/audioflinger/AudioDumpInterface.cpp
similarity index 100%
rename from libs/audioflinger/AudioDumpInterface.cpp
rename to services/audioflinger/AudioDumpInterface.cpp
diff --git a/libs/audioflinger/AudioDumpInterface.h b/services/audioflinger/AudioDumpInterface.h
similarity index 100%
rename from libs/audioflinger/AudioDumpInterface.h
rename to services/audioflinger/AudioDumpInterface.h
diff --git a/libs/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
similarity index 98%
rename from libs/audioflinger/AudioFlinger.cpp
rename to services/audioflinger/AudioFlinger.cpp
index e6f46ce..771d885 100644
--- a/libs/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -17,8 +17,7 @@
#define LOG_TAG "AudioFlinger"
-//
-#define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
#include <math.h>
#include <signal.h>
@@ -5085,15 +5084,53 @@
}
}
+void AudioFlinger::EffectModule::updateState() {
+ Mutex::Autolock _l(mLock);
+
+ switch (mState) {
+ case RESTART:
+ reset_l();
+ // FALL THROUGH
+
+ case STARTING:
+ // clear auxiliary effect input buffer for next accumulation
+ if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
+ memset(mConfig.inputCfg.buffer.raw,
+ 0,
+ mConfig.inputCfg.buffer.frameCount*sizeof(int32_t));
+ }
+ start_l();
+ mState = ACTIVE;
+ break;
+ case STOPPING:
+ stop_l();
+ mDisableWaitCnt = mMaxDisableWaitCnt;
+ mState = STOPPED;
+ break;
+ case STOPPED:
+ // mDisableWaitCnt is forced to 1 by process() when the engine indicates the end of the
+ // turn off sequence.
+ if (--mDisableWaitCnt == 0) {
+ reset_l();
+ mState = IDLE;
+ }
+ break;
+ default: //IDLE , ACTIVE
+ break;
+ }
+}
+
void AudioFlinger::EffectModule::process()
{
Mutex::Autolock _l(mLock);
- if (mEffectInterface == NULL || mConfig.inputCfg.buffer.raw == NULL || mConfig.outputCfg.buffer.raw == NULL) {
+ if (mEffectInterface == NULL ||
+ mConfig.inputCfg.buffer.raw == NULL ||
+ mConfig.outputCfg.buffer.raw == NULL) {
return;
}
- if (mState != IDLE) {
+ if (mState == ACTIVE || mState == STOPPING || mState == STOPPED) {
// do 32 bit to 16 bit conversion for auxiliary effect input buffer
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
AudioMixer::ditherAndClamp(mConfig.inputCfg.buffer.s32,
@@ -5101,33 +5138,15 @@
mConfig.inputCfg.buffer.frameCount);
}
- // TODO: handle effects with buffer provider
- if (mState != ACTIVE) {
- switch (mState) {
- case RESET:
- reset_l();
- mState = STARTING;
- // clear auxiliary effect input buffer for next accumulation
- if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- memset(mConfig.inputCfg.buffer.raw, 0, mConfig.inputCfg.buffer.frameCount*sizeof(int32_t));
- }
- return;
- case STARTING:
- start_l();
- mState = ACTIVE;
- break;
- case STOPPING:
- mState = STOPPED;
- break;
- case STOPPED:
- stop_l();
- mState = IDLE;
- return;
- }
- }
-
// do the actual processing in the effect engine
- (*mEffectInterface)->process(mEffectInterface, &mConfig.inputCfg.buffer, &mConfig.outputCfg.buffer);
+ int ret = (*mEffectInterface)->process(mEffectInterface,
+ &mConfig.inputCfg.buffer,
+ &mConfig.outputCfg.buffer);
+
+ // force transition to IDLE state when engine is ready
+ if (mState == STOPPED && ret == -ENODATA) {
+ mDisableWaitCnt = 1;
+ }
// clear auxiliary effect input buffer for next accumulation
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
@@ -5216,6 +5235,10 @@
if (status == 0) {
status = cmdStatus;
}
+
+ mMaxDisableWaitCnt = (MAX_DISABLE_TIME_MS * mConfig.outputCfg.samplingRate) /
+ (1000 * mConfig.outputCfg.buffer.frameCount);
+
return status;
}
@@ -5292,21 +5315,19 @@
switch (mState) {
// going from disabled to enabled
case IDLE:
- mState = RESET;
+ mState = STARTING;
+ break;
+ case STOPPED:
+ mState = RESTART;
break;
case STOPPING:
mState = ACTIVE;
break;
- case STOPPED:
- mState = STARTING;
- break;
// going from enabled to disabled
- case RESET:
- mState = IDLE;
- break;
+ case RESTART:
case STARTING:
- mState = STOPPED;
+ mState = IDLE;
break;
case ACTIVE:
mState = STOPPING;
@@ -5325,7 +5346,7 @@
bool AudioFlinger::EffectModule::isEnabled()
{
switch (mState) {
- case RESET:
+ case RESTART:
case STARTING:
case ACTIVE:
return true;
@@ -5772,6 +5793,9 @@
for (size_t i = 0; i < size; i++) {
mEffects[i]->process();
}
+ for (size_t i = 0; i < size; i++) {
+ mEffects[i]->updateState();
+ }
// if no track is active, input buffer must be cleared here as the mixer process
// will not do it
if (mSessionId > 0 && activeTracks() == 0) {
@@ -6044,11 +6068,4 @@
return BnAudioFlinger::onTransact(code, data, reply, flags);
}
-// ----------------------------------------------------------------------------
-
-void AudioFlinger::instantiate() {
- defaultServiceManager()->addService(
- String16("media.audio_flinger"), new AudioFlinger());
-}
-
}; // namespace android
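
To make the reworked effect activation logic above easier to follow, here is a standalone model of the enable/disable transitions visible in this hunk (IDLE to STARTING, STOPPED to RESTART, STOPPING to ACTIVE on enable; RESTART/STARTING to IDLE, ACTIVE to STOPPING on disable). It is an illustration only, not AudioFlinger code; the grace period that drains STOPPING/STOPPED back to IDLE is handled separately by updateState().

    /* Simplified model of the EffectModule activation states after this change. */
    enum effect_state { IDLE, RESTART, STARTING, ACTIVE, STOPPING, STOPPED };

    /* State reached when a client enables (enabled != 0) or disables the effect. */
    enum effect_state set_enabled(enum effect_state s, int enabled) {
        if (enabled) {
            switch (s) {
            case IDLE:     return STARTING;   /* going from disabled to enabled */
            case STOPPED:  return RESTART;    /* reset, then start again */
            case STOPPING: return ACTIVE;     /* cancel the pending stop */
            default:       return s;          /* already enabled */
            }
        } else {
            switch (s) {
            case RESTART:
            case STARTING: return IDLE;       /* never reached ACTIVE */
            case ACTIVE:   return STOPPING;   /* updateState() then drains to STOPPED/IDLE */
            default:       return s;          /* already disabled */
            }
        }
    }
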
diff --git a/libs/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
similarity index 98%
rename from libs/audioflinger/AudioFlinger.h
rename to services/audioflinger/AudioFlinger.h
index ec3d7f1..7013d76 100644
--- a/libs/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -31,10 +31,12 @@
#include <utils/Atomic.h>
#include <utils/Errors.h>
#include <utils/threads.h>
-#include <binder/MemoryDealer.h>
#include <utils/SortedVector.h>
#include <utils/Vector.h>
+#include <binder/BinderService.h>
+#include <binder/MemoryDealer.h>
+
#include <hardware_legacy/AudioHardwareInterface.h>
#include "AudioBufferProvider.h"
@@ -58,10 +60,13 @@
static const nsecs_t kStandbyTimeInNsecs = seconds(3);
-class AudioFlinger : public BnAudioFlinger
+class AudioFlinger :
+ public BinderService<AudioFlinger>,
+ public BnAudioFlinger
{
+ friend class BinderService<AudioFlinger>;
public:
- static void instantiate();
+ static char const* getServiceName() { return "media.audio_flinger"; }
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -905,7 +910,7 @@
enum effect_state {
IDLE,
- RESET,
+ RESTART,
STARTING,
ACTIVE,
STOPPING,
@@ -914,6 +919,7 @@
int id() { return mId; }
void process();
+ void updateState();
status_t command(int cmdCode, int cmdSize, void *pCmdData, int *replySize, void *pReplyData);
void reset_l();
@@ -948,6 +954,9 @@
protected:
+ // Maximum time allocated to effect engines to complete the turn off sequence
+ static const uint32_t MAX_DISABLE_TIME_MS = 10000;
+
EffectModule(const EffectModule&);
EffectModule& operator = (const EffectModule&);
@@ -973,6 +982,9 @@
status_t mStatus; // initialization status
uint32_t mState; // current activation state (effect_state)
Vector< wp<EffectHandle> > mHandles; // list of client handles
+ uint32_t mMaxDisableWaitCnt; // maximum grace period before forcing an effect off after
+ // sending disable command.
+ uint32_t mDisableWaitCnt; // current process() calls count during disable period.
};
// The EffectHandle class implements the IEffect interface. It provides resources
diff --git a/libs/audioflinger/AudioHardwareGeneric.cpp b/services/audioflinger/AudioHardwareGeneric.cpp
similarity index 100%
rename from libs/audioflinger/AudioHardwareGeneric.cpp
rename to services/audioflinger/AudioHardwareGeneric.cpp
diff --git a/libs/audioflinger/AudioHardwareGeneric.h b/services/audioflinger/AudioHardwareGeneric.h
similarity index 100%
rename from libs/audioflinger/AudioHardwareGeneric.h
rename to services/audioflinger/AudioHardwareGeneric.h
diff --git a/libs/audioflinger/AudioHardwareInterface.cpp b/services/audioflinger/AudioHardwareInterface.cpp
similarity index 100%
rename from libs/audioflinger/AudioHardwareInterface.cpp
rename to services/audioflinger/AudioHardwareInterface.cpp
diff --git a/libs/audioflinger/AudioHardwareStub.cpp b/services/audioflinger/AudioHardwareStub.cpp
similarity index 100%
rename from libs/audioflinger/AudioHardwareStub.cpp
rename to services/audioflinger/AudioHardwareStub.cpp
diff --git a/libs/audioflinger/AudioHardwareStub.h b/services/audioflinger/AudioHardwareStub.h
similarity index 100%
rename from libs/audioflinger/AudioHardwareStub.h
rename to services/audioflinger/AudioHardwareStub.h
diff --git a/libs/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
similarity index 100%
rename from libs/audioflinger/AudioMixer.cpp
rename to services/audioflinger/AudioMixer.cpp
diff --git a/libs/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
similarity index 100%
rename from libs/audioflinger/AudioMixer.h
rename to services/audioflinger/AudioMixer.h
diff --git a/libs/audioflinger/AudioPolicyManagerBase.cpp b/services/audioflinger/AudioPolicyManagerBase.cpp
similarity index 100%
rename from libs/audioflinger/AudioPolicyManagerBase.cpp
rename to services/audioflinger/AudioPolicyManagerBase.cpp
diff --git a/libs/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
similarity index 100%
rename from libs/audioflinger/AudioPolicyService.cpp
rename to services/audioflinger/AudioPolicyService.cpp
diff --git a/libs/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
similarity index 100%
rename from libs/audioflinger/AudioPolicyService.h
rename to services/audioflinger/AudioPolicyService.h
diff --git a/libs/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
similarity index 100%
rename from libs/audioflinger/AudioResampler.cpp
rename to services/audioflinger/AudioResampler.cpp
diff --git a/libs/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
similarity index 100%
rename from libs/audioflinger/AudioResampler.h
rename to services/audioflinger/AudioResampler.h
diff --git a/libs/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
similarity index 100%
rename from libs/audioflinger/AudioResamplerCubic.cpp
rename to services/audioflinger/AudioResamplerCubic.cpp
diff --git a/libs/audioflinger/AudioResamplerCubic.h b/services/audioflinger/AudioResamplerCubic.h
similarity index 100%
rename from libs/audioflinger/AudioResamplerCubic.h
rename to services/audioflinger/AudioResamplerCubic.h
diff --git a/libs/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
similarity index 100%
rename from libs/audioflinger/AudioResamplerSinc.cpp
rename to services/audioflinger/AudioResamplerSinc.cpp
diff --git a/libs/audioflinger/AudioResamplerSinc.h b/services/audioflinger/AudioResamplerSinc.h
similarity index 100%
rename from libs/audioflinger/AudioResamplerSinc.h
rename to services/audioflinger/AudioResamplerSinc.h
diff --git a/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
similarity index 100%
rename from camera/libcameraservice/Android.mk
rename to services/camera/libcameraservice/Android.mk
diff --git a/camera/libcameraservice/CameraHardwareStub.cpp b/services/camera/libcameraservice/CameraHardwareStub.cpp
similarity index 100%
rename from camera/libcameraservice/CameraHardwareStub.cpp
rename to services/camera/libcameraservice/CameraHardwareStub.cpp
diff --git a/camera/libcameraservice/CameraHardwareStub.h b/services/camera/libcameraservice/CameraHardwareStub.h
similarity index 100%
rename from camera/libcameraservice/CameraHardwareStub.h
rename to services/camera/libcameraservice/CameraHardwareStub.h
diff --git a/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
similarity index 99%
rename from camera/libcameraservice/CameraService.cpp
rename to services/camera/libcameraservice/CameraService.cpp
index 10668a4..c786f94 100644
--- a/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -192,11 +192,6 @@
return mClient[cameraId].promote();
}
-void CameraService::instantiate() {
- defaultServiceManager()->addService(String16("media.camera"),
- new CameraService());
-}
-
status_t CameraService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
// Permission checks
diff --git a/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
similarity index 97%
rename from camera/libcameraservice/CameraService.h
rename to services/camera/libcameraservice/CameraService.h
index 8193e77..b0b2d7a 100644
--- a/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -19,6 +19,8 @@
#ifndef ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
#define ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
+#include <binder/BinderService.h>
+
#include <camera/ICameraService.h>
#include <camera/CameraHardwareInterface.h>
@@ -30,11 +32,14 @@
class MemoryHeapBase;
class MediaPlayer;
-class CameraService: public BnCameraService
+class CameraService :
+ public BinderService<CameraService>,
+ public BnCameraService
{
class Client;
+ friend class BinderService<CameraService>;
public:
- static void instantiate();
+ static char const* getServiceName() { return "media.camera"; }
CameraService();
virtual ~CameraService();
diff --git a/camera/libcameraservice/CannedJpeg.h b/services/camera/libcameraservice/CannedJpeg.h
similarity index 100%
rename from camera/libcameraservice/CannedJpeg.h
rename to services/camera/libcameraservice/CannedJpeg.h
diff --git a/camera/libcameraservice/FakeCamera.cpp b/services/camera/libcameraservice/FakeCamera.cpp
similarity index 100%
rename from camera/libcameraservice/FakeCamera.cpp
rename to services/camera/libcameraservice/FakeCamera.cpp
diff --git a/camera/libcameraservice/FakeCamera.h b/services/camera/libcameraservice/FakeCamera.h
similarity index 100%
rename from camera/libcameraservice/FakeCamera.h
rename to services/camera/libcameraservice/FakeCamera.h
diff --git a/camera/tests/CameraServiceTest/Android.mk b/services/camera/tests/CameraServiceTest/Android.mk
similarity index 100%
rename from camera/tests/CameraServiceTest/Android.mk
rename to services/camera/tests/CameraServiceTest/Android.mk
diff --git a/camera/tests/CameraServiceTest/CameraServiceTest.cpp b/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp
similarity index 100%
rename from camera/tests/CameraServiceTest/CameraServiceTest.cpp
rename to services/camera/tests/CameraServiceTest/CameraServiceTest.cpp
diff --git a/services/java/com/android/server/ConnectivityService.java b/services/java/com/android/server/ConnectivityService.java
index 9ff7de6..9c504fe 100644
--- a/services/java/com/android/server/ConnectivityService.java
+++ b/services/java/com/android/server/ConnectivityService.java
@@ -26,6 +26,7 @@
import android.net.IConnectivityManager;
import android.net.MobileDataStateTracker;
import android.net.NetworkInfo;
+import android.net.NetworkProperties;
import android.net.NetworkStateTracker;
import android.net.wifi.WifiStateTracker;
import android.net.NetworkUtils;
@@ -51,7 +52,10 @@
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
/**
@@ -741,6 +745,7 @@
* specified host is to be routed
* @param hostAddress the IP address of the host to which the route is
* desired
+ * todo - deprecate (only v4!)
* @return {@code true} on success, {@code false} on failure
*/
public boolean requestRouteToHost(int networkType, int hostAddress) {
@@ -757,7 +762,11 @@
}
return false;
}
- return addHostRoute(tracker, hostAddress);
+ try {
+ InetAddress addr = InetAddress.getByAddress(NetworkUtils.v4IntToArray(hostAddress));
+ return addHostRoute(tracker, addr);
+ } catch (UnknownHostException e) {}
+ return false;
}
/**
@@ -765,22 +774,25 @@
* host via the mobile data network.
* @param hostAddress the IP address of the host to which the route is desired,
* in network byte order.
+ * TODO - deprecate
* @return {@code true} on success, {@code false} on failure
*/
- private boolean addHostRoute(NetworkStateTracker nt, int hostAddress) {
+ private boolean addHostRoute(NetworkStateTracker nt, InetAddress hostAddress) {
if (nt.getNetworkInfo().getType() == ConnectivityManager.TYPE_WIFI) {
return false;
}
- String interfaceName = nt.getInterfaceName();
+ NetworkProperties p = nt.getNetworkProperties();
+ if (p == null) return false;
+ String interfaceName = p.getInterfaceName();
if (DBG) {
- Slog.d(TAG, "Requested host route to " + Integer.toHexString(hostAddress) +
- "(" + interfaceName + ")");
+ Slog.d(TAG, "Requested host route to " + hostAddress + "(" + interfaceName + ")");
}
- if (interfaceName != null && hostAddress != -1) {
+ if (interfaceName != null) {
return NetworkUtils.addHostRoute(interfaceName, hostAddress) == 0;
} else {
+ if (DBG) Slog.e(TAG, "addHostRoute failed due to null interface name");
return false;
}
}
@@ -1251,21 +1263,20 @@
}
private void addPrivateDnsRoutes(NetworkStateTracker nt) {
- String interfaceName = nt.getInterfaceName();
boolean privateDnsRouteSet = nt.isPrivateDnsRouteSet();
+ NetworkProperties p = nt.getNetworkProperties();
+ if (p == null) return;
+ String interfaceName = p.getInterfaceName();
if (DBG) {
Slog.d(TAG, "addPrivateDnsRoutes for " + nt +
"(" + interfaceName + ") - mPrivateDnsRouteSet = " + privateDnsRouteSet);
}
- String[] dnsList = getNameServerList(nt.getDnsPropNames());
if (interfaceName != null && !privateDnsRouteSet) {
- for (String addrString : dnsList) {
- int addr = NetworkUtils.lookupHost(addrString);
- if (addr != -1 && addr != 0) {
- if (DBG) Slog.d(TAG, " adding "+addrString+" ("+addr+")");
- NetworkUtils.addHostRoute(interfaceName, addr);
- }
+ Collection<InetAddress> dnsList = p.getDnses();
+ for (InetAddress dns : dnsList) {
+ if (DBG) Slog.d(TAG, " adding " + dns);
+ NetworkUtils.addHostRoute(interfaceName, dns);
}
nt.privateDnsRouteSet(true);
}
@@ -1274,7 +1285,9 @@
private void removePrivateDnsRoutes(NetworkStateTracker nt) {
// TODO - we should do this explicitly but the NetUtils api doesn't
// support this yet - must remove all. No worse than before
- String interfaceName = nt.getInterfaceName();
+ NetworkProperties p = nt.getNetworkProperties();
+ if (p == null) return;
+ String interfaceName = p.getInterfaceName();
boolean privateDnsRouteSet = nt.isPrivateDnsRouteSet();
if (interfaceName != null && privateDnsRouteSet) {
if (DBG) {
@@ -1286,61 +1299,42 @@
}
}
- /**
- * Return the IP addresses of the DNS servers available for this
- * network interface.
- * @param propertyNames the names of the system properties whose values
- * give the IP addresses. Properties with no values are skipped.
- * @return an array of {@code String}s containing the IP addresses
- * of the DNS servers, in dot-notation. This may have fewer
- * non-null entries than the list of names passed in, since
- * some of the passed-in names may have empty values.
- */
- String[] getNameServerList(String[] propertyNames) {
- String[] dnsAddresses = new String[propertyNames.length];
- int i, j;
-
- for (i = 0, j = 0; i < propertyNames.length; i++) {
- String value = SystemProperties.get(propertyNames[i]);
- // The GSM layer sometimes sets a bogus DNS server address of
- // 0.0.0.0
- if (!TextUtils.isEmpty(value) && !TextUtils.equals(value, "0.0.0.0")) {
- dnsAddresses[j++] = value;
- }
- }
- return dnsAddresses;
- }
private void addDefaultRoute(NetworkStateTracker nt) {
- String interfaceName = nt.getInterfaceName();
- int defaultGatewayAddr = nt.getDefaultGatewayAddr();
+ NetworkProperties p = nt.getNetworkProperties();
+ if (p == null) return;
+ String interfaceName = p.getInterfaceName();
+ InetAddress defaultGatewayAddr = p.getGateway();
boolean defaultRouteSet = nt.isDefaultRouteSet();
- NetworkInfo networkInfo = nt.getNetworkInfo();
- if ((interfaceName != null) && (defaultGatewayAddr != 0) &&
- defaultRouteSet == false) {
- if (DBG) {
+ if ((interfaceName != null) && (defaultGatewayAddr != null ) &&
+ (defaultRouteSet == false)) {
+ boolean error = (NetworkUtils.setDefaultRoute(interfaceName, defaultGatewayAddr) < 0);
+
+ if (DBG && !error) {
+ NetworkInfo networkInfo = nt.getNetworkInfo();
Slog.d(TAG, "addDefaultRoute for " + networkInfo.getTypeName() +
" (" + interfaceName + "), GatewayAddr=" + defaultGatewayAddr);
}
- NetworkUtils.setDefaultRoute(interfaceName, defaultGatewayAddr);
- nt.defaultRouteSet(true);
+ nt.defaultRouteSet(!error);
}
}
public void removeDefaultRoute(NetworkStateTracker nt) {
- String interfaceName = nt.getInterfaceName();
+ NetworkProperties p = nt.getNetworkProperties();
+ if (p == null) return;
+ String interfaceName = p.getInterfaceName();
boolean defaultRouteSet = nt.isDefaultRouteSet();
- NetworkInfo networkInfo = nt.getNetworkInfo();
if (interfaceName != null && defaultRouteSet == true) {
- if (DBG) {
+ boolean error = (NetworkUtils.removeDefaultRoute(interfaceName) < 0);
+ if (DBG && !error) {
+ NetworkInfo networkInfo = nt.getNetworkInfo();
Slog.d(TAG, "removeDefaultRoute for " + networkInfo.getTypeName() + " (" +
interfaceName + ")");
}
- NetworkUtils.removeDefaultRoute(interfaceName);
- nt.defaultRouteSet(false);
+ nt.defaultRouteSet(error);
}
}
@@ -1430,12 +1424,14 @@
NetworkStateTracker nt = mNetTrackers[i];
if (nt.getNetworkInfo().isConnected() &&
!nt.isTeardownRequested()) {
+ NetworkProperties p = nt.getNetworkProperties();
+ if (p == null) continue;
List pids = mNetRequestersPids[i];
for (int j=0; j<pids.size(); j++) {
Integer pid = (Integer)pids.get(j);
if (pid.intValue() == myPid) {
- String[] dnsList = getNameServerList(nt.getDnsPropNames());
- writePidDns(dnsList, myPid);
+ Collection<InetAddress> dnses = p.getDnses();
+ writePidDns(dnses, myPid);
if (doBump) {
bumpDns();
}
@@ -1457,12 +1453,10 @@
}
}
- private void writePidDns(String[] dnsList, int pid) {
+ private void writePidDns(Collection <InetAddress> dnses, int pid) {
int j = 1;
- for (String dns : dnsList) {
- if (dns != null && !TextUtils.equals(dns, "0.0.0.0")) {
- SystemProperties.set("net.dns" + j++ + "." + pid, dns);
- }
+ for (InetAddress dns : dnses) {
+ SystemProperties.set("net.dns" + j++ + "." + pid, dns.getHostAddress());
}
}
@@ -1488,17 +1482,17 @@
NetworkStateTracker nt = mNetTrackers[netType];
if (nt != null && nt.getNetworkInfo().isConnected() &&
!nt.isTeardownRequested()) {
- String[] dnsList = getNameServerList(nt.getDnsPropNames());
+ NetworkProperties p = nt.getNetworkProperties();
+ if (p == null) continue;
+ Collection<InetAddress> dnses = p.getDnses();
if (mNetAttributes[netType].isDefault()) {
int j = 1;
- for (String dns : dnsList) {
- if (dns != null && !TextUtils.equals(dns, "0.0.0.0")) {
- if (DBG) {
- Slog.d(TAG, "adding dns " + dns + " for " +
- nt.getNetworkInfo().getTypeName());
- }
- SystemProperties.set("net.dns" + j++, dns);
+ for (InetAddress dns : dnses) {
+ if (DBG) {
+ Slog.d(TAG, "adding dns " + dns + " for " +
+ nt.getNetworkInfo().getTypeName());
}
+ SystemProperties.set("net.dns" + j++, dns.getHostAddress());
}
for (int k=j ; k<mNumDnsEntries; k++) {
if (DBG) Slog.d(TAG, "erasing net.dns" + k);
@@ -1510,7 +1504,7 @@
List pids = mNetRequestersPids[netType];
for (int y=0; y< pids.size(); y++) {
Integer pid = (Integer)pids.get(y);
- writePidDns(dnsList, pid.intValue());
+ writePidDns(dnses, pid.intValue());
}
}
}
diff --git a/services/java/com/android/server/InputMethodManagerService.java b/services/java/com/android/server/InputMethodManagerService.java
index f6e3441..36b3a5e 100644
--- a/services/java/com/android/server/InputMethodManagerService.java
+++ b/services/java/com/android/server/InputMethodManagerService.java
@@ -980,7 +980,7 @@
void setInputMethodLocked(String id) {
InputMethodInfo info = mMethodMap.get(id);
if (info == null) {
- throw new IllegalArgumentException("Unknown id: " + mCurMethodId);
+ throw new IllegalArgumentException("Unknown id: " + id);
}
if (id.equals(mCurMethodId)) {
diff --git a/services/java/com/android/server/PackageManagerService.java b/services/java/com/android/server/PackageManagerService.java
index 57c6975..af2145e 100644
--- a/services/java/com/android/server/PackageManagerService.java
+++ b/services/java/com/android/server/PackageManagerService.java
@@ -4764,7 +4764,9 @@
Log.v(TAG, "Setting .obb path for " + packageName + " to: " + path);
PackageSetting pkgSetting;
final int uid = Binder.getCallingUid();
- boolean allowedByPermission = false;
+ final int permission = mContext.checkCallingPermission(
+ android.Manifest.permission.INSTALL_PACKAGES);
+ final boolean allowedByPermission = (permission == PackageManager.PERMISSION_GRANTED);
synchronized (mPackages) {
pkgSetting = mSettings.mPackages.get(packageName);
if (pkgSetting == null) {
diff --git a/services/java/com/android/server/TelephonyRegistry.java b/services/java/com/android/server/TelephonyRegistry.java
index b1ca7852..f42bc8b 100644
--- a/services/java/com/android/server/TelephonyRegistry.java
+++ b/services/java/com/android/server/TelephonyRegistry.java
@@ -19,6 +19,7 @@
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
+import android.net.NetworkProperties;
import android.os.Binder;
import android.os.Bundle;
import android.os.IBinder;
@@ -34,6 +35,7 @@
import java.util.ArrayList;
import java.io.FileDescriptor;
import java.io.PrintWriter;
+import java.net.NetworkInterface;
import com.android.internal.app.IBatteryStats;
import com.android.internal.telephony.ITelephonyRegistry;
@@ -90,7 +92,7 @@
private ArrayList<String> mConnectedApns;
- private String mDataConnectionInterfaceName = "";
+ private NetworkProperties mDataConnectionProperties;
private Bundle mCellLocation = new Bundle();
@@ -353,7 +355,8 @@
}
public void notifyDataConnection(int state, boolean isDataConnectivityPossible,
- String reason, String apn, String apnType, String interfaceName, int networkType) {
+ String reason, String apn, String apnType, NetworkProperties networkProperties,
+ int networkType) {
if (!checkNotifyPermission("notifyDataConnection()" )) {
return;
}
@@ -380,7 +383,7 @@
mDataConnectionPossible = isDataConnectivityPossible;
mDataConnectionReason = reason;
mDataConnectionApn = apn;
- mDataConnectionInterfaceName = interfaceName;
+ mDataConnectionProperties = networkProperties;
if (mDataConnectionNetworkType != networkType) {
mDataConnectionNetworkType = networkType;
modified = true;
@@ -400,7 +403,7 @@
}
}
broadcastDataConnectionStateChanged(state, isDataConnectivityPossible, reason, apn,
- apnType, interfaceName);
+ apnType, networkProperties);
}
public void notifyDataConnectionFailed(String reason, String apnType) {
@@ -487,7 +490,7 @@
pw.println(" mDataConnectionPossible=" + mDataConnectionPossible);
pw.println(" mDataConnectionReason=" + mDataConnectionReason);
pw.println(" mDataConnectionApn=" + mDataConnectionApn);
- pw.println(" mDataConnectionInterfaceName=" + mDataConnectionInterfaceName);
+ pw.println(" mDataConnectionProperties=" + mDataConnectionProperties);
pw.println(" mCellLocation=" + mCellLocation);
pw.println("registrations: count=" + recordCount);
for (Record r : mRecords) {
@@ -561,7 +564,7 @@
private void broadcastDataConnectionStateChanged(int state,
boolean isDataConnectivityPossible,
- String reason, String apn, String apnType, String interfaceName) {
+ String reason, String apn, String apnType, NetworkProperties networkProperties) {
// Note: not reporting to the battery stats service here, because the
// status bar takes care of that after taking into account all of the
// required info.
@@ -574,9 +577,15 @@
if (reason != null) {
intent.putExtra(Phone.STATE_CHANGE_REASON_KEY, reason);
}
+ if (networkProperties != null) {
+ intent.putExtra(Phone.DATA_NETWORK_PROPERTIES_KEY, networkProperties);
+ NetworkInterface iface = networkProperties.getInterface();
+ if (iface != null) {
+ intent.putExtra(Phone.DATA_IFACE_NAME_KEY, iface.getName());
+ }
+ }
intent.putExtra(Phone.DATA_APN_KEY, apn);
intent.putExtra(Phone.DATA_APN_TYPE_KEY, apnType);
- intent.putExtra(Phone.DATA_IFACE_NAME_KEY, interfaceName);
mContext.sendStickyBroadcast(intent);
}
diff --git a/services/java/com/android/server/ViewServer.java b/services/java/com/android/server/ViewServer.java
index ae00438..b369f71 100644
--- a/services/java/com/android/server/ViewServer.java
+++ b/services/java/com/android/server/ViewServer.java
@@ -21,6 +21,8 @@
import java.net.ServerSocket;
import java.net.Socket;
import java.net.InetAddress;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
import java.io.IOException;
import java.io.BufferedReader;
import java.io.InputStreamReader;
@@ -41,11 +43,13 @@
*/
public static final int VIEW_SERVER_DEFAULT_PORT = 4939;
+ private static final int VIEW_SERVER_MAX_CONNECTIONS = 10;
+
// Debug facility
private static final String LOG_TAG = "ViewServer";
- private static final String VALUE_PROTOCOL_VERSION = "2";
- private static final String VALUE_SERVER_VERSION = "3";
+ private static final String VALUE_PROTOCOL_VERSION = "3";
+ private static final String VALUE_SERVER_VERSION = "4";
// Protocol commands
// Returns the protocol version
@@ -54,6 +58,8 @@
private static final String COMMAND_SERVER_VERSION = "SERVER";
// Lists all of the available windows in the system
private static final String COMMAND_WINDOW_MANAGER_LIST = "LIST";
+ // Keeps a connection open and notifies when the list of windows changes
+ private static final String COMMAND_WINDOW_MANAGER_AUTOLIST = "AUTOLIST";
private ServerSocket mServer;
private Thread mThread;
@@ -61,6 +67,8 @@
private final WindowManagerService mWindowManager;
private final int mPort;
+ private ExecutorService mThreadPool;
+
/**
* Creates a new ViewServer associated with the specified window manager.
* The server uses the default port {@link #VIEW_SERVER_DEFAULT_PORT}. The server
@@ -103,8 +111,9 @@
return false;
}
- mServer = new ServerSocket(mPort, 1, InetAddress.getLocalHost());
+ mServer = new ServerSocket(mPort, VIEW_SERVER_MAX_CONNECTIONS, InetAddress.getLocalHost());
mThread = new Thread(this, "Remote View Server [port=" + mPort + "]");
+ mThreadPool = Executors.newFixedThreadPool(VIEW_SERVER_MAX_CONNECTIONS);
mThread.start();
return true;
@@ -122,7 +131,16 @@
*/
boolean stop() {
if (mThread != null) {
+
mThread.interrupt();
+ if (mThreadPool != null) {
+ try {
+ mThreadPool.shutdownNow();
+ } catch (SecurityException e) {
+ Slog.w(LOG_TAG, "Could not stop all view server threads");
+ }
+ }
+ mThreadPool = null;
mThread = null;
try {
mServer.close();
@@ -152,62 +170,21 @@
* Main server loop.
*/
public void run() {
- final ServerSocket server = mServer;
-
while (Thread.currentThread() == mThread) {
- Socket client = null;
// Any uncaught exception will crash the system process
try {
- client = server.accept();
-
- BufferedReader in = null;
- try {
- in = new BufferedReader(new InputStreamReader(client.getInputStream()), 1024);
-
- final String request = in.readLine();
-
- String command;
- String parameters;
-
- int index = request.indexOf(' ');
- if (index == -1) {
- command = request;
- parameters = "";
- } else {
- command = request.substring(0, index);
- parameters = request.substring(index + 1);
- }
-
- boolean result;
- if (COMMAND_PROTOCOL_VERSION.equalsIgnoreCase(command)) {
- result = writeValue(client, VALUE_PROTOCOL_VERSION);
- } else if (COMMAND_SERVER_VERSION.equalsIgnoreCase(command)) {
- result = writeValue(client, VALUE_SERVER_VERSION);
- } else if (COMMAND_WINDOW_MANAGER_LIST.equalsIgnoreCase(command)) {
- result = mWindowManager.viewServerListWindows(client);
- } else {
- result = mWindowManager.viewServerWindowCommand(client,
- command, parameters);
- }
-
- if (!result) {
- Slog.w(LOG_TAG, "An error occured with the command: " + command);
- }
- } finally {
- if (in != null) {
- in.close();
- }
- }
- } catch (Exception e) {
- Slog.w(LOG_TAG, "Connection error: ", e);
- } finally {
- if (client != null) {
+ Socket client = mServer.accept();
+ if(mThreadPool != null) {
+ mThreadPool.submit(new ViewServerWorker(client));
+ } else {
try {
client.close();
} catch (IOException e) {
e.printStackTrace();
}
}
+ } catch (Exception e) {
+ Slog.w(LOG_TAG, "Connection error: ", e);
}
}
}
@@ -235,4 +212,131 @@
}
return result;
}
+
+ class ViewServerWorker implements Runnable, WindowManagerService.WindowChangeListener {
+ private Socket mClient;
+ private boolean mNeedWindowListUpdate;
+ private boolean mNeedFocusedWindowUpdate;
+ public ViewServerWorker(Socket client) {
+ mClient = client;
+ mNeedWindowListUpdate = false;
+ mNeedFocusedWindowUpdate = false;
+ }
+
+ public void run() {
+
+ BufferedReader in = null;
+ try {
+ in = new BufferedReader(new InputStreamReader(mClient.getInputStream()), 1024);
+
+ final String request = in.readLine();
+
+ String command;
+ String parameters;
+
+ int index = request.indexOf(' ');
+ if (index == -1) {
+ command = request;
+ parameters = "";
+ } else {
+ command = request.substring(0, index);
+ parameters = request.substring(index + 1);
+ }
+
+ boolean result;
+ if (COMMAND_PROTOCOL_VERSION.equalsIgnoreCase(command)) {
+ result = writeValue(mClient, VALUE_PROTOCOL_VERSION);
+ } else if (COMMAND_SERVER_VERSION.equalsIgnoreCase(command)) {
+ result = writeValue(mClient, VALUE_SERVER_VERSION);
+ } else if (COMMAND_WINDOW_MANAGER_LIST.equalsIgnoreCase(command)) {
+ result = mWindowManager.viewServerListWindows(mClient);
+ } else if(COMMAND_WINDOW_MANAGER_AUTOLIST.equalsIgnoreCase(command)) {
+ result = windowManagerAutolistLoop();
+ } else {
+ result = mWindowManager.viewServerWindowCommand(mClient,
+ command, parameters);
+ }
+
+ if (!result) {
+ Slog.w(LOG_TAG, "An error occured with the command: " + command);
+ }
+ } catch(IOException e) {
+ Slog.w(LOG_TAG, "Connection error: ", e);
+ } finally {
+ if (in != null) {
+ try {
+ in.close();
+
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ if (mClient != null) {
+ try {
+ mClient.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ public void windowsChanged() {
+ synchronized(this) {
+ mNeedWindowListUpdate = true;
+ notifyAll();
+ }
+ }
+
+ public void focusChanged() {
+ synchronized(this) {
+ mNeedFocusedWindowUpdate = true;
+ notifyAll();
+ }
+ }
+
+ private boolean windowManagerAutolistLoop() {
+ mWindowManager.addWindowChangeListener(this);
+ BufferedWriter out = null;
+ try {
+ out = new BufferedWriter(new OutputStreamWriter(mClient.getOutputStream()));
+ while (!Thread.interrupted()) {
+ boolean needWindowListUpdate = false;
+ boolean needFocusedWindowUpdate = false;
+ synchronized (this) {
+ while (!mNeedWindowListUpdate && !mNeedFocusedWindowUpdate) {
+ wait();
+ }
+ if (mNeedWindowListUpdate) {
+ mNeedWindowListUpdate = false;
+ needWindowListUpdate = true;
+ }
+ if (mNeedFocusedWindowUpdate) {
+ mNeedFocusedWindowUpdate = false;
+ needFocusedWindowUpdate = true;
+ }
+ }
+ if(needWindowListUpdate) {
+ out.write("LIST UPDATE\n");
+ out.flush();
+ }
+ if(needFocusedWindowUpdate) {
+ out.write("FOCUS UPDATE\n");
+ out.flush();
+ }
+ }
+ } catch (Exception e) {
+ Slog.w(LOG_TAG, "Connection error: ", e);
+ } finally {
+ if (out != null) {
+ try {
+ out.close();
+ } catch (IOException e) {
+ }
+ }
+ mWindowManager.removeWindowChangeListener(this);
+ }
+ return true;
+ }
+ }
}
diff --git a/services/java/com/android/server/WindowManagerService.java b/services/java/com/android/server/WindowManagerService.java
index f56d9b6..3475b2fc 100644
--- a/services/java/com/android/server/WindowManagerService.java
+++ b/services/java/com/android/server/WindowManagerService.java
@@ -488,6 +488,14 @@
boolean mInTouchMode = false;
private ViewServer mViewServer;
+ private ArrayList<WindowChangeListener> mWindowChangeListeners =
+ new ArrayList<WindowChangeListener>();
+ private boolean mWindowsChanged = false;
+
+ public interface WindowChangeListener {
+ public void windowsChanged();
+ public void focusChanged();
+ }
final Configuration mTempConfiguration = new Configuration();
int mScreenLayout = Configuration.SCREENLAYOUT_SIZE_UNDEFINED;
@@ -661,6 +669,7 @@
TAG, "Adding window " + window + " at "
+ (i+1) + " of " + mWindows.size() + " (after " + pos + ")");
mWindows.add(i+1, window);
+ mWindowsChanged = true;
}
private void placeWindowBefore(Object pos, WindowState window) {
@@ -669,6 +678,7 @@
TAG, "Adding window " + window + " at "
+ i + " of " + mWindows.size() + " (before " + pos + ")");
mWindows.add(i, window);
+ mWindowsChanged = true;
}
//This method finds out the index of a window that has the same app token as
@@ -726,6 +736,7 @@
TAG, "Adding window " + win + " at "
+ (newIdx+1) + " of " + N);
localmWindows.add(newIdx+1, win);
+ mWindowsChanged = true;
}
}
}
@@ -808,6 +819,7 @@
TAG, "Adding window " + win + " at "
+ i + " of " + N);
localmWindows.add(i, win);
+ mWindowsChanged = true;
}
}
}
@@ -825,6 +837,7 @@
TAG, "Adding window " + win + " at "
+ i + " of " + N);
localmWindows.add(i, win);
+ mWindowsChanged = true;
}
if (addToToken) {
token.windows.add(tokenWindowsPos, win);
@@ -1033,6 +1046,7 @@
if (DEBUG_WINDOW_MOVEMENT) Slog.v(
TAG, "Adding input method window " + win + " at " + pos);
mWindows.add(pos, win);
+ mWindowsChanged = true;
moveInputMethodDialogsLocked(pos+1);
return;
}
@@ -1074,6 +1088,7 @@
if (wpos < interestingPos) interestingPos--;
if (DEBUG_WINDOW_MOVEMENT) Slog.v(TAG, "Temp removing at " + wpos + ": " + win);
mWindows.remove(wpos);
+ mWindowsChanged = true;
int NC = win.mChildWindows.size();
while (NC > 0) {
NC--;
@@ -1100,6 +1115,7 @@
if (DEBUG_WINDOW_MOVEMENT) Slog.v(TAG, "ReAdd removing from " + wpos
+ ": " + win);
mWindows.remove(wpos);
+ mWindowsChanged = true;
reAddWindowLocked(wpos, win);
}
}
@@ -1560,6 +1576,7 @@
if (DEBUG_WINDOW_MOVEMENT) Slog.v(TAG, "Wallpaper removing at "
+ oldIndex + ": " + wallpaper);
localmWindows.remove(oldIndex);
+ mWindowsChanged = true;
if (oldIndex < foundI) {
foundI--;
}
@@ -1571,6 +1588,7 @@
+ " from " + oldIndex + " to " + foundI);
localmWindows.add(foundI, wallpaper);
+ mWindowsChanged = true;
changed |= ADJUST_WALLPAPER_LAYERS_CHANGED;
}
}
@@ -2077,6 +2095,7 @@
mWindowMap.remove(win.mClient.asBinder());
mWindows.remove(win);
+ mWindowsChanged = true;
if (DEBUG_WINDOW_MOVEMENT) Slog.v(TAG, "Final remove of window: " + win);
if (mInputMethodWindow == win) {
@@ -3359,6 +3378,7 @@
if (DEBUG_WINDOW_MOVEMENT) Slog.v(TAG,
"Removing starting window: " + startingWindow);
mWindows.remove(startingWindow);
+ mWindowsChanged = true;
ttoken.windows.remove(startingWindow);
ttoken.allAppWindows.remove(startingWindow);
addWindowToListInOrderLocked(startingWindow, true);
@@ -3840,6 +3860,7 @@
WindowState win = token.windows.get(i);
if (DEBUG_WINDOW_MOVEMENT) Slog.v(TAG, "Tmp removing app window " + win);
mWindows.remove(win);
+ mWindowsChanged = true;
int j = win.mChildWindows.size();
while (j > 0) {
j--;
@@ -3944,6 +3965,7 @@
mWindows.add(index, win);
index++;
}
+ mWindowsChanged = true;
return index;
}
@@ -4781,6 +4803,48 @@
return success;
}
+ public void addWindowChangeListener(WindowChangeListener listener) {
+ synchronized(mWindowMap) {
+ mWindowChangeListeners.add(listener);
+ }
+ }
+
+ public void removeWindowChangeListener(WindowChangeListener listener) {
+ synchronized(mWindowMap) {
+ mWindowChangeListeners.remove(listener);
+ }
+ }
+
+ private void notifyWindowsChanged() {
+ WindowChangeListener[] windowChangeListeners;
+ synchronized(mWindowMap) {
+ if(mWindowChangeListeners.isEmpty()) {
+ return;
+ }
+ windowChangeListeners = new WindowChangeListener[mWindowChangeListeners.size()];
+ windowChangeListeners = mWindowChangeListeners.toArray(windowChangeListeners);
+ }
+ int N = windowChangeListeners.length;
+ for(int i = 0; i < N; i++) {
+ windowChangeListeners[i].windowsChanged();
+ }
+ }
+
+ private void notifyFocusChanged() {
+ WindowChangeListener[] windowChangeListeners;
+ synchronized(mWindowMap) {
+ if(mWindowChangeListeners.isEmpty()) {
+ return;
+ }
+ windowChangeListeners = new WindowChangeListener[mWindowChangeListeners.size()];
+ windowChangeListeners = mWindowChangeListeners.toArray(windowChangeListeners);
+ }
+ int N = windowChangeListeners.length;
+ for(int i = 0; i < N; i++) {
+ windowChangeListeners[i].focusChanged();
+ }
+ }
+
private WindowState findWindow(int hashCode) {
if (hashCode == -1) {
return getFocusedWindow();
@@ -7668,6 +7732,7 @@
public static final int ENABLE_SCREEN = 16;
public static final int APP_FREEZE_TIMEOUT = 17;
public static final int SEND_NEW_CONFIGURATION = 18;
+ public static final int REPORT_WINDOWS_CHANGE = 19;
private Session mLastReportedHold;
@@ -7719,6 +7784,7 @@
// Ignore if process has died.
}
}
+ notifyFocusChanged();
}
} break;
@@ -7999,6 +8065,16 @@
break;
}
+ case REPORT_WINDOWS_CHANGE: {
+ if (mWindowsChanged) {
+ synchronized (mWindowMap) {
+ mWindowsChanged = false;
+ }
+ notifyWindowsChanged();
+ }
+ break;
+ }
+
}
}
}
@@ -8083,6 +8159,7 @@
WindowState w = (WindowState)mWindows.get(i);
if (w.mAppToken != null) {
WindowState win = (WindowState)mWindows.remove(i);
+ mWindowsChanged = true;
if (DEBUG_WINDOW_MOVEMENT) Slog.v(TAG,
"Rebuild removing window: " + win);
NW--;
@@ -8218,6 +8295,10 @@
requestAnimationLocked(0);
}
}
+ if (mWindowsChanged && !mWindowChangeListeners.isEmpty()) {
+ mH.removeMessages(H.REPORT_WINDOWS_CHANGE);
+ mH.sendMessage(mH.obtainMessage(H.REPORT_WINDOWS_CHANGE));
+ }
} catch (RuntimeException e) {
mInLayout = false;
Slog.e(TAG, "Unhandled exception while layout out windows", e);
diff --git a/services/java/com/android/server/am/ActivityManagerService.java b/services/java/com/android/server/am/ActivityManagerService.java
index 93122c4..58aab08 100644
--- a/services/java/com/android/server/am/ActivityManagerService.java
+++ b/services/java/com/android/server/am/ActivityManagerService.java
@@ -11562,7 +11562,69 @@
}
}
}
-
+
+ public boolean dumpHeap(String process, boolean managed,
+ String path, ParcelFileDescriptor fd) throws RemoteException {
+
+ try {
+ synchronized (this) {
+ // note: hijacking SET_ACTIVITY_WATCHER, but should be changed to
+ // its own permission (same as profileControl).
+ if (checkCallingPermission(android.Manifest.permission.SET_ACTIVITY_WATCHER)
+ != PackageManager.PERMISSION_GRANTED) {
+ throw new SecurityException("Requires permission "
+ + android.Manifest.permission.SET_ACTIVITY_WATCHER);
+ }
+
+ if (fd == null) {
+ throw new IllegalArgumentException("null fd");
+ }
+
+ ProcessRecord proc = null;
+ try {
+ int pid = Integer.parseInt(process);
+ synchronized (mPidsSelfLocked) {
+ proc = mPidsSelfLocked.get(pid);
+ }
+ } catch (NumberFormatException e) {
+ }
+
+ if (proc == null) {
+ HashMap<String, SparseArray<ProcessRecord>> all
+ = mProcessNames.getMap();
+ SparseArray<ProcessRecord> procs = all.get(process);
+ if (procs != null && procs.size() > 0) {
+ proc = procs.valueAt(0);
+ }
+ }
+
+ if (proc == null || proc.thread == null) {
+ throw new IllegalArgumentException("Unknown process: " + process);
+ }
+
+ boolean isSecure = "1".equals(SystemProperties.get(SYSTEM_SECURE, "0"));
+ if (isSecure) {
+ if ((proc.info.flags&ApplicationInfo.FLAG_DEBUGGABLE) == 0) {
+ throw new SecurityException("Process not debuggable: " + proc);
+ }
+ }
+
+ proc.thread.dumpHeap(managed, path, fd);
+ fd = null;
+ return true;
+ }
+ } catch (RemoteException e) {
+ throw new IllegalStateException("Process disappeared");
+ } finally {
+ if (fd != null) {
+ try {
+ fd.close();
+ } catch (IOException e) {
+ }
+ }
+ }
+ }
+
/** In this method we try to acquire our lock to make sure that we have not deadlocked */
public void monitor() {
synchronized (this) { }
diff --git a/services/jni/com_android_server_InputManager.cpp b/services/jni/com_android_server_InputManager.cpp
index fc901f4..26e105a 100644
--- a/services/jni/com_android_server_InputManager.cpp
+++ b/services/jni/com_android_server_InputManager.cpp
@@ -405,28 +405,28 @@
}
bool NativeInputManager::isAppSwitchKey(int32_t keyCode) {
- return keyCode == KEYCODE_HOME || keyCode == KEYCODE_ENDCALL;
+ return keyCode == AKEYCODE_HOME || keyCode == AKEYCODE_ENDCALL;
}
bool NativeInputManager::isPolicyKey(int32_t keyCode, bool isScreenOn) {
// Special keys that the WindowManagerPolicy might care about.
switch (keyCode) {
- case KEYCODE_VOLUME_UP:
- case KEYCODE_VOLUME_DOWN:
- case KEYCODE_ENDCALL:
- case KEYCODE_POWER:
- case KEYCODE_CALL:
- case KEYCODE_HOME:
- case KEYCODE_MENU:
- case KEYCODE_SEARCH:
+ case AKEYCODE_VOLUME_UP:
+ case AKEYCODE_VOLUME_DOWN:
+ case AKEYCODE_ENDCALL:
+ case AKEYCODE_POWER:
+ case AKEYCODE_CALL:
+ case AKEYCODE_HOME:
+ case AKEYCODE_MENU:
+ case AKEYCODE_SEARCH:
// media keys
- case KEYCODE_HEADSETHOOK:
- case KEYCODE_MEDIA_PLAY_PAUSE:
- case KEYCODE_MEDIA_STOP:
- case KEYCODE_MEDIA_NEXT:
- case KEYCODE_MEDIA_PREVIOUS:
- case KEYCODE_MEDIA_REWIND:
- case KEYCODE_MEDIA_FAST_FORWARD:
+ case AKEYCODE_HEADSETHOOK:
+ case AKEYCODE_MEDIA_PLAY_PAUSE:
+ case AKEYCODE_MEDIA_STOP:
+ case AKEYCODE_MEDIA_NEXT:
+ case AKEYCODE_MEDIA_PREVIOUS:
+ case AKEYCODE_MEDIA_REWIND:
+ case AKEYCODE_MEDIA_FAST_FORWARD:
return true;
default:
// We need to pass all keys to the policy in the following cases:
diff --git a/libs/surfaceflinger/Android.mk b/services/surfaceflinger/Android.mk
similarity index 100%
rename from libs/surfaceflinger/Android.mk
rename to services/surfaceflinger/Android.mk
diff --git a/libs/surfaceflinger/Barrier.h b/services/surfaceflinger/Barrier.h
similarity index 100%
rename from libs/surfaceflinger/Barrier.h
rename to services/surfaceflinger/Barrier.h
diff --git a/libs/surfaceflinger/BlurFilter.cpp b/services/surfaceflinger/BlurFilter.cpp
similarity index 100%
rename from libs/surfaceflinger/BlurFilter.cpp
rename to services/surfaceflinger/BlurFilter.cpp
diff --git a/libs/surfaceflinger/BlurFilter.h b/services/surfaceflinger/BlurFilter.h
similarity index 100%
rename from libs/surfaceflinger/BlurFilter.h
rename to services/surfaceflinger/BlurFilter.h
diff --git a/libs/surfaceflinger/DisplayHardware/DisplayHardware.cpp b/services/surfaceflinger/DisplayHardware/DisplayHardware.cpp
similarity index 100%
rename from libs/surfaceflinger/DisplayHardware/DisplayHardware.cpp
rename to services/surfaceflinger/DisplayHardware/DisplayHardware.cpp
diff --git a/libs/surfaceflinger/DisplayHardware/DisplayHardware.h b/services/surfaceflinger/DisplayHardware/DisplayHardware.h
similarity index 100%
rename from libs/surfaceflinger/DisplayHardware/DisplayHardware.h
rename to services/surfaceflinger/DisplayHardware/DisplayHardware.h
diff --git a/libs/surfaceflinger/DisplayHardware/DisplayHardwareBase.cpp b/services/surfaceflinger/DisplayHardware/DisplayHardwareBase.cpp
similarity index 100%
rename from libs/surfaceflinger/DisplayHardware/DisplayHardwareBase.cpp
rename to services/surfaceflinger/DisplayHardware/DisplayHardwareBase.cpp
diff --git a/libs/surfaceflinger/DisplayHardware/DisplayHardwareBase.h b/services/surfaceflinger/DisplayHardware/DisplayHardwareBase.h
similarity index 100%
rename from libs/surfaceflinger/DisplayHardware/DisplayHardwareBase.h
rename to services/surfaceflinger/DisplayHardware/DisplayHardwareBase.h
diff --git a/libs/surfaceflinger/GLExtensions.cpp b/services/surfaceflinger/GLExtensions.cpp
similarity index 100%
rename from libs/surfaceflinger/GLExtensions.cpp
rename to services/surfaceflinger/GLExtensions.cpp
diff --git a/libs/surfaceflinger/GLExtensions.h b/services/surfaceflinger/GLExtensions.h
similarity index 100%
rename from libs/surfaceflinger/GLExtensions.h
rename to services/surfaceflinger/GLExtensions.h
diff --git a/libs/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
similarity index 100%
rename from libs/surfaceflinger/Layer.cpp
rename to services/surfaceflinger/Layer.cpp
diff --git a/libs/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
similarity index 100%
rename from libs/surfaceflinger/Layer.h
rename to services/surfaceflinger/Layer.h
diff --git a/libs/surfaceflinger/LayerBase.cpp b/services/surfaceflinger/LayerBase.cpp
similarity index 100%
rename from libs/surfaceflinger/LayerBase.cpp
rename to services/surfaceflinger/LayerBase.cpp
diff --git a/libs/surfaceflinger/LayerBase.h b/services/surfaceflinger/LayerBase.h
similarity index 100%
rename from libs/surfaceflinger/LayerBase.h
rename to services/surfaceflinger/LayerBase.h
diff --git a/libs/surfaceflinger/LayerBlur.cpp b/services/surfaceflinger/LayerBlur.cpp
similarity index 100%
rename from libs/surfaceflinger/LayerBlur.cpp
rename to services/surfaceflinger/LayerBlur.cpp
diff --git a/libs/surfaceflinger/LayerBlur.h b/services/surfaceflinger/LayerBlur.h
similarity index 100%
rename from libs/surfaceflinger/LayerBlur.h
rename to services/surfaceflinger/LayerBlur.h
diff --git a/libs/surfaceflinger/LayerBuffer.cpp b/services/surfaceflinger/LayerBuffer.cpp
similarity index 100%
rename from libs/surfaceflinger/LayerBuffer.cpp
rename to services/surfaceflinger/LayerBuffer.cpp
diff --git a/libs/surfaceflinger/LayerBuffer.h b/services/surfaceflinger/LayerBuffer.h
similarity index 100%
rename from libs/surfaceflinger/LayerBuffer.h
rename to services/surfaceflinger/LayerBuffer.h
diff --git a/libs/surfaceflinger/LayerDim.cpp b/services/surfaceflinger/LayerDim.cpp
similarity index 100%
rename from libs/surfaceflinger/LayerDim.cpp
rename to services/surfaceflinger/LayerDim.cpp
diff --git a/libs/surfaceflinger/LayerDim.h b/services/surfaceflinger/LayerDim.h
similarity index 100%
rename from libs/surfaceflinger/LayerDim.h
rename to services/surfaceflinger/LayerDim.h
diff --git a/libs/surfaceflinger/MODULE_LICENSE_APACHE2 b/services/surfaceflinger/MODULE_LICENSE_APACHE2
similarity index 100%
rename from libs/surfaceflinger/MODULE_LICENSE_APACHE2
rename to services/surfaceflinger/MODULE_LICENSE_APACHE2
diff --git a/libs/surfaceflinger/MessageQueue.cpp b/services/surfaceflinger/MessageQueue.cpp
similarity index 100%
rename from libs/surfaceflinger/MessageQueue.cpp
rename to services/surfaceflinger/MessageQueue.cpp
diff --git a/libs/surfaceflinger/MessageQueue.h b/services/surfaceflinger/MessageQueue.h
similarity index 100%
rename from libs/surfaceflinger/MessageQueue.h
rename to services/surfaceflinger/MessageQueue.h
diff --git a/libs/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
similarity index 98%
rename from libs/surfaceflinger/SurfaceFlinger.cpp
rename to services/surfaceflinger/SurfaceFlinger.cpp
index 68e8f19..3167c4c 100644
--- a/libs/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -63,20 +63,6 @@
#define DISPLAY_COUNT 1
namespace android {
-
-// ---------------------------------------------------------------------------
-
-void SurfaceFlinger::instantiate() {
- defaultServiceManager()->addService(
- String16("SurfaceFlinger"), new SurfaceFlinger());
-}
-
-void SurfaceFlinger::shutdown() {
- // we should unregister here, but not really because
- // when (if) the service manager goes away, all the services
- // it has a reference to will leave too.
-}
-
// ---------------------------------------------------------------------------
SurfaceFlinger::LayerVector::LayerVector(const SurfaceFlinger::LayerVector& rhs)
diff --git a/libs/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
similarity index 98%
rename from libs/surfaceflinger/SurfaceFlinger.h
rename to services/surfaceflinger/SurfaceFlinger.h
index 0bfc170..8821e5c 100644
--- a/libs/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -29,6 +29,7 @@
#include <binder/IMemory.h>
#include <binder/Permission.h>
+#include <binder/BinderService.h>
#include <ui/PixelFormat.h>
#include <surfaceflinger/ISurfaceComposer.h>
@@ -167,11 +168,13 @@
eTraversalNeeded = 0x02
};
-class SurfaceFlinger : public BnSurfaceComposer, protected Thread
+class SurfaceFlinger :
+ public BinderService<SurfaceFlinger>,
+ public BnSurfaceComposer,
+ protected Thread
{
public:
- static void instantiate();
- static void shutdown();
+ static char const* getServiceName() { return "SurfaceFlinger"; }
SurfaceFlinger();
virtual ~SurfaceFlinger();
diff --git a/libs/surfaceflinger/TextureManager.cpp b/services/surfaceflinger/TextureManager.cpp
similarity index 99%
rename from libs/surfaceflinger/TextureManager.cpp
rename to services/surfaceflinger/TextureManager.cpp
index 6526032..3b326df 100644
--- a/libs/surfaceflinger/TextureManager.cpp
+++ b/services/surfaceflinger/TextureManager.cpp
@@ -107,7 +107,6 @@
{
switch (format) {
case HAL_PIXEL_FORMAT_YV12:
- case HAL_PIXEL_FORMAT_YV16:
return true;
}
return false;
@@ -118,7 +117,6 @@
switch (format) {
// supported YUV formats
case HAL_PIXEL_FORMAT_YV12:
- case HAL_PIXEL_FORMAT_YV16:
// Legacy/deprecated YUV formats
case HAL_PIXEL_FORMAT_YCbCr_422_SP:
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
diff --git a/libs/surfaceflinger/TextureManager.h b/services/surfaceflinger/TextureManager.h
similarity index 100%
rename from libs/surfaceflinger/TextureManager.h
rename to services/surfaceflinger/TextureManager.h
diff --git a/libs/surfaceflinger/Transform.cpp b/services/surfaceflinger/Transform.cpp
similarity index 100%
rename from libs/surfaceflinger/Transform.cpp
rename to services/surfaceflinger/Transform.cpp
diff --git a/libs/surfaceflinger/Transform.h b/services/surfaceflinger/Transform.h
similarity index 100%
rename from libs/surfaceflinger/Transform.h
rename to services/surfaceflinger/Transform.h
diff --git a/libs/surfaceflinger/clz.cpp b/services/surfaceflinger/clz.cpp
similarity index 100%
rename from libs/surfaceflinger/clz.cpp
rename to services/surfaceflinger/clz.cpp
diff --git a/libs/surfaceflinger/clz.h b/services/surfaceflinger/clz.h
similarity index 100%
rename from libs/surfaceflinger/clz.h
rename to services/surfaceflinger/clz.h
diff --git a/libs/surfaceflinger/tests/Android.mk b/services/surfaceflinger/tests/Android.mk
similarity index 100%
rename from libs/surfaceflinger/tests/Android.mk
rename to services/surfaceflinger/tests/Android.mk
diff --git a/libs/surfaceflinger/tests/overlays/Android.mk b/services/surfaceflinger/tests/overlays/Android.mk
similarity index 100%
rename from libs/surfaceflinger/tests/overlays/Android.mk
rename to services/surfaceflinger/tests/overlays/Android.mk
diff --git a/libs/surfaceflinger/tests/overlays/overlays.cpp b/services/surfaceflinger/tests/overlays/overlays.cpp
similarity index 100%
rename from libs/surfaceflinger/tests/overlays/overlays.cpp
rename to services/surfaceflinger/tests/overlays/overlays.cpp
diff --git a/libs/surfaceflinger/tests/resize/Android.mk b/services/surfaceflinger/tests/resize/Android.mk
similarity index 100%
rename from libs/surfaceflinger/tests/resize/Android.mk
rename to services/surfaceflinger/tests/resize/Android.mk
diff --git a/libs/surfaceflinger/tests/resize/resize.cpp b/services/surfaceflinger/tests/resize/resize.cpp
similarity index 100%
rename from libs/surfaceflinger/tests/resize/resize.cpp
rename to services/surfaceflinger/tests/resize/resize.cpp
diff --git a/telephony/java/com/android/internal/telephony/DataConnectionTracker.java b/telephony/java/com/android/internal/telephony/DataConnectionTracker.java
index e71fe2e..06807c6 100644
--- a/telephony/java/com/android/internal/telephony/DataConnectionTracker.java
+++ b/telephony/java/com/android/internal/telephony/DataConnectionTracker.java
@@ -17,6 +17,7 @@
package com.android.internal.telephony;
import android.app.PendingIntent;
+import android.net.NetworkProperties;
import android.os.AsyncResult;
import android.os.Handler;
import android.os.Message;
@@ -26,6 +27,10 @@
import android.text.TextUtils;
import android.util.Log;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.net.UnknownHostException;
import java.util.ArrayList;
/**
@@ -191,6 +196,9 @@
/** indication of our availability (preconditions to trysetupData are met) **/
protected boolean mAvailability = false;
+ /** all our network properties (dns, gateway, ip, etc) */
+ protected NetworkProperties mNetworkProperties;
+
/**
* Default constructor
*/
@@ -424,6 +432,15 @@
protected abstract void setState(State s);
+ protected NetworkProperties getNetworkProperties(String apnType) {
+ int id = apnTypeToId(apnType);
+ if (isApnIdEnabled(id)) {
+ return mNetworkProperties;
+ } else {
+ return null;
+ }
+ }
+
// tell all active apns of the current condition
protected void notifyDataConnection(String reason) {
for (int id = 0; id < APN_NUM_TYPES; id++) {
@@ -668,5 +685,43 @@
}
}
+ protected NetworkProperties makeNetworkProperties(DataConnection connection) {
+ NetworkProperties properties = new NetworkProperties();
+ try {
+ properties.setInterface(NetworkInterface.getByName(connection.getInterface()));
+ } catch (SocketException e) {
+ Log.e(LOG_TAG, "SocketException creating NetworkInterface: " + e);
+ } catch (NullPointerException e) {
+ Log.e(LOG_TAG, "NPE trying to makeNetworkProperties: " + e);
+ }
+ try {
+ properties.addAddress(InetAddress.getByName(connection.getIpAddress()));
+ } catch (UnknownHostException e) {
+ Log.e(LOG_TAG, "UnknownHostException setting IpAddress: " + e);
+ } catch (SecurityException e) {
+ Log.e(LOG_TAG, "SecurityException setting IpAddress: " + e);
+ }
+
+ try {
+ properties.setGateway(InetAddress.getByName(connection.getGatewayAddress()));
+ } catch (UnknownHostException e) {
+ Log.e(LOG_TAG, "UnknownHostException setting GatewayAddress: " + e);
+ } catch (SecurityException e) {
+ Log.e(LOG_TAG, "SecurityException setting GatewayAddress: " + e);
+ }
+
+ try {
+ String[] dnsStrings = connection.getDnsServers();
+ for (int i = 0; i<dnsStrings.length; i++) {
+ properties.addDns(InetAddress.getByName(dnsStrings[i]));
+ }
+ } catch (UnknownHostException e) {
+ Log.e(LOG_TAG, "UnknownHostException setting DnsAddress: " + e);
+ } catch (SecurityException e) {
+ Log.e(LOG_TAG, "SecurityException setting DnsAddress: " + e);
+ }
+ // TODO - set Proxy info
+ return properties;
+ }
}
diff --git a/telephony/java/com/android/internal/telephony/DefaultPhoneNotifier.java b/telephony/java/com/android/internal/telephony/DefaultPhoneNotifier.java
index 0646101..382c19f 100644
--- a/telephony/java/com/android/internal/telephony/DefaultPhoneNotifier.java
+++ b/telephony/java/com/android/internal/telephony/DefaultPhoneNotifier.java
@@ -16,6 +16,7 @@
package com.android.internal.telephony;
+import android.net.NetworkProperties;
import android.os.Bundle;
import android.os.RemoteException;
import android.os.ServiceManager;
@@ -107,13 +108,17 @@
// use apnType as the key to which connection we're talking about.
// pass apnType back up to fetch particular for this one.
TelephonyManager telephony = TelephonyManager.getDefault();
+ NetworkProperties networkProperties = null;
+ if (state == Phone.DataState.CONNECTED) {
+ networkProperties = sender.getNetworkProperties(apnType);
+ }
try {
mRegistry.notifyDataConnection(
convertDataState(state),
sender.isDataConnectivityPossible(), reason,
sender.getActiveApn(),
apnType,
- sender.getInterfaceName(null),
+ networkProperties,
((telephony!=null) ? telephony.getNetworkType() :
TelephonyManager.NETWORK_TYPE_UNKNOWN));
} catch (RemoteException ex) {
diff --git a/telephony/java/com/android/internal/telephony/ITelephonyRegistry.aidl b/telephony/java/com/android/internal/telephony/ITelephonyRegistry.aidl
index 79c2b40..f7b70ee 100644
--- a/telephony/java/com/android/internal/telephony/ITelephonyRegistry.aidl
+++ b/telephony/java/com/android/internal/telephony/ITelephonyRegistry.aidl
@@ -17,6 +17,7 @@
package com.android.internal.telephony;
import android.content.Intent;
+import android.net.NetworkProperties;
import android.os.Bundle;
import android.telephony.ServiceState;
import android.telephony.SignalStrength;
@@ -32,7 +33,8 @@
void notifyCallForwardingChanged(boolean cfi);
void notifyDataActivity(int state);
void notifyDataConnection(int state, boolean isDataConnectivityPossible,
- String reason, String apn, String apnType, String interfaceName, int networkType);
+ String reason, String apn, String apnType, in NetworkProperties networkProperties,
+ int networkType);
void notifyDataConnectionFailed(String reason, String apnType);
void notifyCellLocation(in Bundle cellLocation);
}
diff --git a/telephony/java/com/android/internal/telephony/IccPhoneBookInterfaceManager.java b/telephony/java/com/android/internal/telephony/IccPhoneBookInterfaceManager.java
index 9f8e57f..48257cc 100644
--- a/telephony/java/com/android/internal/telephony/IccPhoneBookInterfaceManager.java
+++ b/telephony/java/com/android/internal/telephony/IccPhoneBookInterfaceManager.java
@@ -62,8 +62,8 @@
logd("GET_RECORD_SIZE Size " + recordSize[0] +
" total " + recordSize[1] +
" #record " + recordSize[2]);
- mLock.notifyAll();
}
+ mLock.notifyAll();
}
break;
case EVENT_UPDATE_DONE:
diff --git a/telephony/java/com/android/internal/telephony/MccTable.java b/telephony/java/com/android/internal/telephony/MccTable.java
index b73c2f7..5dd29af 100644
--- a/telephony/java/com/android/internal/telephony/MccTable.java
+++ b/telephony/java/com/android/internal/telephony/MccTable.java
@@ -23,346 +23,13 @@
import android.net.wifi.WifiManager;
import android.os.RemoteException;
import android.os.SystemProperties;
-import android.provider.Settings;
import android.text.TextUtils;
import android.util.Log;
-import java.util.Arrays;
-
-/**
- * The table below is built from two resources:
- *
- * 1) ITU "Mobile Network Code (MNC) for the international
- * identification plan for mobile terminals and mobile users"
- * which is available as an annex to the ITU operational bulletin
- * available here: http://www.itu.int/itu-t/bulletin/annex.html
- *
- * 2) The ISO 3166 country codes list, available here:
- * http://www.iso.org/iso/en/prods-services/iso3166ma/02iso-3166-code-lists/index.html
- *
- * This table was verified (28 Aug 2009) against
- * http://en.wikipedia.org/wiki/List_of_mobile_country_codes with the
- * only unresolved discrepancy being that this list has an extra entry
- * (461) for China.
- *
- * TODO: Complete the mappings for timezones and language/locale codes.
- *
- * The actual table data used in the Java code is generated from the
- * below Python code for efficiency. The information is expected to
- * be static, but if changes are required, the table in the python
- * code can be modified and the trailing code run to re-generate the
- * tables that are to be used by Java.
-
-mcc_table = [
- (202, 'gr', 2, 'Greece'),
- (204, 'nl', 2, 'Europe/Amsterdam', 'nl', 13, 'Netherlands (Kingdom of the)'),
- (206, 'be', 2, 'Belgium'),
- (208, 'fr', 2, 'Europe/Paris', 'fr', 'France'),
- (212, 'mc', 2, 'Monaco (Principality of)'),
- (213, 'ad', 2, 'Andorra (Principality of)'),
- (214, 'es', 2, 'Europe/Madrid', 'es', 'Spain'),
- (216, 'hu', 2, 'Hungary (Republic of)'),
- (218, 'ba', 2, 'Bosnia and Herzegovina'),
- (219, 'hr', 2, 'Croatia (Republic of)'),
- (220, 'rs', 2, 'Serbia and Montenegro'),
- (222, 'it', 2, 'Europe/Rome', 'it', 'Italy'),
- (225, 'va', 2, 'Europe/Rome', 'it', 'Vatican City State'),
- (226, 'ro', 2, 'Romania'),
- (228, 'ch', 2, 'Europe/Zurich', 'de', 'Switzerland (Confederation of)'),
- (230, 'cz', 2, 'Europe/Prague', 'cs', 13, 'Czech Republic'),
- (231, 'sk', 2, 'Slovak Republic'),
- (232, 'at', 2, 'Europe/Vienna', 'de', 13, 'Austria'),
- (234, 'gb', 2, 'Europe/London', 'en', 13, 'United Kingdom of Great Britain and Northern Ireland'),
- (235, 'gb', 2, 'Europe/London', 'en', 13, 'United Kingdom of Great Britain and Northern Ireland'),
- (238, 'dk', 2, 'Denmark'),
- (240, 'se', 2, 'Sweden'),
- (242, 'no', 2, 'Norway'),
- (244, 'fi', 2, 'Finland'),
- (246, 'lt', 2, 'Lithuania (Republic of)'),
- (247, 'lv', 2, 'Latvia (Republic of)'),
- (248, 'ee', 2, 'Estonia (Republic of)'),
- (250, 'ru', 2, 'Russian Federation'),
- (255, 'ua', 2, 'Ukraine'),
- (257, 'by', 2, 'Belarus (Republic of)'),
- (259, 'md', 2, 'Moldova (Republic of)'),
- (260, 'pl', 2, 'Europe/Warsaw', 'Poland (Republic of)'),
- (262, 'de', 2, 'Europe/Berlin', 'de', 13, 'Germany (Federal Republic of)'),
- (266, 'gi', 2, 'Gibraltar'),
- (268, 'pt', 2, 'Portugal'),
- (270, 'lu', 2, 'Luxembourg'),
- (272, 'ie', 2, 'Europe/Dublin', 'en', 'Ireland'),
- (274, 'is', 2, 'Iceland'),
- (276, 'al', 2, 'Albania (Republic of)'),
- (278, 'mt', 2, 'Malta'),
- (280, 'cy', 2, 'Cyprus (Republic of)'),
- (282, 'ge', 2, 'Georgia'),
- (283, 'am', 2, 'Armenia (Republic of)'),
- (284, 'bg', 2, 'Bulgaria (Republic of)'),
- (286, 'tr', 2, 'Turkey'),
- (288, 'fo', 2, 'Faroe Islands'),
- (289, 'ge', 2, 'Abkhazia (Georgia)'),
- (290, 'gl', 2, 'Greenland (Denmark)'),
- (292, 'sm', 2, 'San Marino (Republic of)'),
- (293, 'sl', 2, 'Slovenia (Republic of)'),
- (294, 'mk', 2, 'The Former Yugoslav Republic of Macedonia'),
- (295, 'li', 2, 'Liechtenstein (Principality of)'),
- (297, 'me', 2, 'Montenegro (Republic of)'),
- (302, 'ca', 3, '', '', 11, 'Canada'),
- (308, 'pm', 2, 'Saint Pierre and Miquelon (Collectivité territoriale de la République française)'),
- (310, 'us', 3, '', 'en', 11, 'United States of America'),
- (311, 'us', 3, '', 'en', 11, 'United States of America'),
- (312, 'us', 3, '', 'en', 11, 'United States of America'),
- (313, 'us', 3, '', 'en', 11, 'United States of America'),
- (314, 'us', 3, '', 'en', 11, 'United States of America'),
- (315, 'us', 3, '', 'en', 11, 'United States of America'),
- (316, 'us', 3, '', 'en', 11, 'United States of America'),
- (330, 'pr', 2, 'Puerto Rico'),
- (332, 'vi', 2, 'United States Virgin Islands'),
- (334, 'mx', 3, 'Mexico'),
- (338, 'jm', 3, 'Jamaica'),
- (340, 'gp', 2, 'Guadeloupe (French Department of)'),
- (342, 'bb', 3, 'Barbados'),
- (344, 'ag', 3, 'Antigua and Barbuda'),
- (346, 'ky', 3, 'Cayman Islands'),
- (348, 'vg', 3, 'British Virgin Islands'),
- (350, 'bm', 2, 'Bermuda'),
- (352, 'gd', 2, 'Grenada'),
- (354, 'ms', 2, 'Montserrat'),
- (356, 'kn', 2, 'Saint Kitts and Nevis'),
- (358, 'lc', 2, 'Saint Lucia'),
- (360, 'vc', 2, 'Saint Vincent and the Grenadines'),
- (362, 'nl', 2, 'Netherlands Antilles'),
- (363, 'aw', 2, 'Aruba'),
- (364, 'bs', 2, 'Bahamas (Commonwealth of the)'),
- (365, 'ai', 3, 'Anguilla'),
- (366, 'dm', 2, 'Dominica (Commonwealth of)'),
- (368, 'cu', 2, 'Cuba'),
- (370, 'do', 2, 'Dominican Republic'),
- (372, 'ht', 2, 'Haiti (Republic of)'),
- (374, 'tt', 2, 'Trinidad and Tobago'),
- (376, 'tc', 2, 'Turks and Caicos Islands'),
- (400, 'az', 2, 'Azerbaijani Republic'),
- (401, 'kz', 2, 'Kazakhstan (Republic of)'),
- (402, 'bt', 2, 'Bhutan (Kingdom of)'),
- (404, 'in', 2, 'India (Republic of)'),
- (405, 'in', 2, 'India (Republic of)'),
- (410, 'pk', 2, 'Pakistan (Islamic Republic of)'),
- (412, 'af', 2, 'Afghanistan'),
- (413, 'lk', 2, 'Sri Lanka (Democratic Socialist Republic of)'),
- (414, 'mm', 2, 'Myanmar (Union of)'),
- (415, 'lb', 2, 'Lebanon'),
- (416, 'jo', 2, 'Jordan (Hashemite Kingdom of)'),
- (417, 'sy', 2, 'Syrian Arab Republic'),
- (418, 'iq', 2, 'Iraq (Republic of)'),
- (419, 'kw', 2, 'Kuwait (State of)'),
- (420, 'sa', 2, 'Saudi Arabia (Kingdom of)'),
- (421, 'ye', 2, 'Yemen (Republic of)'),
- (422, 'om', 2, 'Oman (Sultanate of)'),
- (423, 'ps', 2, 'Palestine'),
- (424, 'ae', 2, 'United Arab Emirates'),
- (425, 'il', 2, 'Israel (State of)'),
- (426, 'bh', 2, 'Bahrain (Kingdom of)'),
- (427, 'qa', 2, 'Qatar (State of)'),
- (428, 'mn', 2, 'Mongolia'),
- (429, 'np', 2, 'Nepal'),
- (430, 'ae', 2, 'United Arab Emirates'),
- (431, 'ae', 2, 'United Arab Emirates'),
- (432, 'ir', 2, 'Iran (Islamic Republic of)'),
- (434, 'uz', 2, 'Uzbekistan (Republic of)'),
- (436, 'tj', 2, 'Tajikistan (Republic of)'),
- (437, 'kg', 2, 'Kyrgyz Republic'),
- (438, 'tm', 2, 'Turkmenistan'),
- (440, 'jp', 2, 'Asia/Tokyo', 'ja', 14, 'Japan'),
- (441, 'jp', 2, 'Asia/Tokyo', 'ja', 14, 'Japan'),
- (450, 'kr', 2, 'Korea (Republic of)'),
- (452, 'vn', 2, 'Viet Nam (Socialist Republic of)'),
- (454, 'hk', 2, '"Hong Kong, China"'),
- (455, 'mo', 2, '"Macao, China"'),
- (456, 'kh', 2, 'Cambodia (Kingdom of)'),
- (457, 'la', 2, "Lao People's Democratic Republic"),
- (460, 'cn', 2, "Asia/Beijing", 'zh', 13, "China (People's Republic of)"),
- (461, 'cn', 2, "Asia/Beijing", 'zh', 13, "China (People's Republic of)"),
- (466, 'tw', 2, "Taiwan (Republic of China)"),
- (467, 'kp', 2, "Democratic People's Republic of Korea"),
- (470, 'bd', 2, "Bangladesh (People's Republic of)"),
- (472, 'mv', 2, 'Maldives (Republic of)'),
- (502, 'my', 2, 'Malaysia'),
- (505, 'au', 2, 'Australia/Sydney', 'en', 11, 'Australia'),
- (510, 'id', 2, 'Indonesia (Republic of)'),
- (514, 'tl', 2, 'Democratic Republic of Timor-Leste'),
- (515, 'ph', 2, 'Philippines (Republic of the)'),
- (520, 'th', 2, 'Thailand'),
- (525, 'sg', 2, 'Asia/Singapore', 'en', 11, 'Singapore (Republic of)'),
- (528, 'bn', 2, 'Brunei Darussalam'),
- (530, 'nz', 2, 'Pacific/Auckland', 'en', 'New Zealand'),
- (534, 'mp', 2, 'Northern Mariana Islands (Commonwealth of the)'),
- (535, 'gu', 2, 'Guam'),
- (536, 'nr', 2, 'Nauru (Republic of)'),
- (537, 'pg', 2, 'Papua New Guinea'),
- (539, 'to', 2, 'Tonga (Kingdom of)'),
- (540, 'sb', 2, 'Solomon Islands'),
- (541, 'vu', 2, 'Vanuatu (Republic of)'),
- (542, 'fj', 2, 'Fiji (Republic of)'),
- (543, 'wf', 2, "Wallis and Futuna (Territoire franais d'outre-mer)"),
- (544, 'as', 2, 'American Samoa'),
- (545, 'ki', 2, 'Kiribati (Republic of)'),
- (546, 'nc', 2, "New Caledonia (Territoire franais d'outre-mer)"),
- (547, 'pf', 2, "French Polynesia (Territoire franais d'outre-mer)"),
- (548, 'ck', 2, 'Cook Islands'),
- (549, 'ws', 2, 'Samoa (Independent State of)'),
- (550, 'fm', 2, 'Micronesia (Federated States of)'),
- (551, 'mh', 2, 'Marshall Islands (Republic of the)'),
- (552, 'pw', 2, 'Palau (Republic of)'),
- (602, 'eg', 2, 'Egypt (Arab Republic of)'),
- (603, 'dz', 2, "Algeria (People's Democratic Republic of)"),
- (604, 'ma', 2, 'Morocco (Kingdom of)'),
- (605, 'tn', 2, 'Tunisia'),
- (606, 'ly', 2, "Libya (Socialist People's Libyan Arab Jamahiriya)"),
- (607, 'gm', 2, 'Gambia (Republic of the)'),
- (608, 'sn', 2, 'Senegal (Republic of)'),
- (609, 'mr', 2, 'Mauritania (Islamic Republic of)'),
- (610, 'ml', 2, 'Mali (Republic of)'),
- (611, 'gn', 2, 'Guinea (Republic of)'),
- (612, 'ci', 2, "Cte d'Ivoire (Republic of)"),
- (613, 'bf', 2, 'Burkina Faso'),
- (614, 'ne', 2, 'Niger (Republic of the)'),
- (615, 'tg', 2, 'Togolese Republic'),
- (616, 'bj', 2, 'Benin (Republic of)'),
- (617, 'mu', 2, 'Mauritius (Republic of)'),
- (618, 'lr', 2, 'Liberia (Republic of)'),
- (619, 'sl', 2, 'Sierra Leone'),
- (620, 'gh', 2, 'Ghana'),
- (621, 'ng', 2, 'Nigeria (Federal Republic of)'),
- (622, 'td', 2, 'Chad (Republic of)'),
- (623, 'cf', 2, 'Central African Republic'),
- (624, 'cm', 2, 'Cameroon (Republic of)'),
- (625, 'cv', 2, 'Cape Verde (Republic of)'),
- (626, 'st', 2, 'Sao Tome and Principe (Democratic Republic of)'),
- (627, 'gq', 2, 'Equatorial Guinea (Republic of)'),
- (628, 'ga', 2, 'Gabonese Republic'),
- (629, 'cg', 2, 'Congo (Republic of the)'),
- (630, 'cg', 2, 'Democratic Republic of the Congo'),
- (631, 'ao', 2, 'Angola (Republic of)'),
- (632, 'gw', 2, 'Guinea-Bissau (Republic of)'),
- (633, 'sc', 2, 'Seychelles (Republic of)'),
- (634, 'sd', 2, 'Sudan (Republic of the)'),
- (635, 'rw', 2, 'Rwanda (Republic of)'),
- (636, 'et', 2, 'Ethiopia (Federal Democratic Republic of)'),
- (637, 'so', 2, 'Somali Democratic Republic'),
- (638, 'dj', 2, 'Djibouti (Republic of)'),
- (639, 'ke', 2, 'Kenya (Republic of)'),
- (640, 'tz', 2, 'Tanzania (United Republic of)'),
- (641, 'ug', 2, 'Uganda (Republic of)'),
- (642, 'bi', 2, 'Burundi (Republic of)'),
- (643, 'mz', 2, 'Mozambique (Republic of)'),
- (645, 'zm', 2, 'Zambia (Republic of)'),
- (646, 'mg', 2, 'Madagascar (Republic of)'),
- (647, 're', 2, 'Reunion (French Department of)'),
- (648, 'zw', 2, 'Zimbabwe (Republic of)'),
- (649, 'na', 2, 'Namibia (Republic of)'),
- (650, 'mw', 2, 'Malawi'),
- (651, 'ls', 2, 'Lesotho (Kingdom of)'),
- (652, 'bw', 2, 'Botswana (Republic of)'),
- (653, 'sz', 2, 'Swaziland (Kingdom of)'),
- (654, 'km', 2, 'Comoros (Union of the)'),
- (655, 'za', 2, 'Africa/Johannesburg', 'en', 'South Africa (Republic of)'),
- (657, 'er', 2, 'Eritrea'),
- (702, 'bz', 2, 'Belize'),
- (704, 'gt', 2, 'Guatemala (Republic of)'),
- (706, 'sv', 2, 'El Salvador (Republic of)'),
- (708, 'hn', 3, 'Honduras (Republic of)'),
- (710, 'ni', 2, 'Nicaragua'),
- (712, 'cr', 2, 'Costa Rica'),
- (714, 'pa', 2, 'Panama (Republic of)'),
- (716, 'pe', 2, 'Peru'),
- (722, 'ar', 3, 'Argentine Republic'),
- (724, 'br', 2, 'Brazil (Federative Republic of)'),
- (730, 'cl', 2, 'Chile'),
- (732, 'co', 3, 'Colombia (Republic of)'),
- (734, 've', 2, 'Venezuela (Bolivarian Republic of)'),
- (736, 'bo', 2, 'Bolivia (Republic of)'),
- (738, 'gy', 2, 'Guyana'),
- (740, 'ec', 2, 'Ecuador'),
- (742, 'gf', 2, 'French Guiana (French Department of)'),
- (744, 'py', 2, 'Paraguay (Republic of)'),
- (746, 'sr', 2, 'Suriname (Republic of)'),
- (748, 'uy', 2, 'Uruguay (Eastern Republic of)'),
- (750, 'fk', 2, 'Falkland Islands (Malvinas)')]
-
-get_mcc = lambda elt: elt[0]
-get_iso = lambda elt: elt[1]
-get_sd = lambda elt: elt[2]
-get_tz = lambda elt: len(elt) > 4 and elt[3] or ''
-get_lang = lambda elt: len(elt) > 5 and elt[4] or ''
-get_wifi = lambda elt: len(elt) > 6 and elt[5] or 0
-
-mcc_codes = ['0x%04x' % get_mcc(elt) for elt in mcc_table]
-tz_set = sorted(x for x in set(get_tz(elt) for elt in mcc_table))
-lang_set = sorted(x for x in set(get_lang(elt) for elt in mcc_table))
-
-def mk_ind_code(elt):
- iso = get_iso(elt)
- iso_code = ((ord(iso[0]) << 8) | ord(iso[1])) & 0xFFFF # 16 bits
- wifi = get_wifi(elt) & 0x000F # 4 bits
- sd = get_sd(elt) & 0x0003 # 2 bits
- tz_ind = tz_set.index(get_tz(elt)) & 0x001F # 5 bits
- lang_ind = lang_set.index(get_lang(elt)) & 0x000F # 4 bits
- return (iso_code << 16) | (wifi << 11) | (sd << 9) | (tz_ind << 4) | lang_ind
-
-ind_codes = ['0x%08x' % mk_ind_code(elt) for elt in mcc_table]
-
-def fmt_list(title, l, batch_sz):
- sl = []
- for i in range(len(l) / batch_sz + (len(l) % batch_sz and 1 or 0)):
- j = i * batch_sz
- sl.append((' ' * 8) + ', '.join(l[j:j + batch_sz]))
- return ' private static final %s = {\n' % title + ',\n'.join(sl) + '\n };\n'
-
-def do_autogen_comment(extra_desc=[]):
- print ' /' + '**\n * AUTO GENERATED (by the Python code above)'
- for line in extra_desc:
- print ' * %s' % line
- print ' *' + '/'
-
-do_autogen_comment()
-print fmt_list('String[] TZ_STRINGS', ['"%s"' % x for x in tz_set], 1)
-do_autogen_comment()
-print fmt_list('String[] LANG_STRINGS', ['"%s"' % x for x in lang_set], 10)
-do_autogen_comment(['This table is a list of MCC codes. The index in this table',
- 'of a given MCC code is the index of extra information about',
- 'that MCC in the IND_CODES table.'])
-print fmt_list('short[] MCC_CODES', mcc_codes, 10)
-do_autogen_comment(['The values in this table are broken down as follows (msb to lsb):',
- ' iso country code 16 bits',
- ' (unused) 1 bit',
- ' wifi channel 4 bits',
- ' smalled digit 2 bits',
- ' default timezone 5 bits',
- ' default language 4 bits'])
-print fmt_list('int[] IND_CODES', ind_codes, 6)
-
-def parse_ind_code(ind):
- mcc = eval(mcc_codes[ind])
- code = eval(ind_codes[ind])
- iso_lsb = int((code >> 16) & 0x00FF)
- iso_msb = int((code >> 24) & 0x00FF)
- iso = '%s%s' % (chr(iso_msb), chr(iso_lsb))
- wifi = int((code >> 11) & 0x000F)
- sd = int((code >> 9) & 0x0003)
- tz_ind = (code >> 4) & 0x001F
- lang_ind = (code >> 0) & 0x000F
- return (mcc, iso, sd, tz_set[tz_ind], lang_set[lang_ind], wifi)
-
-fmt_str = 'mcc = %s, iso = %s, sd = %s, tz = %s, lang = %s, wifi = %s'
-orig_table = [fmt_str % (get_mcc(elt), get_iso(elt), get_sd(elt),
- get_tz(elt), get_lang(elt), get_wifi(elt))
- for elt in mcc_table]
-derived_table = [fmt_str % parse_ind_code(i) for i in range(len(ind_codes))]
-for i in range(len(orig_table)):
- if orig_table[i] == derived_table[i]: continue
- print 'MISMATCH ERROR : ', orig_table[i], " != ", derived_table[i]
-
-*/
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Locale;
+import libcore.icu.TimeZones;
/**
* Mobile Country Code
@@ -371,200 +38,153 @@
*/
public final class MccTable
{
- /**
- * AUTO GENERATED (by the Python code above)
- */
- private static final String[] TZ_STRINGS = {
- "",
- "Africa/Johannesburg",
- "Asia/Beijing",
- "Asia/Singapore",
- "Asia/Tokyo",
- "Australia/Sydney",
- "Europe/Amsterdam",
- "Europe/Berlin",
- "Europe/Dublin",
- "Europe/London",
- "Europe/Madrid",
- "Europe/Paris",
- "Europe/Prague",
- "Europe/Rome",
- "Europe/Vienna",
- "Europe/Warsaw",
- "Europe/Zurich",
- "Pacific/Auckland"
- };
-
- /**
- * AUTO GENERATED (by the Python code above)
- */
- private static final String[] LANG_STRINGS = {
- "", "cs", "de", "en", "es", "fr", "it", "ja", "nl", "zh"
- };
-
- /**
- * AUTO GENERATED (by the Python code above)
- * This table is a list of MCC codes. The index in this table
- * of a given MCC code is the index of extra information about
- * that MCC in the IND_CODES table.
- */
- private static final short[] MCC_CODES = {
- 0x00ca, 0x00cc, 0x00ce, 0x00d0, 0x00d4, 0x00d5, 0x00d6, 0x00d8, 0x00da, 0x00db,
- 0x00dc, 0x00de, 0x00e1, 0x00e2, 0x00e4, 0x00e6, 0x00e7, 0x00e8, 0x00ea, 0x00eb,
- 0x00ee, 0x00f0, 0x00f2, 0x00f4, 0x00f6, 0x00f7, 0x00f8, 0x00fa, 0x00ff, 0x0101,
- 0x0103, 0x0104, 0x0106, 0x010a, 0x010c, 0x010e, 0x0110, 0x0112, 0x0114, 0x0116,
- 0x0118, 0x011a, 0x011b, 0x011c, 0x011e, 0x0120, 0x0121, 0x0122, 0x0124, 0x0125,
- 0x0126, 0x0127, 0x0129, 0x012e, 0x0134, 0x0136, 0x0137, 0x0138, 0x0139, 0x013a,
- 0x013b, 0x013c, 0x014a, 0x014c, 0x014e, 0x0152, 0x0154, 0x0156, 0x0158, 0x015a,
- 0x015c, 0x015e, 0x0160, 0x0162, 0x0164, 0x0166, 0x0168, 0x016a, 0x016b, 0x016c,
- 0x016d, 0x016e, 0x0170, 0x0172, 0x0174, 0x0176, 0x0178, 0x0190, 0x0191, 0x0192,
- 0x0194, 0x0195, 0x019a, 0x019c, 0x019d, 0x019e, 0x019f, 0x01a0, 0x01a1, 0x01a2,
- 0x01a3, 0x01a4, 0x01a5, 0x01a6, 0x01a7, 0x01a8, 0x01a9, 0x01aa, 0x01ab, 0x01ac,
- 0x01ad, 0x01ae, 0x01af, 0x01b0, 0x01b2, 0x01b4, 0x01b5, 0x01b6, 0x01b8, 0x01b9,
- 0x01c2, 0x01c4, 0x01c6, 0x01c7, 0x01c8, 0x01c9, 0x01cc, 0x01cd, 0x01d2, 0x01d3,
- 0x01d6, 0x01d8, 0x01f6, 0x01f9, 0x01fe, 0x0202, 0x0203, 0x0208, 0x020d, 0x0210,
- 0x0212, 0x0216, 0x0217, 0x0218, 0x0219, 0x021b, 0x021c, 0x021d, 0x021e, 0x021f,
- 0x0220, 0x0221, 0x0222, 0x0223, 0x0224, 0x0225, 0x0226, 0x0227, 0x0228, 0x025a,
- 0x025b, 0x025c, 0x025d, 0x025e, 0x025f, 0x0260, 0x0261, 0x0262, 0x0263, 0x0264,
- 0x0265, 0x0266, 0x0267, 0x0268, 0x0269, 0x026a, 0x026b, 0x026c, 0x026d, 0x026e,
- 0x026f, 0x0270, 0x0271, 0x0272, 0x0273, 0x0274, 0x0275, 0x0276, 0x0277, 0x0278,
- 0x0279, 0x027a, 0x027b, 0x027c, 0x027d, 0x027e, 0x027f, 0x0280, 0x0281, 0x0282,
- 0x0283, 0x0285, 0x0286, 0x0287, 0x0288, 0x0289, 0x028a, 0x028b, 0x028c, 0x028d,
- 0x028e, 0x028f, 0x0291, 0x02be, 0x02c0, 0x02c2, 0x02c4, 0x02c6, 0x02c8, 0x02ca,
- 0x02cc, 0x02d2, 0x02d4, 0x02da, 0x02dc, 0x02de, 0x02e0, 0x02e2, 0x02e4, 0x02e6,
- 0x02e8, 0x02ea, 0x02ec, 0x02ee
- };
-
- /**
- * AUTO GENERATED (by the Python code above)
- * The values in this table are broken down as follows (msb to lsb):
- * iso country code 16 bits
- * (unused) 1 bit
- * wifi channel 4 bits
- * smalled digit 2 bits
- * default timezone 5 bits
- * default language 4 bits
- */
- private static final int[] IND_CODES = {
- 0x67720400, 0x6e6c6c68, 0x62650400, 0x667204b5, 0x6d630400, 0x61640400,
- 0x657304a4, 0x68750400, 0x62610400, 0x68720400, 0x72730400, 0x697404d6,
- 0x766104d6, 0x726f0400, 0x63680502, 0x637a6cc1, 0x736b0400, 0x61746ce2,
- 0x67626c93, 0x67626c93, 0x646b0400, 0x73650400, 0x6e6f0400, 0x66690400,
- 0x6c740400, 0x6c760400, 0x65650400, 0x72750400, 0x75610400, 0x62790400,
- 0x6d640400, 0x706c04f0, 0x64656c72, 0x67690400, 0x70740400, 0x6c750400,
- 0x69650483, 0x69730400, 0x616c0400, 0x6d740400, 0x63790400, 0x67650400,
- 0x616d0400, 0x62670400, 0x74720400, 0x666f0400, 0x67650400, 0x676c0400,
- 0x736d0400, 0x736c0400, 0x6d6b0400, 0x6c690400, 0x6d650400, 0x63615e00,
- 0x706d0400, 0x75735e03, 0x75735e03, 0x75735e03, 0x75735e03, 0x75735e03,
- 0x75735e03, 0x75735e03, 0x70720400, 0x76690400, 0x6d780600, 0x6a6d0600,
- 0x67700400, 0x62620600, 0x61670600, 0x6b790600, 0x76670600, 0x626d0400,
- 0x67640400, 0x6d730400, 0x6b6e0400, 0x6c630400, 0x76630400, 0x6e6c0400,
- 0x61770400, 0x62730400, 0x61690600, 0x646d0400, 0x63750400, 0x646f0400,
- 0x68740400, 0x74740400, 0x74630400, 0x617a0400, 0x6b7a0400, 0x62740400,
- 0x696e0400, 0x696e0400, 0x706b0400, 0x61660400, 0x6c6b0400, 0x6d6d0400,
- 0x6c620400, 0x6a6f0400, 0x73790400, 0x69710400, 0x6b770400, 0x73610400,
- 0x79650400, 0x6f6d0400, 0x70730400, 0x61650400, 0x696c0400, 0x62680400,
- 0x71610400, 0x6d6e0400, 0x6e700400, 0x61650400, 0x61650400, 0x69720400,
- 0x757a0400, 0x746a0400, 0x6b670400, 0x746d0400, 0x6a707447, 0x6a707447,
- 0x6b720400, 0x766e0400, 0x686b0400, 0x6d6f0400, 0x6b680400, 0x6c610400,
- 0x636e6c29, 0x636e6c29, 0x74770400, 0x6b700400, 0x62640400, 0x6d760400,
- 0x6d790400, 0x61755c53, 0x69640400, 0x746c0400, 0x70680400, 0x74680400,
- 0x73675c33, 0x626e0400, 0x6e7a0513, 0x6d700400, 0x67750400, 0x6e720400,
- 0x70670400, 0x746f0400, 0x73620400, 0x76750400, 0x666a0400, 0x77660400,
- 0x61730400, 0x6b690400, 0x6e630400, 0x70660400, 0x636b0400, 0x77730400,
- 0x666d0400, 0x6d680400, 0x70770400, 0x65670400, 0x647a0400, 0x6d610400,
- 0x746e0400, 0x6c790400, 0x676d0400, 0x736e0400, 0x6d720400, 0x6d6c0400,
- 0x676e0400, 0x63690400, 0x62660400, 0x6e650400, 0x74670400, 0x626a0400,
- 0x6d750400, 0x6c720400, 0x736c0400, 0x67680400, 0x6e670400, 0x74640400,
- 0x63660400, 0x636d0400, 0x63760400, 0x73740400, 0x67710400, 0x67610400,
- 0x63670400, 0x63670400, 0x616f0400, 0x67770400, 0x73630400, 0x73640400,
- 0x72770400, 0x65740400, 0x736f0400, 0x646a0400, 0x6b650400, 0x747a0400,
- 0x75670400, 0x62690400, 0x6d7a0400, 0x7a6d0400, 0x6d670400, 0x72650400,
- 0x7a770400, 0x6e610400, 0x6d770400, 0x6c730400, 0x62770400, 0x737a0400,
- 0x6b6d0400, 0x7a610413, 0x65720400, 0x627a0400, 0x67740400, 0x73760400,
- 0x686e0600, 0x6e690400, 0x63720400, 0x70610400, 0x70650400, 0x61720600,
- 0x62720400, 0x636c0400, 0x636f0600, 0x76650400, 0x626f0400, 0x67790400,
- 0x65630400, 0x67660400, 0x70790400, 0x73720400, 0x75790400, 0x666b0400
- };
-
static final String LOG_TAG = "MccTable";
+ static ArrayList<MccEntry> table;
+
+ static class MccEntry implements Comparable<MccEntry>
+ {
+ int mcc;
+ String iso;
+ int smallestDigitsMnc;
+ String language;
+ int wifiChannels;
+
+ MccEntry(int mnc, String iso, int smallestDigitsMCC) {
+ this(mnc, iso, smallestDigitsMCC, null);
+ }
+
+ MccEntry(int mnc, String iso, int smallestDigitsMCC, String language) {
+ this(mnc, iso, smallestDigitsMCC, language, 0);
+ }
+
+ MccEntry(int mnc, String iso, int smallestDigitsMCC, String language, int wifiChannels) {
+ this.mcc = mnc;
+ this.iso = iso;
+ this.smallestDigitsMnc = smallestDigitsMCC;
+ this.language = language;
+ this.wifiChannels = wifiChannels;
+ }
+
+
+ public int compareTo(MccEntry o)
+ {
+ return mcc - o.mcc;
+ }
+ }
+
+ private static MccEntry
+ entryForMcc(int mcc)
+ {
+ int index;
+
+ MccEntry m;
+
+ m = new MccEntry(mcc, null, 0);
+
+ index = Collections.binarySearch(table, m);
+
+ if (index < 0) {
+ return null;
+ } else {
+ return table.get(index);
+ }
+ }
+
/**
- * Given a Mobile Country Code, returns a default time zone ID
- * if available. Returns null if unavailable.
+ * Returns a default time zone ID for the given MCC.
+ * @param mcc Mobile Country Code
+ * @return default TimeZone ID, or null if not specified
*/
public static String defaultTimeZoneForMcc(int mcc) {
- int index = Arrays.binarySearch(MCC_CODES, (short)mcc);
- if (index < 0) {
+ MccEntry entry;
+
+ entry = entryForMcc(mcc);
+ if (entry == null || entry.iso == null) {
return null;
+ } else {
+ Locale locale;
+ if (entry.language == null) {
+ locale = new Locale(entry.iso);
+ } else {
+ locale = new Locale(entry.language, entry.iso);
+ }
+ String[] tz = TimeZones.forLocale(locale);
+ if (tz.length == 0) return null;
+ return tz[0];
}
- int indCode = IND_CODES[index];
- int tzInd = (indCode >>> 4) & 0x001F;
- String tz = TZ_STRINGS[tzInd];
- if (tz == "") {
- return null;
- }
- return tz;
}
/**
- * Given a Mobile Country Code, returns an ISO two-character
- * country code if available. Returns "" if unavailable.
+ * Given a GSM Mobile Country Code, returns
+ * an ISO two-character country code if available.
+ * Returns "" if unavailable.
*/
- public static String countryCodeForMcc(int mcc) {
- int index = Arrays.binarySearch(MCC_CODES, (short)mcc);
- if (index < 0) {
+ public static String
+ countryCodeForMcc(int mcc)
+ {
+ MccEntry entry;
+
+ entry = entryForMcc(mcc);
+
+ if (entry == null) {
return "";
+ } else {
+ return entry.iso;
}
- int indCode = IND_CODES[index];
- byte[] iso = {(byte)((indCode >>> 24) & 0x00FF), (byte)((indCode >>> 16) & 0x00FF)};
- return new String(iso);
}
/**
- * Given a GSM Mobile Country Code, returns an ISO 2-3 character
- * language code if available. Returns null if unavailable.
+ * Given a GSM Mobile Country Code, returns
+ * an ISO 2-3 character language code if available.
+ * Returns null if unavailable.
*/
public static String defaultLanguageForMcc(int mcc) {
- int index = Arrays.binarySearch(MCC_CODES, (short)mcc);
- if (index < 0) {
+ MccEntry entry;
+
+ entry = entryForMcc(mcc);
+
+ if (entry == null) {
return null;
+ } else {
+ return entry.language;
}
- int indCode = IND_CODES[index];
- int langInd = indCode & 0x000F;
- String lang = LANG_STRINGS[langInd];
- if (lang == "") {
- return null;
- }
- return lang;
}
/**
- * Given a GSM Mobile Country Code, returns the corresponding
- * smallest number of digits field. Returns 2 if unavailable.
+ * Given a GSM Mobile Country Code, returns
+ * the smallest number of digits that an MNC can have, if available.
+ * Returns 2 if unavailable.
*/
- public static int smallestDigitsMccForMnc(int mcc) {
- int index = Arrays.binarySearch(MCC_CODES, (short)mcc);
- if (index < 0) {
+ public static int
+ smallestDigitsMccForMnc(int mcc)
+ {
+ MccEntry entry;
+
+ entry = entryForMcc(mcc);
+
+ if (entry == null) {
return 2;
+ } else {
+ return entry.smallestDigitsMnc;
}
- int indCode = IND_CODES[index];
- int smDig = (indCode >>> 9) & 0x0003;
- return smDig;
}
/**
* Given a GSM Mobile Country Code, returns the number of wifi
* channels allowed in that country. Returns 0 if unavailable.
*/
- public static int wifiChannelsForMcc(int mcc) {
- int index = Arrays.binarySearch(MCC_CODES, (short)mcc);
- if (index < 0) {
+ public static int
+ wifiChannelsForMcc(int mcc) {
+ MccEntry entry;
+
+ entry = entryForMcc(mcc);
+
+ if (entry == null) {
return 0;
+ } else {
+ return entry.wifiChannels;
}
- int indCode = IND_CODES[index];
- int wifi = (indCode >>> 11) & 0x000F;
- return wifi;
}
/**
@@ -656,4 +276,262 @@
wM.setNumAllowedChannels(wifiChannels, true);
}
}
+
+ static {
+ table = new ArrayList<MccEntry>(240);
+
+
+ /*
+ * The table below is built from two resources:
+ *
+ * 1) ITU "Mobile Network Code (MNC) for the international
+ * identification plan for mobile terminals and mobile users"
+ * which is available as an annex to the ITU operational bulletin
+ * available here: http://www.itu.int/itu-t/bulletin/annex.html
+ *
+ * 2) The ISO 3166 country codes list, available here:
+ * http://www.iso.org/iso/en/prods-services/iso3166ma/02iso-3166-code-lists/index.html
+ *
+ * This table has not been verified.
+ *
+ */
+
+ table.add(new MccEntry(202,"gr",2)); //Greece
+ table.add(new MccEntry(204,"nl",2,"nl",13)); //Netherlands (Kingdom of the)
+ table.add(new MccEntry(206,"be",2)); //Belgium
+ table.add(new MccEntry(208,"fr",2,"fr")); //France
+ table.add(new MccEntry(212,"mc",2)); //Monaco (Principality of)
+ table.add(new MccEntry(213,"ad",2)); //Andorra (Principality of)
+ table.add(new MccEntry(214,"es",2,"es")); //Spain
+ table.add(new MccEntry(216,"hu",2)); //Hungary (Republic of)
+ table.add(new MccEntry(218,"ba",2)); //Bosnia and Herzegovina
+ table.add(new MccEntry(219,"hr",2)); //Croatia (Republic of)
+ table.add(new MccEntry(220,"rs",2)); //Serbia and Montenegro
+ table.add(new MccEntry(222,"it",2,"it")); //Italy
+ table.add(new MccEntry(225,"va",2,"it")); //Vatican City State
+ table.add(new MccEntry(226,"ro",2)); //Romania
+ table.add(new MccEntry(228,"ch",2,"de")); //Switzerland (Confederation of)
+ table.add(new MccEntry(230,"cz",2,"cs",13)); //Czech Republic
+ table.add(new MccEntry(231,"sk",2)); //Slovak Republic
+ table.add(new MccEntry(232,"at",2,"de",13)); //Austria
+ table.add(new MccEntry(234,"gb",2,"en",13)); //United Kingdom of Great Britain and Northern Ireland
+ table.add(new MccEntry(235,"gb",2,"en",13)); //United Kingdom of Great Britain and Northern Ireland
+ table.add(new MccEntry(238,"dk",2)); //Denmark
+ table.add(new MccEntry(240,"se",2)); //Sweden
+ table.add(new MccEntry(242,"no",2)); //Norway
+ table.add(new MccEntry(244,"fi",2)); //Finland
+ table.add(new MccEntry(246,"lt",2)); //Lithuania (Republic of)
+ table.add(new MccEntry(247,"lv",2)); //Latvia (Republic of)
+ table.add(new MccEntry(248,"ee",2)); //Estonia (Republic of)
+ table.add(new MccEntry(250,"ru",2)); //Russian Federation
+ table.add(new MccEntry(255,"ua",2)); //Ukraine
+ table.add(new MccEntry(257,"by",2)); //Belarus (Republic of)
+ table.add(new MccEntry(259,"md",2)); //Moldova (Republic of)
+ table.add(new MccEntry(260,"pl",2)); //Poland (Republic of)
+ table.add(new MccEntry(262,"de",2,"de",13)); //Germany (Federal Republic of)
+ table.add(new MccEntry(266,"gi",2)); //Gibraltar
+ table.add(new MccEntry(268,"pt",2)); //Portugal
+ table.add(new MccEntry(270,"lu",2)); //Luxembourg
+ table.add(new MccEntry(272,"ie",2,"en")); //Ireland
+ table.add(new MccEntry(274,"is",2)); //Iceland
+ table.add(new MccEntry(276,"al",2)); //Albania (Republic of)
+ table.add(new MccEntry(278,"mt",2)); //Malta
+ table.add(new MccEntry(280,"cy",2)); //Cyprus (Republic of)
+ table.add(new MccEntry(282,"ge",2)); //Georgia
+ table.add(new MccEntry(283,"am",2)); //Armenia (Republic of)
+ table.add(new MccEntry(284,"bg",2)); //Bulgaria (Republic of)
+ table.add(new MccEntry(286,"tr",2)); //Turkey
+ table.add(new MccEntry(288,"fo",2)); //Faroe Islands
+ table.add(new MccEntry(289,"ge",2)); //Abkhazia (Georgia)
+ table.add(new MccEntry(290,"gl",2)); //Greenland (Denmark)
+ table.add(new MccEntry(292,"sm",2)); //San Marino (Republic of)
+ table.add(new MccEntry(293,"si",2)); //Slovenia (Republic of)
+ table.add(new MccEntry(294,"mk",2)); //The Former Yugoslav Republic of Macedonia
+ table.add(new MccEntry(295,"li",2)); //Liechtenstein (Principality of)
+ table.add(new MccEntry(297,"me",2)); //Montenegro (Republic of)
+ table.add(new MccEntry(302,"ca",3,"",11)); //Canada
+ table.add(new MccEntry(308,"pm",2)); //Saint Pierre and Miquelon (Collectivité territoriale de la République française)
+ table.add(new MccEntry(310,"us",3,"en",11)); //United States of America
+ table.add(new MccEntry(311,"us",3,"en",11)); //United States of America
+ table.add(new MccEntry(312,"us",3,"en",11)); //United States of America
+ table.add(new MccEntry(313,"us",3,"en",11)); //United States of America
+ table.add(new MccEntry(314,"us",3,"en",11)); //United States of America
+ table.add(new MccEntry(315,"us",3,"en",11)); //United States of America
+ table.add(new MccEntry(316,"us",3,"en",11)); //United States of America
+ table.add(new MccEntry(330,"pr",2)); //Puerto Rico
+ table.add(new MccEntry(332,"vi",2)); //United States Virgin Islands
+ table.add(new MccEntry(334,"mx",3)); //Mexico
+ table.add(new MccEntry(338,"jm",3)); //Jamaica
+ table.add(new MccEntry(340,"gp",2)); //Guadeloupe (French Department of)
+ table.add(new MccEntry(342,"bb",3)); //Barbados
+ table.add(new MccEntry(344,"ag",3)); //Antigua and Barbuda
+ table.add(new MccEntry(346,"ky",3)); //Cayman Islands
+ table.add(new MccEntry(348,"vg",3)); //British Virgin Islands
+ table.add(new MccEntry(350,"bm",2)); //Bermuda
+ table.add(new MccEntry(352,"gd",2)); //Grenada
+ table.add(new MccEntry(354,"ms",2)); //Montserrat
+ table.add(new MccEntry(356,"kn",2)); //Saint Kitts and Nevis
+ table.add(new MccEntry(358,"lc",2)); //Saint Lucia
+ table.add(new MccEntry(360,"vc",2)); //Saint Vincent and the Grenadines
+ table.add(new MccEntry(362,"nl",2)); //Netherlands Antilles
+ table.add(new MccEntry(363,"aw",2)); //Aruba
+ table.add(new MccEntry(364,"bs",2)); //Bahamas (Commonwealth of the)
+ table.add(new MccEntry(365,"ai",3)); //Anguilla
+ table.add(new MccEntry(366,"dm",2)); //Dominica (Commonwealth of)
+ table.add(new MccEntry(368,"cu",2)); //Cuba
+ table.add(new MccEntry(370,"do",2)); //Dominican Republic
+ table.add(new MccEntry(372,"ht",2)); //Haiti (Republic of)
+ table.add(new MccEntry(374,"tt",2)); //Trinidad and Tobago
+ table.add(new MccEntry(376,"tc",2)); //Turks and Caicos Islands
+ table.add(new MccEntry(400,"az",2)); //Azerbaijani Republic
+ table.add(new MccEntry(401,"kz",2)); //Kazakhstan (Republic of)
+ table.add(new MccEntry(402,"bt",2)); //Bhutan (Kingdom of)
+ table.add(new MccEntry(404,"in",2)); //India (Republic of)
+ table.add(new MccEntry(405,"in",2)); //India (Republic of)
+ table.add(new MccEntry(410,"pk",2)); //Pakistan (Islamic Republic of)
+ table.add(new MccEntry(412,"af",2)); //Afghanistan
+ table.add(new MccEntry(413,"lk",2)); //Sri Lanka (Democratic Socialist Republic of)
+ table.add(new MccEntry(414,"mm",2)); //Myanmar (Union of)
+ table.add(new MccEntry(415,"lb",2)); //Lebanon
+ table.add(new MccEntry(416,"jo",2)); //Jordan (Hashemite Kingdom of)
+ table.add(new MccEntry(417,"sy",2)); //Syrian Arab Republic
+ table.add(new MccEntry(418,"iq",2)); //Iraq (Republic of)
+ table.add(new MccEntry(419,"kw",2)); //Kuwait (State of)
+ table.add(new MccEntry(420,"sa",2)); //Saudi Arabia (Kingdom of)
+ table.add(new MccEntry(421,"ye",2)); //Yemen (Republic of)
+ table.add(new MccEntry(422,"om",2)); //Oman (Sultanate of)
+ table.add(new MccEntry(423,"ps",2)); //Palestine
+ table.add(new MccEntry(424,"ae",2)); //United Arab Emirates
+ table.add(new MccEntry(425,"il",2)); //Israel (State of)
+ table.add(new MccEntry(426,"bh",2)); //Bahrain (Kingdom of)
+ table.add(new MccEntry(427,"qa",2)); //Qatar (State of)
+ table.add(new MccEntry(428,"mn",2)); //Mongolia
+ table.add(new MccEntry(429,"np",2)); //Nepal
+ table.add(new MccEntry(430,"ae",2)); //United Arab Emirates
+ table.add(new MccEntry(431,"ae",2)); //United Arab Emirates
+ table.add(new MccEntry(432,"ir",2)); //Iran (Islamic Republic of)
+ table.add(new MccEntry(434,"uz",2)); //Uzbekistan (Republic of)
+ table.add(new MccEntry(436,"tj",2)); //Tajikistan (Republic of)
+ table.add(new MccEntry(437,"kg",2)); //Kyrgyz Republic
+ table.add(new MccEntry(438,"tm",2)); //Turkmenistan
+ table.add(new MccEntry(440,"jp",2,"ja",14)); //Japan
+ table.add(new MccEntry(441,"jp",2,"ja",14)); //Japan
+ table.add(new MccEntry(450,"kr",2)); //Korea (Republic of)
+ table.add(new MccEntry(452,"vn",2)); //Viet Nam (Socialist Republic of)
+ table.add(new MccEntry(454,"hk",2)); //"Hong Kong, China"
+ table.add(new MccEntry(455,"mo",2)); //"Macao, China"
+ table.add(new MccEntry(456,"kh",2)); //Cambodia (Kingdom of)
+ table.add(new MccEntry(457,"la",2)); //Lao People's Democratic Republic
+ table.add(new MccEntry(460,"cn",2,"zh",13)); //China (People's Republic of)
+ table.add(new MccEntry(461,"cn",2,"zh",13)); //China (People's Republic of)
+ table.add(new MccEntry(466,"tw",2)); //"Taiwan, China"
+ table.add(new MccEntry(467,"kp",2)); //Democratic People's Republic of Korea
+ table.add(new MccEntry(470,"bd",2)); //Bangladesh (People's Republic of)
+ table.add(new MccEntry(472,"mv",2)); //Maldives (Republic of)
+ table.add(new MccEntry(502,"my",2)); //Malaysia
+ table.add(new MccEntry(505,"au",2,"en",11)); //Australia
+ table.add(new MccEntry(510,"id",2)); //Indonesia (Republic of)
+ table.add(new MccEntry(514,"tl",2)); //Democratic Republic of Timor-Leste
+ table.add(new MccEntry(515,"ph",2)); //Philippines (Republic of the)
+ table.add(new MccEntry(520,"th",2)); //Thailand
+ table.add(new MccEntry(525,"sg",2,"en",11)); //Singapore (Republic of)
+ table.add(new MccEntry(528,"bn",2)); //Brunei Darussalam
+ table.add(new MccEntry(530,"nz",2, "en")); //New Zealand
+ table.add(new MccEntry(534,"mp",2)); //Northern Mariana Islands (Commonwealth of the)
+ table.add(new MccEntry(535,"gu",2)); //Guam
+ table.add(new MccEntry(536,"nr",2)); //Nauru (Republic of)
+ table.add(new MccEntry(537,"pg",2)); //Papua New Guinea
+ table.add(new MccEntry(539,"to",2)); //Tonga (Kingdom of)
+ table.add(new MccEntry(540,"sb",2)); //Solomon Islands
+ table.add(new MccEntry(541,"vu",2)); //Vanuatu (Republic of)
+ table.add(new MccEntry(542,"fj",2)); //Fiji (Republic of)
+ table.add(new MccEntry(543,"wf",2)); //Wallis and Futuna (Territoire français d'outre-mer)
+ table.add(new MccEntry(544,"as",2)); //American Samoa
+ table.add(new MccEntry(545,"ki",2)); //Kiribati (Republic of)
+ table.add(new MccEntry(546,"nc",2)); //New Caledonia (Territoire français d'outre-mer)
+ table.add(new MccEntry(547,"pf",2)); //French Polynesia (Territoire français d'outre-mer)
+ table.add(new MccEntry(548,"ck",2)); //Cook Islands
+ table.add(new MccEntry(549,"ws",2)); //Samoa (Independent State of)
+ table.add(new MccEntry(550,"fm",2)); //Micronesia (Federated States of)
+ table.add(new MccEntry(551,"mh",2)); //Marshall Islands (Republic of the)
+ table.add(new MccEntry(552,"pw",2)); //Palau (Republic of)
+ table.add(new MccEntry(602,"eg",2)); //Egypt (Arab Republic of)
+ table.add(new MccEntry(603,"dz",2)); //Algeria (People's Democratic Republic of)
+ table.add(new MccEntry(604,"ma",2)); //Morocco (Kingdom of)
+ table.add(new MccEntry(605,"tn",2)); //Tunisia
+ table.add(new MccEntry(606,"ly",2)); //Libya (Socialist People's Libyan Arab Jamahiriya)
+ table.add(new MccEntry(607,"gm",2)); //Gambia (Republic of the)
+ table.add(new MccEntry(608,"sn",2)); //Senegal (Republic of)
+ table.add(new MccEntry(609,"mr",2)); //Mauritania (Islamic Republic of)
+ table.add(new MccEntry(610,"ml",2)); //Mali (Republic of)
+ table.add(new MccEntry(611,"gn",2)); //Guinea (Republic of)
+ table.add(new MccEntry(612,"ci",2)); //Côte d'Ivoire (Republic of)
+ table.add(new MccEntry(613,"bf",2)); //Burkina Faso
+ table.add(new MccEntry(614,"ne",2)); //Niger (Republic of the)
+ table.add(new MccEntry(615,"tg",2)); //Togolese Republic
+ table.add(new MccEntry(616,"bj",2)); //Benin (Republic of)
+ table.add(new MccEntry(617,"mu",2)); //Mauritius (Republic of)
+ table.add(new MccEntry(618,"lr",2)); //Liberia (Republic of)
+ table.add(new MccEntry(619,"sl",2)); //Sierra Leone
+ table.add(new MccEntry(620,"gh",2)); //Ghana
+ table.add(new MccEntry(621,"ng",2)); //Nigeria (Federal Republic of)
+ table.add(new MccEntry(622,"td",2)); //Chad (Republic of)
+ table.add(new MccEntry(623,"cf",2)); //Central African Republic
+ table.add(new MccEntry(624,"cm",2)); //Cameroon (Republic of)
+ table.add(new MccEntry(625,"cv",2)); //Cape Verde (Republic of)
+ table.add(new MccEntry(626,"st",2)); //Sao Tome and Principe (Democratic Republic of)
+ table.add(new MccEntry(627,"gq",2)); //Equatorial Guinea (Republic of)
+ table.add(new MccEntry(628,"ga",2)); //Gabonese Republic
+ table.add(new MccEntry(629,"cg",2)); //Congo (Republic of the)
+ table.add(new MccEntry(630,"cg",2)); //Democratic Republic of the Congo
+ table.add(new MccEntry(631,"ao",2)); //Angola (Republic of)
+ table.add(new MccEntry(632,"gw",2)); //Guinea-Bissau (Republic of)
+ table.add(new MccEntry(633,"sc",2)); //Seychelles (Republic of)
+ table.add(new MccEntry(634,"sd",2)); //Sudan (Republic of the)
+ table.add(new MccEntry(635,"rw",2)); //Rwanda (Republic of)
+ table.add(new MccEntry(636,"et",2)); //Ethiopia (Federal Democratic Republic of)
+ table.add(new MccEntry(637,"so",2)); //Somali Democratic Republic
+ table.add(new MccEntry(638,"dj",2)); //Djibouti (Republic of)
+ table.add(new MccEntry(639,"ke",2)); //Kenya (Republic of)
+ table.add(new MccEntry(640,"tz",2)); //Tanzania (United Republic of)
+ table.add(new MccEntry(641,"ug",2)); //Uganda (Republic of)
+ table.add(new MccEntry(642,"bi",2)); //Burundi (Republic of)
+ table.add(new MccEntry(643,"mz",2)); //Mozambique (Republic of)
+ table.add(new MccEntry(645,"zm",2)); //Zambia (Republic of)
+ table.add(new MccEntry(646,"mg",2)); //Madagascar (Republic of)
+ table.add(new MccEntry(647,"re",2)); //Reunion (French Department of)
+ table.add(new MccEntry(648,"zw",2)); //Zimbabwe (Republic of)
+ table.add(new MccEntry(649,"na",2)); //Namibia (Republic of)
+ table.add(new MccEntry(650,"mw",2)); //Malawi
+ table.add(new MccEntry(651,"ls",2)); //Lesotho (Kingdom of)
+ table.add(new MccEntry(652,"bw",2)); //Botswana (Republic of)
+ table.add(new MccEntry(653,"sz",2)); //Swaziland (Kingdom of)
+ table.add(new MccEntry(654,"km",2)); //Comoros (Union of the)
+ table.add(new MccEntry(655,"za",2,"en")); //South Africa (Republic of)
+ table.add(new MccEntry(657,"er",2)); //Eritrea
+ table.add(new MccEntry(702,"bz",2)); //Belize
+ table.add(new MccEntry(704,"gt",2)); //Guatemala (Republic of)
+ table.add(new MccEntry(706,"sv",2)); //El Salvador (Republic of)
+ table.add(new MccEntry(708,"hn",3)); //Honduras (Republic of)
+ table.add(new MccEntry(710,"ni",2)); //Nicaragua
+ table.add(new MccEntry(712,"cr",2)); //Costa Rica
+ table.add(new MccEntry(714,"pa",2)); //Panama (Republic of)
+ table.add(new MccEntry(716,"pe",2)); //Peru
+ table.add(new MccEntry(722,"ar",3)); //Argentine Republic
+ table.add(new MccEntry(724,"br",2)); //Brazil (Federative Republic of)
+ table.add(new MccEntry(730,"cl",2)); //Chile
+ table.add(new MccEntry(732,"co",3)); //Colombia (Republic of)
+ table.add(new MccEntry(734,"ve",2)); //Venezuela (Bolivarian Republic of)
+ table.add(new MccEntry(736,"bo",2)); //Bolivia (Republic of)
+ table.add(new MccEntry(738,"gy",2)); //Guyana
+ table.add(new MccEntry(740,"ec",2)); //Ecuador
+ table.add(new MccEntry(742,"gf",2)); //French Guiana (French Department of)
+ table.add(new MccEntry(744,"py",2)); //Paraguay (Republic of)
+ table.add(new MccEntry(746,"sr",2)); //Suriname (Republic of)
+ table.add(new MccEntry(748,"uy",2)); //Uruguay (Eastern Republic of)
+ table.add(new MccEntry(750,"fk",2)); //Falkland Islands (Malvinas)
+ //table.add(new MccEntry(901,"",2)); //"International Mobile, shared code"
+
+ Collections.sort(table);
+ }
}
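
The rewritten MccTable keeps its entries in an ArrayList sorted by MCC and resolves every lookup with Collections.binarySearch against a probe entry. A minimal standalone sketch of that same pattern (class and method names here are hypothetical, not part of the change):

    import java.util.ArrayList;
    import java.util.Collections;

    // Sorted-list + binarySearch lookup, mirroring the new MccTable structure.
    class SimpleMccTable {
        static class Entry implements Comparable<Entry> {
            final int mcc;
            final String iso;
            Entry(int mcc, String iso) { this.mcc = mcc; this.iso = iso; }
            public int compareTo(Entry o) { return mcc - o.mcc; }
        }

        private static final ArrayList<Entry> TABLE = new ArrayList<Entry>();
        static {
            TABLE.add(new Entry(310, "us"));
            TABLE.add(new Entry(234, "gb"));
            TABLE.add(new Entry(208, "fr"));
            Collections.sort(TABLE);          // must be sorted before binarySearch
        }

        static String countryCodeForMcc(int mcc) {
            int index = Collections.binarySearch(TABLE, new Entry(mcc, null));
            return (index < 0) ? "" : TABLE.get(index).iso;
        }

        public static void main(String[] args) {
            System.out.println(countryCodeForMcc(310)); // "us"
            System.out.println(countryCodeForMcc(999)); // "" (not found)
        }
    }
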
diff --git a/telephony/java/com/android/internal/telephony/Phone.java b/telephony/java/com/android/internal/telephony/Phone.java
index 7029031..769b2fc8 100644
--- a/telephony/java/com/android/internal/telephony/Phone.java
+++ b/telephony/java/com/android/internal/telephony/Phone.java
@@ -18,6 +18,7 @@
import android.content.Context;
import android.content.SharedPreferences;
+import android.net.NetworkProperties;
import android.os.Handler;
import android.os.Message;
import android.preference.PreferenceManager;
@@ -101,6 +102,7 @@
static final String STATE_CHANGE_REASON_KEY = "reason";
static final String DATA_APN_TYPE_KEY = "apnType";
static final String DATA_APN_KEY = "apn";
+ static final String DATA_NETWORK_PROPERTIES_KEY = "dataProperties";
static final String DATA_IFACE_NAME_KEY = "iface";
static final String NETWORK_UNAVAILABLE_KEY = "networkUnvailable";
@@ -319,6 +321,11 @@
String getActiveApn();
/**
+ * Return the NetworkProperties for the given APN type, or null if not available.
+ */
+ NetworkProperties getNetworkProperties(String apnType);
+
+ /**
* Get current signal strength. No change notification available on this
* interface. Use <code>PhoneStateNotifier</code> or an equivalent.
* An ASU is 0-31 or -1 if unknown (for GSM, dBm = -113 - 2 * asu).
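
The Phone interface above gains getNetworkProperties(String apnType), which PhoneProxy and PhoneBase below simply forward down to the data-connection tracker. A hedged usage sketch (the caller class is hypothetical; APN_TYPE_DEFAULT is the existing default-APN constant on Phone, and a null return means no properties are available):

    import android.net.NetworkProperties;
    import com.android.internal.telephony.Phone;

    // Sketch only: a framework-internal caller reading the data-call properties
    // for the default APN after this change.
    class DataPropsReader {
        static String describeDefaultApn(Phone phone) {
            NetworkProperties props = phone.getNetworkProperties(Phone.APN_TYPE_DEFAULT);
            return (props == null) ? "no properties for default APN" : props.toString();
        }
    }
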
diff --git a/telephony/java/com/android/internal/telephony/PhoneBase.java b/telephony/java/com/android/internal/telephony/PhoneBase.java
index cf80691..e5968a7 100644
--- a/telephony/java/com/android/internal/telephony/PhoneBase.java
+++ b/telephony/java/com/android/internal/telephony/PhoneBase.java
@@ -21,6 +21,7 @@
import android.content.Context;
import android.content.res.Configuration;
import android.content.SharedPreferences;
+import android.net.NetworkProperties;
import android.net.wifi.WifiManager;
import android.os.AsyncResult;
import android.os.Handler;
@@ -955,6 +956,10 @@
return mDataConnection.getActiveApnTypes();
}
+ public NetworkProperties getNetworkProperties(String apnType) {
+ return mDataConnection.getNetworkProperties(apnType);
+ }
+
public String getActiveApn() {
return mDataConnection.getActiveApnString();
}
diff --git a/telephony/java/com/android/internal/telephony/PhoneProxy.java b/telephony/java/com/android/internal/telephony/PhoneProxy.java
index fb2a938..d84859c 100644
--- a/telephony/java/com/android/internal/telephony/PhoneProxy.java
+++ b/telephony/java/com/android/internal/telephony/PhoneProxy.java
@@ -21,6 +21,7 @@
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
+import android.net.NetworkProperties;
import android.os.Handler;
import android.os.Message;
import android.os.SystemProperties;
@@ -211,6 +212,10 @@
return mActivePhone.getActiveApnTypes();
}
+ public NetworkProperties getNetworkProperties(String apnType) {
+ return mActivePhone.getNetworkProperties(apnType);
+ }
+
public String getActiveApn() {
return mActivePhone.getActiveApn();
}
diff --git a/telephony/java/com/android/internal/telephony/cdma/CdmaDataConnectionTracker.java b/telephony/java/com/android/internal/telephony/cdma/CdmaDataConnectionTracker.java
index bd103d4..8a3af3b 100644
--- a/telephony/java/com/android/internal/telephony/cdma/CdmaDataConnectionTracker.java
+++ b/telephony/java/com/android/internal/telephony/cdma/CdmaDataConnectionTracker.java
@@ -53,6 +53,7 @@
import com.android.internal.telephony.RetryManager;
import com.android.internal.telephony.ServiceStateTracker;
+import java.net.NetworkInterface;
import java.util.ArrayList;
/**
@@ -736,6 +737,8 @@
}
if (ar.exception == null) {
+ mNetworkProperties = makeNetworkProperties(mActiveDataConnection);
+
// everything is setup
notifyDefaultData(reason);
} else {
diff --git a/telephony/java/com/android/internal/telephony/gsm/GsmDataConnectionTracker.java b/telephony/java/com/android/internal/telephony/gsm/GsmDataConnectionTracker.java
index 6826fa8..c76da80 100644
--- a/telephony/java/com/android/internal/telephony/gsm/GsmDataConnectionTracker.java
+++ b/telephony/java/com/android/internal/telephony/gsm/GsmDataConnectionTracker.java
@@ -30,6 +30,8 @@
import android.net.ConnectivityManager;
import android.net.IConnectivityManager;
import android.net.NetworkInfo;
+import android.net.NetworkProperties;
+import android.net.ProxyProperties;
import android.net.TrafficStats;
import android.net.Uri;
import android.net.wifi.WifiManager;
@@ -58,6 +60,9 @@
import com.android.internal.telephony.DataConnection.FailCause;
import java.io.IOException;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.UnknownHostException;
import java.util.ArrayList;
/**
@@ -1133,6 +1138,25 @@
}
if (ar.exception == null) {
+ mNetworkProperties = makeNetworkProperties(mActivePdp);
+
+ ApnSetting apn = mActivePdp.getApn();
+ if (apn.proxy != null && apn.proxy.length() != 0) {
+ try {
+ ProxyProperties proxy = new ProxyProperties();
+ proxy.setAddress(InetAddress.getByName(apn.proxy));
+ proxy.setPort(Integer.parseInt(apn.port));
+ mNetworkProperties.setHttpProxy(proxy);
+ } catch (UnknownHostException e) {
+ Log.e(LOG_TAG, "UnknownHostException making ProxyProperties: " + e);
+ } catch (SecurityException e) {
+ Log.e(LOG_TAG, "SecurityException making ProxyProperties: " + e);
+ } catch (NumberFormatException e) {
+ Log.e(LOG_TAG, "NumberFormatException making ProxyProperties (" + apn.port +
+ "): " + e);
+ }
+ }
+
// everything is setup
if (isApnTypeActive(Phone.APN_TYPE_DEFAULT)) {
SystemProperties.set("gsm.defaultpdpcontext.active", "true");
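
The new proxy block above has to survive three failure modes: an unresolvable proxy host, a security manager rejecting the lookup, and a non-numeric port string. The same parse-and-validate pattern in isolation, with a plain InetSocketAddress standing in for the ProxyProperties object built above (helper class and method names hypothetical):

    import java.net.InetAddress;
    import java.net.InetSocketAddress;
    import java.net.UnknownHostException;

    // Defensive proxy parsing; returns null on any of the failure modes the
    // hunk above logs and ignores.
    class ApnProxyParser {
        static InetSocketAddress parse(String proxyHost, String proxyPort) {
            if (proxyHost == null || proxyHost.length() == 0) return null;
            try {
                InetAddress addr = InetAddress.getByName(proxyHost); // may block while resolving
                int port = Integer.parseInt(proxyPort);
                return new InetSocketAddress(addr, port);
            } catch (UnknownHostException e) {
                return null;   // host did not resolve
            } catch (SecurityException e) {
                return null;   // resolver access denied
            } catch (NumberFormatException e) {
                return null;   // port was not a number
            }
        }
    }
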
diff --git a/telephony/java/com/android/internal/telephony/gsm/SmsMessage.java b/telephony/java/com/android/internal/telephony/gsm/SmsMessage.java
index 487372e..278e1ba 100644
--- a/telephony/java/com/android/internal/telephony/gsm/SmsMessage.java
+++ b/telephony/java/com/android/internal/telephony/gsm/SmsMessage.java
@@ -932,6 +932,8 @@
// TP-Message-Type-Indicator
// 9.2.3
case 0:
+ case 3: //GSM 03.40 9.2.3.1: MTI == 3 is Reserved.
+ //This should be processed in the same way as MTI == 0 (Deliver)
parseSmsDeliver(p, firstByte);
break;
case 2:
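
The added case follows GSM 03.40 section 9.2.3.1: the TP-Message-Type-Indicator is the two low-order bits of the first PDU octet, and the reserved value 3 is to be handled like SMS-DELIVER (MTI 0). A small standalone sketch of that dispatch (class and labels hypothetical):

    // Bits 0-1 of the first octet select the message type; reserved value 3
    // falls through to the DELIVER path, matching the new "case 3" above.
    class MtiDispatch {
        static String classify(int firstByte) {
            switch (firstByte & 0x03) {          // TP-Message-Type-Indicator
                case 0:
                case 3:                          // reserved: treat as SMS-DELIVER
                    return "DELIVER";
                case 2:
                    return "STATUS-REPORT";
                default:                         // MTI == 1 is not parsed on this receive path
                    return "UNSUPPORTED";
            }
        }

        public static void main(String[] args) {
            System.out.println(classify(0x40)); // DELIVER (MTI == 0)
            System.out.println(classify(0x03)); // DELIVER (reserved MTI == 3)
        }
    }
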
diff --git a/telephony/tests/telephonytests/src/com/android/internal/telephony/MccTableTest.java b/telephony/tests/telephonytests/src/com/android/internal/telephony/MccTableTest.java
index 2d6977c..7eb3df8 100644
--- a/telephony/tests/telephonytests/src/com/android/internal/telephony/MccTableTest.java
+++ b/telephony/tests/telephonytests/src/com/android/internal/telephony/MccTableTest.java
@@ -28,7 +28,7 @@
@SmallTest
public void testTimeZone() throws Exception {
- assertEquals(MccTable.defaultTimeZoneForMcc(208), "Europe/Paris");
+ assertEquals(MccTable.defaultTimeZoneForMcc(208), "ECT");
assertEquals(MccTable.defaultTimeZoneForMcc(232), "Europe/Vienna");
assertEquals(MccTable.defaultTimeZoneForMcc(655), "Africa/Johannesburg");
assertEquals(MccTable.defaultTimeZoneForMcc(440), "Asia/Tokyo");
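
The updated expectation for MCC 208 reflects that the locale-based lookup now returns the legacy three-letter alias "ECT" for France rather than an Olson ID. A quick standalone sanity check of that alias with java.util.TimeZone (not part of the test itself):

    import java.util.TimeZone;

    // Confirms the legacy alias the updated test expects still carries
    // Central European rules.
    class EctCheck {
        public static void main(String[] args) {
            TimeZone tz = TimeZone.getTimeZone("ECT");
            System.out.println(tz.getID());          // ECT
            System.out.println(tz.getRawOffset());   // 3600000 (UTC+1)
        }
    }
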
diff --git a/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoTest.java b/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoTest.java
index fabbf89..3618c7b 100644
--- a/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoTest.java
+++ b/tests/DumpRenderTree/src/com/android/dumprendertree/LayoutTestsAutoTest.java
@@ -222,14 +222,22 @@
// The generic result is at <path>/<name>-expected.txt
// First try the Android-specific result at
// platform/android-<js-engine>/<path>/<name>-expected.txt
+ // then
+ // platform/android/<path>/<name>-expected.txt
int pos = test.lastIndexOf('.');
if (pos == -1)
return null;
String genericExpectedResult = test.substring(0, pos) + "-expected.txt";
String androidExpectedResultsDir = "platform/android-" + mJsEngine + "/";
- String androidExpectedResult =
- genericExpectedResult.replaceFirst(LAYOUT_TESTS_ROOT, LAYOUT_TESTS_ROOT + androidExpectedResultsDir);
+ String androidExpectedResult = genericExpectedResult.replaceFirst(LAYOUT_TESTS_ROOT,
+ LAYOUT_TESTS_ROOT + androidExpectedResultsDir);
File f = new File(androidExpectedResult);
+ if (f.exists())
+ return androidExpectedResult;
+ androidExpectedResultsDir = "platform/android/";
+ androidExpectedResult = genericExpectedResult.replaceFirst(LAYOUT_TESTS_ROOT,
+ LAYOUT_TESTS_ROOT + androidExpectedResultsDir);
+ f = new File(androidExpectedResult);
return f.exists() ? androidExpectedResult : genericExpectedResult;
}
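
With the change above, the expected result is looked up in three places in order: the engine-specific Android override, the generic Android override, then the upstream result. A compact sketch of that fallback (method and parameter names hypothetical; the real code uses the surrounding class's LAYOUT_TESTS_ROOT and mJsEngine fields):

    import java.io.File;

    // Three-step expected-result fallback, as implemented in the hunk above.
    class ExpectedResultResolver {
        static String resolve(String genericExpectedResult, String layoutTestsRoot, String jsEngine) {
            String[] overrideDirs = {
                "platform/android-" + jsEngine + "/",   // 1) engine-specific override
                "platform/android/"                     // 2) generic Android override
            };
            for (String dir : overrideDirs) {
                String candidate = genericExpectedResult.replaceFirst(layoutTestsRoot,
                        layoutTestsRoot + dir);
                if (new File(candidate).exists()) {
                    return candidate;
                }
            }
            return genericExpectedResult;               // 3) fall back to the upstream result
        }
    }
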
@@ -328,6 +336,7 @@
intent.putExtra(TestShellActivity.TIMEOUT_IN_MILLIS, timeout);
intent.putExtra(TestShellActivity.TOTAL_TEST_COUNT, mTestCount);
intent.putExtra(TestShellActivity.CURRENT_TEST_NUMBER, testNumber);
+ intent.putExtra(TestShellActivity.STOP_ON_REF_ERROR, true);
activity.startActivity(intent);
// Wait until done.
diff --git a/tests/DumpRenderTree/src/com/android/dumprendertree/TestShellActivity.java b/tests/DumpRenderTree/src/com/android/dumprendertree/TestShellActivity.java
index 0a04712..bf66fae 100644
--- a/tests/DumpRenderTree/src/com/android/dumprendertree/TestShellActivity.java
+++ b/tests/DumpRenderTree/src/com/android/dumprendertree/TestShellActivity.java
@@ -179,6 +179,7 @@
mTimeoutInMillis = intent.getIntExtra(TIMEOUT_IN_MILLIS, 0);
mGetDrawtime = intent.getBooleanExtra(GET_DRAW_TIME, false);
mSaveImagePath = intent.getStringExtra(SAVE_IMAGE);
+ mStopOnRefError = intent.getBooleanExtra(STOP_ON_REF_ERROR, false);
setTitle("Test " + mCurrentTestNumber + " of " + mTotalTestCount);
float ratio = (float)mCurrentTestNumber / mTotalTestCount;
int progress = (int)(ratio * Window.PROGRESS_END);
@@ -699,8 +700,8 @@
// waiting for "notifyDone" signal to finish, then there's no point in waiting
// anymore because the JS execution is already terminated at this point and a
// "notifyDone" will never come out so it's just wasting time till timeout kicks in
- if (msg.contains("Uncaught ReferenceError:") || msg.contains("Uncaught TypeError:")
- && mWaitUntilDone) {
+ if ((msg.contains("Uncaught ReferenceError:") || msg.contains("Uncaught TypeError:"))
+ && mWaitUntilDone && mStopOnRefError) {
Log.w(LOGTAG, "Terminating test case on uncaught ReferenceError or TypeError.");
mHandler.postDelayed(new Runnable() {
public void run() {
@@ -857,6 +858,7 @@
private boolean mGetDrawtime;
private int mTotalTestCount;
private int mCurrentTestNumber;
+ private boolean mStopOnRefError;
// States
private boolean mTimedOut;
@@ -897,6 +899,7 @@
static final String SAVE_IMAGE = "SaveImage";
static final String TOTAL_TEST_COUNT = "TestCount";
static final String CURRENT_TEST_NUMBER = "TestNumber";
+ static final String STOP_ON_REF_ERROR = "StopOnReferenceError";
static final int DRAW_RUNS = 5;
static final String DRAW_TIME_LOG = "/sdcard/android/page_draw_time.txt";
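
The added parentheses in the console-message check matter because && binds tighter than || in Java, so the old expression only applied mWaitUntilDone to the TypeError half. A tiny illustration with hypothetical flags:

    // Demonstrates the precedence bug the change above fixes.
    class PrecedenceDemo {
        public static void main(String[] args) {
            boolean refError = true;     // "Uncaught ReferenceError" seen
            boolean typeError = false;   // no TypeError
            boolean waiting = false;     // not waiting for notifyDone

            // Old expression: parsed as refError || (typeError && waiting)
            boolean oldCondition = refError || typeError && waiting;
            // Fixed expression: (refError || typeError) && waiting
            boolean newCondition = (refError || typeError) && waiting;

            System.out.println(oldCondition); // true  (would have aborted the test)
            System.out.println(newCondition); // false (keeps waiting as intended)
        }
    }
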
diff --git a/tests/DumpRenderTree2/res/values/strings.xml b/tests/DumpRenderTree2/res/values/strings.xml
new file mode 100644
index 0000000..2dcd3ca
--- /dev/null
+++ b/tests/DumpRenderTree2/res/values/strings.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+Copyright (C) 2010 The Android Open Source Project
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<resources>
+ <string name="dialog_run_abort_dir_title_prefix">Directory:</string>
+ <string name="dialog_run_abort_dir_msg">This will run all the tests in this directory and all
+ the subdirectories. It may take a few hours!</string>
+ <string name="dialog_run_abort_dir_ok_button">Run tests!</string>
+ <string name="dialog_run_abort_dir_abort_button">Abort</string>
+
+ <string name="dialog_progress_title">Loading items.</string>
+ <string name="dialog_progress_msg">Please wait...</string>
+</resources>
\ No newline at end of file
diff --git a/tests/DumpRenderTree2/src/com/android/dumprendertree2/ui/DirListActivity.java b/tests/DumpRenderTree2/src/com/android/dumprendertree2/ui/DirListActivity.java
index 97a81ce..d8509c1 100644
--- a/tests/DumpRenderTree2/src/com/android/dumprendertree2/ui/DirListActivity.java
+++ b/tests/DumpRenderTree2/src/com/android/dumprendertree2/ui/DirListActivity.java
@@ -20,10 +20,16 @@
import com.android.dumprendertree2.R;
import android.app.Activity;
+import android.app.AlertDialog;
+import android.app.Dialog;
import android.app.ListActivity;
+import android.app.ProgressDialog;
+import android.content.DialogInterface;
import android.content.res.Configuration;
import android.os.Bundle;
import android.os.Environment;
+import android.os.Handler;
+import android.os.Message;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
@@ -50,7 +56,19 @@
File.separator + "LayoutTests";
/** TODO: This is just a guess - think of a better way to achieve it */
- private static final int MEAN_TITLE_CHAR_SIZE = 12;
+ private static final int MEAN_TITLE_CHAR_SIZE = 13;
+
+ private static final int PROGRESS_DIALOG_DELAY_MS = 200;
+
+ /** Code for the dialog, used in showDialog and onCreateDialog */
+ private static final int DIALOG_RUN_ABORT_DIR = 0;
+
+ /** Messages codes */
+ private static final int MSG_LOADED_ITEMS = 0;
+ private static final int MSG_SHOW_PROGRESS_DIALOG = 1;
+
+ /** Initialized lazily before first sProgressDialog.show() */
+ private static ProgressDialog sProgressDialog;
private ListView mListView;
@@ -63,6 +81,28 @@
private String mRootDirPath = ROOT_DIR_PATH;
/**
+ * A thread responsible for loading the contents of the directory from the SD card
+ * and sending them via a Message to the main thread, which then loads them into
+ * the ListView.
+ */
+ private class LoadListItemsThread extends Thread {
+ private Handler mHandler;
+ private String mRelativePath;
+
+ public LoadListItemsThread(String relativePath, Handler handler) {
+ mRelativePath = relativePath;
+ mHandler = handler;
+ }
+
+ @Override
+ public void run() {
+ Message msg = mHandler.obtainMessage(MSG_LOADED_ITEMS);
+ msg.obj = getDirList(mRelativePath);
+ mHandler.sendMessage(msg);
+ }
+ }
+
+ /**
* Very simple object to use inside ListView as an item.
*/
private static class ListItem implements Comparable<ListItem> {
@@ -149,8 +189,9 @@
mListView = getListView();
mListView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
- public void onItemClick(AdapterView<?> adapterView, View view, int position, long id) {
- ListItem item = (ListItem) adapterView.getItemAtPosition(position);
+ @Override
+ public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
+ ListItem item = (ListItem) parent.getItemAtPosition(position);
if (item.isDirectory()) {
showDir(item.getRelativePath());
@@ -160,6 +201,24 @@
}
});
+ mListView.setOnItemLongClickListener(new AdapterView.OnItemLongClickListener() {
+ @Override
+ public boolean onItemLongClick(AdapterView<?> parent, View view, int position, long id) {
+ ListItem item = (ListItem) parent.getItemAtPosition(position);
+
+ if (item.isDirectory()) {
+ Bundle arguments = new Bundle(1);
+ arguments.putString("name", item.getName());
+ arguments.putString("relativePath", item.getRelativePath());
+ showDialog(DIALOG_RUN_ABORT_DIR, arguments);
+ } else {
+ /** TODO: Maybe show some info about a test? */
+ }
+
+ return true;
+ }
+ });
+
/** All the paths are relative to test root dir where possible */
showDir("");
}
@@ -189,6 +248,48 @@
setTitle(shortenTitle(mCurrentDirPath));
}
+ @Override
+ protected Dialog onCreateDialog(int id, Bundle args) {
+ Dialog dialog = null;
+ AlertDialog.Builder builder = new AlertDialog.Builder(this);
+
+ switch (id) {
+ case DIALOG_RUN_ABORT_DIR:
+ builder.setTitle(getText(R.string.dialog_run_abort_dir_title_prefix) + " " +
+ args.getString("name"));
+ builder.setMessage(R.string.dialog_run_abort_dir_msg);
+ builder.setCancelable(true);
+
+ builder.setPositiveButton(R.string.dialog_run_abort_dir_ok_button,
+ new DialogInterface.OnClickListener() {
+ @Override
+ public void onClick(DialogInterface dialog, int which) {
+ /** TODO: Run tests from the dir */
+ removeDialog(DIALOG_RUN_ABORT_DIR);
+ }
+ });
+
+ builder.setNegativeButton(R.string.dialog_run_abort_dir_abort_button,
+ new DialogInterface.OnClickListener() {
+ @Override
+ public void onClick(DialogInterface dialog, int which) {
+ removeDialog(DIALOG_RUN_ABORT_DIR);
+ }
+ });
+
+ dialog = builder.create();
+ dialog.setOnCancelListener(new DialogInterface.OnCancelListener() {
+ @Override
+ public void onCancel(DialogInterface dialog) {
+ removeDialog(DIALOG_RUN_ABORT_DIR);
+ }
+ });
+ break;
+ }
+
+ return dialog;
+ }
+
/**
* Loads the contents of dir into the list view.
*
@@ -197,8 +298,41 @@
*/
private void showDir(String dirPath) {
mCurrentDirPath = dirPath;
- setTitle(shortenTitle(dirPath));
- setListAdapter(new DirListAdapter(this, getDirList(dirPath)));
+
+ /** Show progress dialog with a delay */
+ final Handler delayedDialogHandler = new Handler() {
+ @Override
+ public void handleMessage(Message msg) {
+ if (msg.what == MSG_SHOW_PROGRESS_DIALOG) {
+ if (sProgressDialog == null) {
+ sProgressDialog = new ProgressDialog(DirListActivity.this);
+ sProgressDialog.setCancelable(false);
+ sProgressDialog.setProgressStyle(ProgressDialog.STYLE_SPINNER);
+ sProgressDialog.setTitle(R.string.dialog_progress_title);
+ sProgressDialog.setMessage(getText(R.string.dialog_progress_msg));
+ }
+ sProgressDialog.show();
+ }
+ }
+ };
+ Message msgShowDialog = delayedDialogHandler.obtainMessage(MSG_SHOW_PROGRESS_DIALOG);
+ delayedDialogHandler.sendMessageDelayed(msgShowDialog, PROGRESS_DIALOG_DELAY_MS);
+
+ /** Delegate loading contents from SD card to a new thread */
+ new LoadListItemsThread(mCurrentDirPath, new Handler() {
+ @Override
+ public void handleMessage(Message msg) {
+ if (msg.what == MSG_LOADED_ITEMS) {
+ setListAdapter(new DirListAdapter(DirListActivity.this,
+ (ListItem[])msg.obj));
+ delayedDialogHandler.removeMessages(MSG_SHOW_PROGRESS_DIALOG);
+ setTitle(shortenTitle(mCurrentDirPath));
+ if (sProgressDialog != null) {
+ sProgressDialog.dismiss();
+ }
+ }
+ }
+ }).start();
}
/**
@@ -222,6 +356,8 @@
* Return the array with contents of the given directory.
* First it contains the subfolders, then the files. Both sorted
* alphabetically.
+ *
+ * The dirPath is relative.
*/
private ListItem[] getDirList(String dirPath) {
File dir = new File(mRootDirPath, dirPath);
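
DirListActivity now posts MSG_SHOW_PROGRESS_DIALOG with a 200 ms delay and removes it once the background thread delivers MSG_LOADED_ITEMS, so fast directory loads never flash the spinner. The same show-progress-only-if-slow pattern expressed with java.util.concurrent so it runs outside Android (names hypothetical):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    // Schedule "show spinner" with a delay, then cancel it if the load finishes
    // first, so the spinner never appears for fast operations.
    class DelayedProgressDemo {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

            // Equivalent of sendMessageDelayed(MSG_SHOW_PROGRESS_DIALOG, 200)
            ScheduledFuture<?> showSpinner = scheduler.schedule(new Runnable() {
                public void run() { System.out.println("show progress dialog"); }
            }, 200, TimeUnit.MILLISECONDS);

            Thread.sleep(50);                     // simulate a fast directory load
            System.out.println("items loaded");

            showSpinner.cancel(false);            // equivalent of removeMessages(...)
            scheduler.shutdown();
        }
    }
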
diff --git a/tests/HwAccelerationTest/AndroidManifest.xml b/tests/HwAccelerationTest/AndroidManifest.xml
index e09e70f..cb894f1 100644
--- a/tests/HwAccelerationTest/AndroidManifest.xml
+++ b/tests/HwAccelerationTest/AndroidManifest.xml
@@ -78,6 +78,24 @@
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
+
+ <activity
+ android:name="QuickRejectActivity"
+ android:label="_QuickReject">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+
+ <activity
+ android:name="RotationActivity"
+ android:label="_Rotation">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
</application>
</manifest>
diff --git a/tests/HwAccelerationTest/src/com/google/android/test/hwui/QuickRejectActivity.java b/tests/HwAccelerationTest/src/com/google/android/test/hwui/QuickRejectActivity.java
new file mode 100644
index 0000000..fd7a1e6
--- /dev/null
+++ b/tests/HwAccelerationTest/src/com/google/android/test/hwui/QuickRejectActivity.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.android.test.hwui;
+
+import android.app.Activity;
+import android.content.Context;
+import android.graphics.Bitmap;
+import android.graphics.BitmapFactory;
+import android.graphics.Canvas;
+import android.graphics.Paint;
+import android.os.Bundle;
+import android.view.View;
+
+@SuppressWarnings({"UnusedDeclaration"})
+public class QuickRejectActivity extends Activity {
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ final QuickRejectView view = new QuickRejectView(this);
+ setContentView(view);
+ }
+
+ static class QuickRejectView extends View {
+ private Paint mBitmapPaint;
+ private final Bitmap mBitmap1;
+
+ QuickRejectView(Context c) {
+ super(c);
+
+ mBitmap1 = BitmapFactory.decodeResource(c.getResources(), R.drawable.sunset1);
+
+ mBitmapPaint = new Paint();
+ mBitmapPaint.setFilterBitmap(true);
+ }
+
+ @Override
+ protected void onDraw(Canvas canvas) {
+ super.onDraw(canvas);
+
+ canvas.save();
+ canvas.clipRect(0.0f, 0.0f, 40.0f, 40.0f);
+ canvas.drawBitmap(mBitmap1, 0.0f, 0.0f, mBitmapPaint);
+ canvas.drawBitmap(mBitmap1, -mBitmap1.getWidth(), 0.0f, mBitmapPaint);
+ canvas.drawBitmap(mBitmap1, 50.0f, 0.0f, mBitmapPaint);
+ canvas.restore();
+ }
+ }
+}
\ No newline at end of file
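
QuickRejectActivity above draws two of its three bitmaps entirely outside a 40x40 clip, which is exactly the case a renderer can reject early. The same culling can also be done explicitly in application code with Canvas.quickReject(); a hedged sketch (view class hypothetical, not part of this test app):

    import android.content.Context;
    import android.graphics.Canvas;
    import android.graphics.Paint;
    import android.view.View;

    // Skips drawing work for rectangles that quickReject() reports as entirely
    // outside the current clip.
    class CullingView extends View {
        private final Paint mPaint = new Paint();

        CullingView(Context c) {
            super(c);
            mPaint.setColor(0xff00ff00);
        }

        @Override
        protected void onDraw(Canvas canvas) {
            canvas.clipRect(0.0f, 0.0f, 40.0f, 40.0f);
            drawCell(canvas, 0.0f, 0.0f);      // intersects the clip: drawn
            drawCell(canvas, 200.0f, 0.0f);    // outside the clip: culled
        }

        private void drawCell(Canvas canvas, float x, float y) {
            if (canvas.quickReject(x, y, x + 100.0f, y + 100.0f, Canvas.EdgeType.BW)) {
                return;   // entirely clipped out, skip the draw
            }
            canvas.drawRect(x, y, x + 100.0f, y + 100.0f, mPaint);
        }
    }
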
diff --git a/tests/HwAccelerationTest/src/com/google/android/test/hwui/RotationActivity.java b/tests/HwAccelerationTest/src/com/google/android/test/hwui/RotationActivity.java
new file mode 100644
index 0000000..e629cb8
--- /dev/null
+++ b/tests/HwAccelerationTest/src/com/google/android/test/hwui/RotationActivity.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.android.test.hwui;
+
+import android.app.Activity;
+import android.content.Context;
+import android.graphics.Canvas;
+import android.graphics.Paint;
+import android.os.Bundle;
+import android.view.View;
+
+@SuppressWarnings({"UnusedDeclaration"})
+public class RotationActivity extends Activity {
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+
+ DrawingView container = new DrawingView(this);
+
+ setContentView(container);
+ }
+
+ @SuppressWarnings({"UnusedDeclaration"})
+ static int dipToPx(Context c, int dip) {
+ return (int) (c.getResources().getDisplayMetrics().density * dip + 0.5f);
+ }
+
+ static class DrawingView extends View {
+ private final Paint mPaint;
+
+ DrawingView(Context c) {
+ super(c);
+ mPaint = new Paint();
+ mPaint.setAntiAlias(true);
+ }
+
+ @Override
+ protected void onDraw(Canvas canvas) {
+ canvas.save();
+ canvas.translate(dipToPx(getContext(), 400), dipToPx(getContext(), 200));
+ canvas.rotate(45.0f);
+ canvas.drawRGB(255, 255, 255);
+ mPaint.setColor(0xffff0000);
+ canvas.drawRect(-80.0f, -80.0f, 80.0f, 80.0f, mPaint);
+ canvas.drawRect(0.0f, 0.0f, 220.0f, 220.0f, mPaint);
+ canvas.restore();
+ }
+ }
+}
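
The dipToPx() helper above is the usual density conversion: multiply by DisplayMetrics.density and round to the nearest whole pixel. A worked example outside the framework (density values illustrative):

    // Density-independent pixels scale by the display density, rounded to the
    // nearest pixel, exactly as dipToPx() does above.
    class DipToPxDemo {
        static int dipToPx(float density, int dip) {
            return (int) (density * dip + 0.5f);
        }

        public static void main(String[] args) {
            System.out.println(dipToPx(1.0f, 400));  // 400 px on an mdpi screen
            System.out.println(dipToPx(1.5f, 400));  // 600 px on an hdpi screen
            System.out.println(dipToPx(2.0f, 200));  // 400 px on an xhdpi screen
        }
    }
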
diff --git a/wifi/java/android/net/wifi/WifiStateTracker.java b/wifi/java/android/net/wifi/WifiStateTracker.java
index 5b4faf97..5780a04 100644
--- a/wifi/java/android/net/wifi/WifiStateTracker.java
+++ b/wifi/java/android/net/wifi/WifiStateTracker.java
@@ -30,6 +30,7 @@
import android.net.ConnectivityManager;
import android.net.NetworkInfo.DetailedState;
import android.net.NetworkInfo.State;
+import android.net.NetworkProperties;
import android.os.Message;
import android.os.Parcelable;
import android.os.Handler;
@@ -54,6 +55,9 @@
import android.database.ContentObserver;
import com.android.internal.app.IBatteryStats;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
@@ -211,6 +215,7 @@
private boolean mDisconnectExpected;
private DhcpHandler mDhcpTarget;
private DhcpInfo mDhcpInfo;
+ private NetworkProperties mNetworkProperties;
private int mLastSignalLevel = -1;
private String mLastBssid;
private String mLastSsid;
@@ -315,7 +320,6 @@
private String mInterfaceName;
private static String LS = System.getProperty("line.separator");
- private static String[] sDnsPropNames;
private Handler mTarget;
private Context mContext;
private boolean mPrivateDnsRouteSet = false;
@@ -379,10 +383,7 @@
mSettingsObserver = new SettingsObserver(new Handler());
mInterfaceName = SystemProperties.get("wifi.interface", "tiwlan0");
- sDnsPropNames = new String[] {
- "dhcp." + mInterfaceName + ".dns1",
- "dhcp." + mInterfaceName + ".dns2"
- };
+ mNetworkProperties = new NetworkProperties();
mBatteryStats = IBatteryStats.Stub.asInterface(ServiceManager.getService("batteryinfo"));
}
@@ -477,15 +478,6 @@
}
/**
- * Return the IP addresses of the DNS servers available for the WLAN
- * network interface.
- * @return a list of DNS addresses, with no holes.
- */
- public String[] getDnsPropNames() {
- return sDnsPropNames;
- }
-
- /**
* Return the name of our WLAN network interface.
* @return the name of our interface.
*/
@@ -901,6 +893,7 @@
}
setDetailedState(DetailedState.DISCONNECTED);
setSupplicantState(SupplicantState.UNINITIALIZED);
+ mNetworkProperties.clear();
mHaveIpAddress = false;
mObtainingIpAddress = false;
if (died) {
@@ -1008,6 +1001,7 @@
reconnectCommand();
}
} else if (newState == SupplicantState.DISCONNECTED) {
+ mNetworkProperties.clear();
mHaveIpAddress = false;
if (isDriverStopped() || mDisconnectExpected) {
handleDisconnectedState(DetailedState.DISCONNECTED, true);
@@ -1192,6 +1186,7 @@
}
mReconnectCount = 0;
mHaveIpAddress = true;
+ configureNetworkProperties();
mObtainingIpAddress = false;
mWifiInfo.setIpAddress(mDhcpInfo.ipAddress);
mLastSignalLevel = -1; // force update of signal strength
@@ -1217,6 +1212,7 @@
// [31- 1] Reserved for future use
// [ 0- 0] Interface configuration succeeded (1) or failed (0)
EventLog.writeEvent(EVENTLOG_INTERFACE_CONFIGURATION_STATE_CHANGED, 0);
+ mNetworkProperties.clear();
mHaveIpAddress = false;
mWifiInfo.setIpAddress(0);
mObtainingIpAddress = false;
@@ -1289,6 +1285,49 @@
return disabledNetwork;
}
+
+ private void configureNetworkProperties() {
+ try {
+ mNetworkProperties.setInterface(NetworkInterface.getByName(mInterfaceName));
+ } catch (SocketException e) {
+ Log.e(TAG, "SocketException creating NetworkInterface from " + mInterfaceName +
+ ". e=" + e);
+ return;
+ } catch (NullPointerException e) {
+ Log.e(TAG, "NPE creating NetworkInterface. e=" + e);
+ return;
+ }
+ // TODO - fix this for v6
+ try {
+ mNetworkProperties.addAddress(InetAddress.getByAddress(
+ NetworkUtils.v4IntToArray(mDhcpInfo.ipAddress)));
+ } catch (UnknownHostException e) {
+ Log.e(TAG, "Exception setting IpAddress using " + mDhcpInfo + ", e=" + e);
+ }
+
+ try {
+ mNetworkProperties.setGateway(InetAddress.getByAddress(NetworkUtils.v4IntToArray(
+ mDhcpInfo.gateway)));
+ } catch (UnknownHostException e) {
+ Log.e(TAG, "Exception setting Gateway using " + mDhcpInfo + ", e=" + e);
+ }
+
+ try {
+ mNetworkProperties.addDns(InetAddress.getByAddress(
+ NetworkUtils.v4IntToArray(mDhcpInfo.dns1)));
+ } catch (UnknownHostException e) {
+ Log.e(TAG, "Exception setting Dns1 using " + mDhcpInfo + ", e=" + e);
+ }
+ try {
+ mNetworkProperties.addDns(InetAddress.getByAddress(
+ NetworkUtils.v4IntToArray(mDhcpInfo.dns2)));
+
+ } catch (UnknownHostException e) {
+ Log.e(TAG, "Exception setting Dns2 using " + mDhcpInfo + ", e=" + e);
+ }
+ // TODO - add proxy info
+ }
+
private void configureInterface() {
checkPollTimer();
mLastSignalLevel = -1;
@@ -1300,11 +1339,9 @@
} else {
int event;
if (NetworkUtils.configureInterface(mInterfaceName, mDhcpInfo)) {
- mHaveIpAddress = true;
event = EVENT_INTERFACE_CONFIGURATION_SUCCEEDED;
if (LOCAL_LOGD) Log.v(TAG, "Static IP configuration succeeded");
} else {
- mHaveIpAddress = false;
event = EVENT_INTERFACE_CONFIGURATION_FAILED;
if (LOCAL_LOGD) Log.v(TAG, "Static IP configuration failed");
}
@@ -1339,6 +1376,7 @@
*/
public void resetConnections(boolean disableInterface) {
if (LOCAL_LOGD) Log.d(TAG, "Reset connections and stopping DHCP");
+ mNetworkProperties.clear();
mHaveIpAddress = false;
mObtainingIpAddress = false;
mWifiInfo.setIpAddress(0);
@@ -2282,11 +2320,12 @@
mNotificationRepeatTime = 0;
mNumScansSinceNetworkStateChange = 0;
}
-
+
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
- sb.append("interface ").append(mInterfaceName);
+
+ sb.append(mNetworkProperties.toString());
sb.append(" runState=");
if (mRunState >= 1 && mRunState <= mRunStateNames.length) {
sb.append(mRunStateNames[mRunState-1]);
@@ -2366,7 +2405,7 @@
setBluetoothCoexistenceMode(
WifiNative.BLUETOOTH_COEXISTENCE_MODE_DISABLED);
}
-
+
powerMode = getPowerMode();
if (powerMode < 0) {
// Handle the case where supplicant driver does not support
@@ -2588,4 +2627,8 @@
Settings.Secure.WIFI_NETWORKS_AVAILABLE_NOTIFICATION_ON, 1) == 1;
}
}
+
+ public NetworkProperties getNetworkProperties() {
+ return mNetworkProperties;
+ }
}
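
configureNetworkProperties() above leans on NetworkUtils.v4IntToArray() to turn the int-packed DhcpInfo addresses into byte arrays for InetAddress.getByAddress(). A hedged standalone sketch of that conversion, assuming the DhcpInfo convention that the first dotted-quad octet sits in the least-significant byte of the int:

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    // Int-to-InetAddress conversion in the assumed DhcpInfo byte order
    // (192.168.1.1 stored as 0x0101A8C0).
    class V4AddressDemo {
        static InetAddress fromDhcpInt(int addr) throws UnknownHostException {
            byte[] bytes = {
                (byte) (addr & 0xff),
                (byte) ((addr >> 8) & 0xff),
                (byte) ((addr >> 16) & 0xff),
                (byte) ((addr >> 24) & 0xff)
            };
            return InetAddress.getByAddress(bytes);   // no DNS lookup for a raw 4-byte address
        }

        public static void main(String[] args) throws UnknownHostException {
            System.out.println(fromDhcpInt(0x0101A8C0).getHostAddress()); // 192.168.1.1
        }
    }
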