devNotes 7-13-16 monitor logcat analysis and optimizations


Step 5: Interacting with Physics objects

Add the OVRPhysicsRaycaster component to the OVRCameraRig. This new component looks very similar to Unity's built-in PhysicsRaycaster. You'll notice in the Inspector that it has an Event Mask property; this filter specifies which objects in the scene the raycaster will detect. Set it to just the "Gazable" layer. The scene has been set up so that all interactive components are on the "Gazable" layer. Run the scene again and try gaze-clicking on the lever in the middle of the scene.
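
If you prefer to wire this up in code instead of the Inspector, here is a minimal sketch (the component and layer names come from this step; the GazeRaycasterSetup class is hypothetical, and it assumes OVRPhysicsRaycaster exposes a settable eventMask LayerMask like Unity's PhysicsRaycaster):

using UnityEngine;

// Hypothetical helper: attach to the OVRCameraRig to add the raycaster at runtime.
public class GazeRaycasterSetup : MonoBehaviour
{
    void Awake()
    {
        var raycaster = gameObject.AddComponent<OVRPhysicsRaycaster>();
        // LayerMask.GetMask builds a bitmask from layer names;
        // restrict the raycaster to the "Gazable" layer only.
        raycaster.eventMask = LayerMask.GetMask("Gazable");
    }
}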

 

Mobile performance budgets:
  • 60 FPS (required by Oculus)
  • 50-100 draw calls per frame
  • 50,000-100,000 triangles or vertices per frame
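
To sanity-check the frame-rate budget at runtime, a smoothed FPS readout helps (a minimal sketch; the FpsMonitor name and 0.1 smoothing factor are arbitrary choices, and the 60 FPS threshold is the Oculus requirement above):

using UnityEngine;

public class FpsMonitor : MonoBehaviour
{
    // Smoothed frame time, seeded at the 60 FPS budget.
    private float smoothedDelta = 1f / 60f;

    void Update()
    {
        // Exponential moving average of the unscaled frame time.
        smoothedDelta = Mathf.Lerp(smoothedDelta, Time.unscaledDeltaTime, 0.1f);
    }

    void OnGUI()
    {
        float fps = 1f / Mathf.Max(smoothedDelta, 1e-5f);
        GUILayout.Label("FPS: " + fps.ToString("F1") + (fps < 60f ? "  (below budget)" : ""));
    }
}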

using System;
using System.Collections.Generic;

namespace UnityEngine.EventSystems
{
    public class OVRInputModule : PointerInputModule
    {
        [Tooltip("Object which points with Z axis. E.g. CentreEyeAnchor from OVRCameraRig")]
        public Transform rayTransform;

        [Tooltip("Gamepad button to act as gaze click")]
        public OVRInput.Button joyPadClickButton = OVRInput.Button.One;

        [Tooltip("Keyboard button to act as gaze click")]
        public KeyCode gazeClickKey = KeyCode.Space;

        [Header("Physics")]
        [Tooltip("Perform a sphere cast to determine correct depth for gaze pointer")]
        public bool performSphereCastForGazepointer;

        [Tooltip("Match the gaze pointer normal to geometry normal for physics colliders")]
        public bool matchNormalOnPhysicsColliders;

        [Header("Gamepad Stick Scroll")]
        [Tooltip("Enable scrolling with the left stick on a gamepad")]
        public bool useLeftStickScroll = true;

        [Tooltip("Deadzone for left stick to prevent accidental scrolling")]
        public float leftStickDeadZone = 0.15f;

        [Header("Touchpad Swipe Scroll")]
        [Tooltip("Enable scrolling by swiping the GearVR touchpad")]
        public bool useSwipeScroll = true;
        [Tooltip("Minimum swipe amount to trigger scrolling")]
        public float minSwipeMovement = 0;
        [Tooltip("Distance scrolled when swipe scroll occurs")]
        public float swipeScrollScale = 4f;

        #region GearVR swipe scroll
        private Vector2 swipeStartPos;
        private Vector2 unusedSwipe;
        #endregion

        // The raycaster that gets to do pointer interaction (e.g. with a mouse); gaze interaction always works.
        [NonSerialized]
        public OVRRaycaster activeGraphicRaycaster;
        [Header("Dragging")]
        [Tooltip("Minimum pointer movement in degrees to start dragging")]
        public float angleDragThreshold = 1;

        
        

        // The following region contains code identical to the implementation of
        // StandaloneInputModule. It is copied here rather than inherited because most
        // of StandaloneInputModule is private, so it isn't possible to derive from it easily.
        // Future changes from Unity to StandaloneInputModule may make it possible for
        // this class to derive from StandaloneInputModule instead of PointerInputModule.
        // 
        // The following functions are not present in the following region since they have modified
        // versions in the next region:
        // Process
        // ProcessMouseEvent
        // UseMouse
        #region StandaloneInputModule code
        
        private float m_NextAction;

        private Vector2 m_LastMousePosition;
        private Vector2 m_MousePosition;

        protected OVRInputModule()
        {}

        void Reset()
        {
            allowActivationOnMobileDevice = true;
        }

        [Obsolete("Mode is no longer needed on input module as it handles both mouse and keyboard simultaneously.", false)]
        public enum InputMode
        {
            Mouse,
            Buttons
        }

        [Obsolete("Mode is no longer needed on input module as it handles both mouse and keyboard simultaneously.", false)]
        public InputMode inputMode
        {
            get { return InputMode.Mouse; }
        }
        [Header("Standalone Input Module")]
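        /// <summary>
        /// Name of the horizontal axis for movement (if axis events are used).
        /// </summary>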
        [SerializeField]
        private string m_HorizontalAxis = "Horizontal";

        /// <summary>
        /// Name of the vertical axis for movement (if axis events are used).
        /// </summary>
        [SerializeField]
        private string m_VerticalAxis = "Vertical";

        /// <summary>
        /// Name of the submit button.
        /// </summary>
        [SerializeField]
        private string m_SubmitButton = "Submit";

        /// <summary>
        /// Name of the cancel button.
        /// </summary>
        [SerializeField]
        private string m_CancelButton = "Cancel";

        [SerializeField]
        private float m_InputActionsPerSecond = 10;

        [SerializeField]
        private bool m_AllowActivationOnMobileDevice;

        public bool allowActivationOnMobileDevice
        {
            get { return m_AllowActivationOnMobileDevice; }
            set { m_AllowActivationOnMobileDevice = value; }
        }

        public float inputActionsPerSecond
        {
            get { return m_InputActionsPerSecond; }
            set { m_InputActionsPerSecond = value; }
        }

        /// <summary>
        /// Name of the horizontal axis for movement (if axis events are used).
        /// </summary>
        public string horizontalAxis
        {
            get { return m_HorizontalAxis; }
            set { m_HorizontalAxis = value; }
        }

        /// <summary>
        /// Name of the vertical axis for movement (if axis events are used).
        /// </summary>
        public string verticalAxis
        {
            get { return m_VerticalAxis; }
            set { m_VerticalAxis = value; }
        }

        public string submitButton
        {
            get { return m_SubmitButton; }
            set { m_SubmitButton = value; }
        }

        public string cancelButton
        {
            get { return m_CancelButton; }
            set { m_CancelButton = value; }
        }

        public override void UpdateModule()
        {
            m_LastMousePosition = m_MousePosition;
            m_MousePosition = Input.mousePosition;
        }

        public override bool IsModuleSupported()
        {
            // Check for mouse presence instead of whether touch is supported,
            // as you can connect mouse to a tablet and in that case we'd want
            // to use StandaloneInputModule for non-touch input events.
            return m_AllowActivationOnMobileDevice || Input.mousePresent;
        }

        public override bool ShouldActivateModule()
        {
            if (!base.ShouldActivateModule())
                return false;

            var shouldActivate = Input.GetButtonDown(m_SubmitButton);
            shouldActivate |= Input.GetButtonDown(m_CancelButton);
            shouldActivate |= !Mathf.Approximately(Input.GetAxisRaw(m_HorizontalAxis), 0.0f);
            shouldActivate |= !Mathf.Approximately(Input.GetAxisRaw(m_VerticalAxis), 0.0f);
            shouldActivate |= (m_MousePosition - m_LastMousePosition).sqrMagnitude > 0.0f;
            shouldActivate |= Input.GetMouseButtonDown(0);
            return shouldActivate;
        }

        public override void ActivateModule()
        {
            base.ActivateModule();
            m_MousePosition = Input.mousePosition;
            m_LastMousePosition = Input.mousePosition;

            var toSelect = eventSystem.currentSelectedGameObject;
            if (toSelect == null)
                toSelect = eventSystem.firstSelectedGameObject;

            eventSystem.SetSelectedGameObject(toSelect, GetBaseEventData());
        }

        public override void DeactivateModule()
        {
            base.DeactivateModule();
            ClearSelection();
        }

        

        /// <summary>
        /// Process submit keys.
        /// </summary>
        private bool SendSubmitEventToSelectedObject()
        {
            if (eventSystem.currentSelectedGameObject == null)
                return false;

            var data = GetBaseEventData();
            if (Input.GetButtonDown(m_SubmitButton))
                ExecuteEvents.Execute(eventSystem.currentSelectedGameObject, data, ExecuteEvents.submitHandler);

            if (Input.GetButtonDown(m_CancelButton))
                ExecuteEvents.Execute(eventSystem.currentSelectedGameObject, data, ExecuteEvents.cancelHandler);
            return data.used;
        }

        private bool AllowMoveEventProcessing(float time)
        {
            bool allow = Input.GetButtonDown(m_HorizontalAxis);
            allow |= Input.GetButtonDown(m_VerticalAxis);
            allow |= (time > m_NextAction);
            return allow;
        }

        private Vector2 GetRawMoveVector()
        {
            Vector2 move = Vector2.zero;
            move.x = Input.GetAxisRaw(m_HorizontalAxis);
            move.y = Input.GetAxisRaw(m_VerticalAxis);

            if (Input.GetButtonDown(m_HorizontalAxis))
            {
                if (move.x < 0)
                    move.x = -1f;
                if (move.x > 0)
                    move.x = 1f;
            }
            if (Input.GetButtonDown(m_VerticalAxis))
            {
                if (move.y < 0)
                    move.y = -1f;
                if (move.y > 0)
                    move.y = 1f;
            }
            return move;
        }

        /// <summary>
        /// Process keyboard events.
        /// </summary>
        private bool SendMoveEventToSelectedObject()
        {
            float time = Time.unscaledTime;

            if (!AllowMoveEventProcessing(time))
                return false;

            Vector2 movement = GetRawMoveVector();
            // Debug.Log(m_ProcessingEvent.rawType + " axis:" + m_AllowAxisEvents + " value:" + "(" + x + "," + y + ")");
            var axisEventData = GetAxisEventData(movement.x, movement.y, 0.6f);
            if (!Mathf.Approximately(axisEventData.moveVector.x, 0f)
                || !Mathf.Approximately(axisEventData.moveVector.y, 0f))
            {
                ExecuteEvents.Execute(eventSystem.currentSelectedGameObject, axisEventData, ExecuteEvents.moveHandler);
            }
            m_NextAction = time + 1f / m_InputActionsPerSecond;
            return axisEventData.used;
        }

        

        

        private bool SendUpdateEventToSelectedObject()
        {
            if (eventSystem.currentSelectedGameObject == null)
                return false;

            var data = GetBaseEventData();
            ExecuteEvents.Execute(eventSystem.currentSelectedGameObject, data, ExecuteEvents.updateSelectedHandler);
            return data.used;
        }

        /// <summary>
        /// Process the current mouse press.
        /// </summary>
        private void ProcessMousePress(MouseButtonEventData data)
        {
            var pointerEvent = data.buttonData;
            var currentOverGo = pointerEvent.pointerCurrentRaycast.gameObject;

            // PointerDown notification
            if (data.PressedThisFrame())
            {
                pointerEvent.eligibleForClick = true;
                pointerEvent.delta = Vector2.zero;
                pointerEvent.dragging = false;
                pointerEvent.useDragThreshold = true;
                pointerEvent.pressPosition = pointerEvent.position;
                pointerEvent.pointerPressRaycast = pointerEvent.pointerCurrentRaycast;
                
                DeselectIfSelectionChanged(currentOverGo, pointerEvent);

                // search for the control that will receive the press
                // if we can't find a press handler set the press
                // handler to be what would receive a click.
                var newPressed = ExecuteEvents.ExecuteHierarchy(currentOverGo, pointerEvent, ExecuteEvents.pointerDownHandler);

                // didn't find a press handler... search for a click handler
                if (newPressed == null)
                    newPressed = ExecuteEvents.GetEventHandler<IPointerClickHandler>(currentOverGo);

                // Debug.Log("Pressed: " + newPressed);

                float time = Time.unscaledTime;

                if (newPressed == pointerEvent.lastPress)
                {
                    var diffTime = time - pointerEvent.clickTime;
                    if (diffTime < 0.3f)
                        ++pointerEvent.clickCount;
                    else
                        pointerEvent.clickCount = 1;

                    pointerEvent.clickTime = time;
                }
                else
                {
                    pointerEvent.clickCount = 1;
                }

                pointerEvent.pointerPress = newPressed;
                pointerEvent.rawPointerPress = currentOverGo;

                pointerEvent.clickTime = time;

                // Save the drag handler as well
                pointerEvent.pointerDrag = ExecuteEvents.GetEventHandler<IDragHandler>(currentOverGo);

                if (pointerEvent.pointerDrag != null)
                    ExecuteEvents.Execute(pointerEvent.pointerDrag, pointerEvent, ExecuteEvents.initializePotentialDrag);
            }

            // PointerUp notification
            if (data.ReleasedThisFrame())
            {
                // Debug.Log("Executing pressup on: " + pointer.pointerPress);
                ExecuteEvents.Execute(pointerEvent.pointerPress, pointerEvent, ExecuteEvents.pointerUpHandler);

                // Debug.Log("KeyCode: " + pointer.eventData.keyCode);

                // see if we mouse up on the same element that we clicked on...
                var pointerUpHandler = ExecuteEvents.GetEventHandler<IPointerClickHandler>(currentOverGo);

                // PointerClick and Drop events
                if (pointerEvent.pointerPress == pointerUpHandler && pointerEvent.eligibleForClick)
                {
                    ExecuteEvents.Execute(pointerEvent.pointerPress, pointerEvent, ExecuteEvents.pointerClickHandler);
                }
                else if (pointerEvent.pointerDrag != null)
                {
                    ExecuteEvents.ExecuteHierarchy(currentOverGo, pointerEvent, ExecuteEvents.dropHandler);
                }

                pointerEvent.eligibleForClick = false;
                pointerEvent.pointerPress = null;
                pointerEvent.rawPointerPress = null;

                if (pointerEvent.pointerDrag != null && pointerEvent.dragging)
                    ExecuteEvents.Execute(pointerEvent.pointerDrag, pointerEvent, ExecuteEvents.endDragHandler);

                pointerEvent.dragging = false;
                pointerEvent.pointerDrag = null;

                // redo pointer enter / exit to refresh state
                // so that if we moused over something that ignored it before
                // due to having pressed on something else
                // it now gets it.
                if (currentOverGo != pointerEvent.pointerEnter)
                {
                    HandlePointerExitAndEnter(pointerEvent, null);
                    HandlePointerExitAndEnter(pointerEvent, currentOverGo);
                }
            }
        }
#endregion
        #region Modified StandaloneInputModule methods
        
        /// <summary>
        /// Process all mouse events. This is the same as the StandaloneInputModule version except that
        /// it takes MouseState as a parameter, allowing it to be used for both Gaze and Mouse 
        /// pointers.
        /// </summary>
        private void ProcessMouseEvent(MouseState mouseData)
        {
            var pressed = mouseData.AnyPressesThisFrame();
            var released = mouseData.AnyReleasesThisFrame();

            var leftButtonData = mouseData.GetButtonState(PointerEventData.InputButton.Left).eventData;

            if (!UseMouse(pressed, released, leftButtonData.buttonData))
                return;

            // Process the first mouse button fully
            ProcessMousePress(leftButtonData);
            ProcessMove(leftButtonData.buttonData);
            ProcessDrag(leftButtonData.buttonData);

            // Now process right / middle clicks
            ProcessMousePress(mouseData.GetButtonState(PointerEventData.InputButton.Right).eventData);
            ProcessDrag(mouseData.GetButtonState(PointerEventData.InputButton.Right).eventData.buttonData);
            ProcessMousePress(mouseData.GetButtonState(PointerEventData.InputButton.Middle).eventData);
            ProcessDrag(mouseData.GetButtonState(PointerEventData.InputButton.Middle).eventData.buttonData);

            if (!Mathf.Approximately(leftButtonData.buttonData.scrollDelta.sqrMagnitude, 0.0f))
            {
                var scrollHandler = ExecuteEvents.GetEventHandler<IScrollHandler>(leftButtonData.buttonData.pointerCurrentRaycast.gameObject);
                ExecuteEvents.ExecuteHierarchy(scrollHandler, leftButtonData.buttonData, ExecuteEvents.scrollHandler);
            }
        }
        
        /// <summary>
        /// Process this InputModule. Same as the StandaloneInputModule version, except that it calls
        /// ProcessMouseEvent twice, once for gaze pointers, and once for mouse pointers.
        /// </summary>
        public override void Process()
        {
            bool usedEvent = SendUpdateEventToSelectedObject();

            if (eventSystem.sendNavigationEvents)
            {
                if (!usedEvent)
                    usedEvent |= SendMoveEventToSelectedObject();

                if (!usedEvent)
                    SendSubmitEventToSelectedObject();
            }

            ProcessMouseEvent(GetGazePointerData());
#if !UNITY_ANDROID
            ProcessMouseEvent(GetCanvasPointerData());
#endif
        }
        /// <summary>
        /// Decide if mouse events need to be processed this frame. Same as StandaloneInputModule except
        /// that the IsPointerMoving method from this class is used, instead of the method on PointerEventData.
        /// </summary>
        private static bool UseMouse(bool pressed, bool released, PointerEventData pointerData)
        {
            if (pressed || released || IsPointerMoving(pointerData) || pointerData.IsScrolling())
                return true;

            return false;
        }
        #endregion

        
        /// <summary>
        /// Convenience function for cloning PointerEventData
        /// </summary>
        /// <param name="from">Copy this value</param>
        /// <param name="to">to this object</param>
        protected void CopyFromTo(OVRRayPointerEventData @from, OVRRayPointerEventData @to)
        {
            @to.position = @from.position;
            @to.delta = @from.delta;
            @to.scrollDelta = @from.scrollDelta;
            @to.pointerCurrentRaycast = @from.pointerCurrentRaycast;
            @to.pointerEnter = @from.pointerEnter;
            @to.worldSpaceRay = @from.worldSpaceRay;
        }
        /// <summary>
        /// Convenience function for cloning PointerEventData
        /// </summary>
        /// <param name="from">Copy this value</param>
        /// <param name="to">to this object</param>
        protected void CopyFromTo(PointerEventData @from, PointerEventData @to)
        {
            @to.position = @from.position;
            @to.delta = @from.delta;
            @to.scrollDelta = @from.scrollDelta;
            @to.pointerCurrentRaycast = @from.pointerCurrentRaycast;
            @to.pointerEnter = @from.pointerEnter;
        }
        

        // In the following region we extend the PointerEventData system implemented in PointerInputModule
        // We define an additional dictionary for ray(e.g. gaze) based pointers. Mouse pointers still use the dictionary
        // in PointerInputModule
#region PointerEventData pool

        // Pool for OVRRayPointerEventData for ray based pointers
        protected Dictionary<int, OVRRayPointerEventData> m_VRRayPointerData = new Dictionary<int, OVRRayPointerEventData>();

        
        protected bool GetPointerData(int id, out OVRRayPointerEventData data, bool create)
        {
            if (!m_VRRayPointerData.TryGetValue(id, out data) && create)
            {
                data = new OVRRayPointerEventData(eventSystem)
                {
                    pointerId = id,
                };

                m_VRRayPointerData.Add(id, data);
                return true;
            }
            return false;
        }

        /// <summary>
        /// Clear pointer state for both types of pointer
        /// </summary>
        protected new void ClearSelection()
        {
            var baseEventData = GetBaseEventData();

            foreach (var pointer in m_PointerData.Values)
            {
                // clear all selection
                HandlePointerExitAndEnter(pointer, null);
            }
            foreach (var pointer in m_VRRayPointerData.Values)
            {
                // clear all selection
                HandlePointerExitAndEnter(pointer, null);
            }

            m_PointerData.Clear();
            eventSystem.SetSelectedGameObject(null, baseEventData);
        }
#endregion

        /// <summary>
        /// For a RectTransform, calculate its normal in world space
        /// </summary>
        static Vector3 GetRectTransformNormal(RectTransform rectTransform)
        {
            Vector3[] corners = new Vector3[4];
            rectTransform.GetWorldCorners(corners);
            Vector3 bottomEdge = corners[3] - corners[0];
            Vector3 leftEdge = corners[1] - corners[0];
            // The normal is the cross product of two edges of the rect.
            return Vector3.Cross(leftEdge, bottomEdge).normalized;
        }
       
        private readonly MouseState m_MouseState = new MouseState();
        // Overridden so that we can process the two types of pointer separately


        // The following 2 functions are equivalent to PointerInputModule.GetMousePointerEventData but are customized to
        // get data for ray pointers and canvas mouse pointers.
        
        /// <summary>
        /// State for a pointer controlled by a world space ray. E.g. gaze pointer
        /// </summary>
        /// <returns></returns>
        protected MouseState GetGazePointerData()
        {
            // Get the OVRRayPointerEventData reference
            OVRRayPointerEventData leftData;
            GetPointerData(kMouseLeftId, out leftData, true );
            leftData.Reset();
            
            //Now set the world space ray. This ray is what the user uses to point at UI elements
            leftData.worldSpaceRay = new Ray(rayTransform.position, rayTransform.forward);
            leftData.scrollDelta = GetExtraScrollDelta();

            //Populate some default values
            leftData.button = PointerEventData.InputButton.Left;
            leftData.useDragThreshold = true;
            // Perform raycast to find intersections with world
            eventSystem.RaycastAll(leftData, m_RaycastResultCache);
            var raycast = FindFirstRaycast(m_RaycastResultCache);
            leftData.pointerCurrentRaycast = raycast;
            m_RaycastResultCache.Clear();

            OVRRaycaster ovrRaycaster = raycast.module as OVRRaycaster;
            // We're only interested in intersections from OVRRaycasters
            if (ovrRaycaster) 
            {
                // The Unity UI system expects event data to have a screen position
                // so even though this raycast came from a world space ray we must get a screen
                // space position for the camera attached to this raycaster for compatibility
                leftData.position = ovrRaycaster.GetScreenPosition(raycast);
                

                // Find the world position and normal the Graphic the ray intersected
                RectTransform graphicRect = raycast.gameObject.GetComponent<RectTransform>();
                if (graphicRect != null)
                {
                    // Set our gaze indicator with this world position and normal
                    Vector3 worldPos = raycast.worldPosition;
                    Vector3 normal = GetRectTransformNormal(graphicRect);
                    OVRGazePointer.instance.SetPosition(worldPos, normal);
                    // Make sure it's being shown
                    OVRGazePointer.instance.RequestShow();
                }
            }
            OVRPhysicsRaycaster physicsRaycaster = raycast.module as OVRPhysicsRaycaster;
            if (physicsRaycaster)
            {
                leftData.position = physicsRaycaster.GetScreenPos(raycast.worldPosition);
                OVRGazePointer.instance.RequestShow();
                OVRGazePointer.instance.SetPosition(raycast.worldPosition, raycast.worldNormal);
            }




            // Stick default data values in right and middle slots for compatibility

            // copy the appropriate data into right and middle slots
            OVRRayPointerEventData rightData;
            GetPointerData(kMouseRightId, out rightData, true );
            CopyFromTo(leftData, rightData);
            rightData.button = PointerEventData.InputButton.Right;

            OVRRayPointerEventData middleData;
            GetPointerData(kMouseMiddleId, out middleData, true );
            CopyFromTo(leftData, middleData);
            middleData.button = PointerEventData.InputButton.Middle;


            m_MouseState.SetButtonState(PointerEventData.InputButton.Left, GetGazeButtonState(), leftData);
            m_MouseState.SetButtonState(PointerEventData.InputButton.Right, PointerEventData.FramePressState.NotChanged, rightData);
            m_MouseState.SetButtonState(PointerEventData.InputButton.Middle, PointerEventData.FramePressState.NotChanged, middleData);
            return m_MouseState;
        }

        /// <summary>
        /// Get state for pointer which is a pointer moving in world space across the surface of a world space canvas.
        /// </summary>
        /// <returns></returns>
        protected MouseState GetCanvasPointerData()
        {
            // Get the OVRRayPointerEventData reference
            PointerEventData leftData;
            GetPointerData(kMouseLeftId, out leftData, true );
            leftData.Reset();
            
            // Setup default values here. Set position to zero because we don't actually know the pointer
            // positions. Each canvas knows the position of its canvas pointer.
            leftData.position = Vector2.zero;
            leftData.scrollDelta = Input.mouseScrollDelta;
            leftData.button = PointerEventData.InputButton.Left;

            if (activeGraphicRaycaster)
            {
                // Let the active raycaster find intersections on its canvas
                activeGraphicRaycaster.RaycastPointer(leftData, m_RaycastResultCache);
                var raycast = FindFirstRaycast(m_RaycastResultCache);
                leftData.pointerCurrentRaycast = raycast;
                m_RaycastResultCache.Clear();
                
                OVRRaycaster ovrRaycaster = raycast.module as OVRRaycaster;
                if (ovrRaycaster) // raycast may not actually contain a result
                {
                    // The Unity UI system expects event data to have a screen position
                    // so even though this raycast came from a world space ray we must get a screen
                    // space position for the camera attached to this raycaster for compatibility
                    Vector2 position = ovrRaycaster.GetScreenPosition(raycast);
                    
                    leftData.delta = position - leftData.position;
                    leftData.position = position;
                }
            }

            // copy the appropriate data into right and middle slots
            PointerEventData rightData;
            GetPointerData(kMouseRightId, out rightData, true );
            CopyFromTo(leftData, rightData);
            rightData.button = PointerEventData.InputButton.Right;

            PointerEventData middleData;
            GetPointerData(kMouseMiddleId, out middleData, true );
            CopyFromTo(leftData, middleData);
            middleData.button = PointerEventData.InputButton.Middle;

            m_MouseState.SetButtonState(PointerEventData.InputButton.Left, StateForMouseButton(0), leftData);
            m_MouseState.SetButtonState(PointerEventData.InputButton.Right, StateForMouseButton(1), rightData);
            m_MouseState.SetButtonState(PointerEventData.InputButton.Middle, StateForMouseButton(2), middleData);
            return m_MouseState;
        }

        /// <summary>
        /// New version of ShouldStartDrag implemented first in PointerInputModule. This version differs in that
        /// for ray based pointers it makes a decision about whether a drag should start based on the angular change
        /// the pointer has made so far, as seen from the camera. This also works when the world space ray is 
        /// translated rather than rotated, since the beginning and end of the movement are considered as angle from
        /// the same point.
        /// </summary>
        private bool ShouldStartDrag(PointerEventData pointerEvent)
        {
            if (!pointerEvent.useDragThreshold)
                return true;

            if (pointerEvent as OVRRayPointerEventData == null)
            {
                 // Same as original behaviour for canvas based pointers
                return (pointerEvent.pressPosition - pointerEvent.position).sqrMagnitude >= eventSystem.pixelDragThreshold * eventSystem.pixelDragThreshold;
            }
            else
            {
                // When it's not a screen space pointer we have to look at the angle it moved rather than the pixels distance
                // For gaze based pointing screen-space distance moved will always be near 0
                Vector3 cameraPos = pointerEvent.pressEventCamera.transform.position;
                Vector3 pressDir = (pointerEvent.pointerPressRaycast.worldPosition - cameraPos).normalized;
                Vector3 currentDir = (pointerEvent.pointerCurrentRaycast.worldPosition - cameraPos).normalized;
                return Vector3.Dot(pressDir, currentDir) < Mathf.Cos(Mathf.Deg2Rad * (angleDragThreshold));
            }
        }

        /// <summary>
        /// The purpose of this function is to allow us to switch between using the standard IsPointerMoving
        /// method for mouse driven pointers, but to always return true when it's a ray based pointer. 
        /// All real-world ray-based input devices are always moving so for simplicity we just return true
        /// for them. 
        /// 
        /// If PointerEventData.IsPointerMoving was virtual we could just override that in
        /// OVRRayPointerEventData.
        /// </summary>
        /// <param name="pointerEvent"></param>
        /// <returns></returns>
        static bool IsPointerMoving(PointerEventData pointerEvent)
        {
            OVRRayPointerEventData rayPointerEventData = pointerEvent as OVRRayPointerEventData;
            if (rayPointerEventData != null)
                return true;
            else
                return pointerEvent.IsPointerMoving();
        }

        /// <summary>
        /// Exactly the same as the code from PointerInputModule, except that we call our own
        /// IsPointerMoving.
        /// 
        /// This would also not be necessary if PointerEventData.IsPointerMoving was virtual
        /// </summary>
        /// <param name="pointerEvent"></param>
        protected override void ProcessDrag(PointerEventData pointerEvent)
        {
            bool moving = IsPointerMoving(pointerEvent);
            if (moving && pointerEvent.pointerDrag != null
                && !pointerEvent.dragging
                && ShouldStartDrag(pointerEvent))
            {
                ExecuteEvents.Execute(pointerEvent.pointerDrag, pointerEvent, ExecuteEvents.beginDragHandler);
                pointerEvent.dragging = true;
            }

            // Drag notification
            if (pointerEvent.dragging && moving && pointerEvent.pointerDrag != null)
            {
                // Before doing drag we should cancel any pointer down state
                // And clear selection!
                if (pointerEvent.pointerPress != pointerEvent.pointerDrag)
                {
                    ExecuteEvents.Execute(pointerEvent.pointerPress, pointerEvent, ExecuteEvents.pointerUpHandler);

                    pointerEvent.eligibleForClick = false;
                    pointerEvent.pointerPress = null;
                    pointerEvent.rawPointerPress = null;
                }
                ExecuteEvents.Execute(pointerEvent.pointerDrag, pointerEvent, ExecuteEvents.dragHandler);
            }
        }
       
        /// <summary>
        /// Get state of button corresponding to gaze pointer
        /// </summary>
        /// <returns></returns>
        protected PointerEventData.FramePressState GetGazeButtonState()
        {
            var pressed = Input.GetKeyDown(gazeClickKey) || OVRInput.GetDown(joyPadClickButton);
            var released = Input.GetKeyUp(gazeClickKey) || OVRInput.GetUp(joyPadClickButton);

#if UNITY_ANDROID && !UNITY_EDITOR
            pressed |= Input.GetMouseButtonDown(0);
            released |= Input.GetMouseButtonUp(0);
#endif

            if (pressed && released)
                return PointerEventData.FramePressState.PressedAndReleased;
            if (pressed)
                return PointerEventData.FramePressState.Pressed;
            if (released)
                return PointerEventData.FramePressState.Released;
            return PointerEventData.FramePressState.NotChanged;
        }
        
        /// <summary>
        /// Get extra scroll delta from gamepad
        /// </summary>
        protected Vector2 GetExtraScrollDelta()
        {
            Vector2 scrollDelta = new Vector2();
            if (useLeftStickScroll)
            {
                float x = OVRInput.Get(OVRInput.Axis2D.PrimaryThumbstick).x;
                float y = OVRInput.Get(OVRInput.Axis2D.PrimaryThumbstick).y;
                if (Mathf.Abs(x) < leftStickDeadZone) x = 0;
                if (Mathf.Abs(y) < leftStickDeadZone) y = 0;
                scrollDelta = new Vector2 (x,y);   
            }
            return scrollDelta;
        }
    };
}
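
For reference, a minimal sketch of wiring the module up at runtime rather than in the Inspector (the GazeInputBootstrap class is hypothetical; it assumes the scene contains an OVRCameraRig whose centerEyeAnchor matches the rayTransform tooltip above):

using UnityEngine;
using UnityEngine.EventSystems;

// Hypothetical bootstrap: attach to the same GameObject as the EventSystem.
public class GazeInputBootstrap : MonoBehaviour
{
    void Awake()
    {
        var module = gameObject.AddComponent<OVRInputModule>();
        // Point the module's world space ray at the centre eye of the rig.
        var rig = FindObjectOfType<OVRCameraRig>();
        if (rig != null)
        {
            module.rayTransform = rig.centerEyeAnchor;
        }
    }
}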

Oculus Mobile SDK 1.0.3

Overview of Major Changes

Multi-view

Mobile SDK 1.0.3 adds multi-view rendering support. Multi-view rendering allows drawing to both eye views simultaneously, significantly reducing driver API overhead. It includes GPU optimizations for geometry processing.

Preliminary testing has shown that multi-view can provide:

  • 25-50% reduction in CPU time consumed by the application
  • 5% reduction in GPU time on the ARM Mali
  • 5-10% reduction in power draw

The freed-up CPU time could obviously be used to issue more draw calls. However, we recommend that applications instead leave this CPU time available to the driver threads in order to reduce or eliminate screen tears.

While current driver implementations of multi-view primarily reduce CPU usage, the GPU can benefit as well. On Exynos-based devices, multi-view not only reduces the CPU load but also slightly reduces the GPU load, by computing the view-independent vertex attributes once for both eyes instead of separately for each eye.

Even though there are significant savings in CPU time, these savings do not directly translate into a similar reduction in power draw. The power drawn by the CPU is only a fraction of the total power drawn by the device (which includes the GPU, memory bandwidth, display etc.).

Although every application will have its own unique set of challenges to consider, multi-view should allow most applications to lower the CPU clock frequency (CPU level), which will in turn improve power usage and the thermal envelope. However, this does not help on Exynos-based devices, where CPU levels 1, 2, and 3 all use the same clock frequency.

Multi-view will not be available on all Gear VR devices until driver and system updates become available.

The current set of supported devices as of the date of this release is:

  • S6 / Android M
  • S6+ / Android M
  • S6 Edge / Android M
  • Note 5 / Android M
  • Exynos S7 / Android M
  • Exynos S7+ / Android M

For detailed instructions on how to structure a native application for multi-view rendering, see Migrating to Mobile 1.0.3.

We are working with Unity and Epic to support multi-view in Unity and Unreal Engine.

VrAppInterface

VrAppInterface has been refactored to simplify the interface, support multi-view rendering, and enforce per-frame determinism. We highly recommend updating your VrAppInterface-based application to support multi-view. Even if you are not planning on supporting multi-view, it is worth adopting the VrAppInterface changes because they also pave the way for future Vulkan support.

VrApi

VrAppFramework-based applications now explicitly pass EGL objects to VrApi. Previously, the various VrApi functions had to be called from a thread with a specific EGLContext current. The current EGLContext and EGLSurface were basically invisible parameters to the VrApi functions. By explicitly passing the necessary EGL objects to the API, there are no threading restrictions.

The volume notifier is now rendered automatically by VrApi as a TimeWarp layer; the application is no longer responsible for generating and displaying this notification. Be sure not to render your own volume interface!

Build process

Various build steps have been moved from the Python build scripts into Gradle.

New Features

  • Volume Notifier now rendered automatically in VrApi as a TimeWarp Layer.
  • VrAppFramework now supports multi-view rendering path.
  • VrAppFramework now uses explicit EGL objects.
  • GlTexture now supports RGBA ASTC compressed textures.

API Changes

  • VrAppInterface::OneTimeInit and VrAppInterface::NewIntent have been replaced by VrAppInterface::EnteredVrMode, which is called right after the application has entered VR mode.
  • VrAppInterface::OneTimeShutdown has been removed in favor of moving shutdown code to the destructor of the VrAppInterface derived class.
  • VrAppInterface::LeavingVrMode is now called right before the application is about to leave VR mode.
  • VrAppInterface::Frame now takes an ovrFrameInput structure and returns an ovrFrameResult structure.
  • VrAppInterface::OnKeyEvent was removed. Key events are now explicitly handled in VrAppInterface::Frame.
  • VrApi ovrModeParmFlags now provide VRAPI_MODE_FLAG_NATIVE_WINDOW for specifying the ANativeWindow explicitly.

Bug Fixes

  • Fixed docked / mounted queries to be accurate without requiring an initial event.
  • Sample apps no longer prompt for an SD card on devices that don’t support external memory.

Known Issues

  • When converting your app to be multi-view compliant, ensure that your System Activities version is at least 1.0.3.1 or you will receive a required system update message.
/************************************************************************************
Filename    :   VrApi.h
Content     :   Minimum necessary API for mobile VR
Created     :   June 25, 2014
Authors     :   John Carmack
Copyright   :   Copyright 2014 Oculus VR, LLC. All Rights reserved.
*************************************************************************************/
#ifndef OVR_VrApi_h
#define OVR_VrApi_h

#include <jni.h>
#include "OVR_CAPI.h"
#include "TimeWarpParms.h"

extern "C" {

struct ovrModeParms
{
	// Shipping applications will almost always want this on,
	// but if you want to draw directly to the screen for
	// debug tasks, you can run synchronously so the init
	// thread is still current on the window.
	bool	AsynchronousTimeWarp;

	// If true, warn and allow the app to continue at 30fps when 
	// throttling occurs.
	// If false, display the level 2 error message which requires
	// the user to undock.
	bool	AllowPowerSave;

	// Optional distortion file to override built in distortion
	const char * DistortionFileName;

	// Set true to enable the image server, which allows a
	// remote device to view frames from the current VR session.
	bool	EnableImageServer;

	// This thread, in addition to the calling one, will
	// get SCHED_FIFO.
	int		GameThreadTid;

	// These are fixed clock levels.
	int		CpuLevel;
	int		GpuLevel;

	// The java Activity object is needed to get the windowManager,
	// packageName, systemService, etc.
	jobject ActivityObject;
};

struct ovrHmdInfo
{
	// Currently returns a conservative 1024
	int		SuggestedEyeResolution;

	// This is a product of the lens distortion and the screen size,
	// but there is no truly correct answer.
	//
	// There is a tradeoff in resolution and coverage.
	// Too small of an fov will leave unrendered pixels visible, but too
	// large wastes resolution or fill rate.  It is unreasonable to
	// increase it until the corners are completely covered, but we do
	// want most of the outside edges completely covered.
	//
	// Applications might choose to render a larger fov when angular
	// acceleration is high to reduce black pull in at the edges by
	// TimeWarp.
	//
	// Currently 90.0.
	float	SuggestedEyeFov;
};

// This must be called by a function called directly from a java thread,
// preferably at JNI_OnLoad().  It will fail if called from a pthread created
// in native code due to the class-lookup issue:
//
// http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
//
// This calls ovr_Initialize() internally.
void		ovr_OnLoad( JavaVM * JavaVm_ );
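
// For example (a sketch only; error handling omitted):
//
//	jint JNI_OnLoad( JavaVM * vm, void * reserved )
//	{
//		ovr_OnLoad( vm );
//		return JNI_VERSION_1_6;
//	}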

// VR context
// To support the meta-UI, which involves two different Android activities
// running in the same process, we need to maintain two separate contexts
// for a lot of the video related systems.
struct ovrMobile;

// Starts up TimeWarp, vsync tracking, sensor reading, clock locking, thread scheduling,
// and sets video options.  The calling thread will be given SCHED_FIFO.
// Should be called when the app is both resumed and has a valid window surface.
// The application must have its preferred OpenGL ES context current so the correct
// version and config can be matched for the background TimeWarp thread.
// On return, the context will be current on an invisible pbuffer, because TimeWarp
// will own the window.
ovrMobile *	ovr_EnterVrMode( ovrModeParms parms, ovrHmdInfo * returnedHmdInfo );

// Shut everything down for window destruction.
//
// The ovrMobile object is freed by this function.
//
// Calling from a thread other than the one that called ovr_EnterVrMode will be
// a fatal error.
void        ovr_LeaveVrMode( ovrMobile * ovr );

// Accepts a new pos + texture set that will be used for future warps.
// The parms are copied, and are not referenced after the function returns.
//
// The application GL context that rendered the eye images must be current,
// but drawing does not need to be completed.  A sync object will be added
// to the current context so the background thread can know when it is ready to use.
//
// This will block until the textures from the previous
// WarpSwap have completed rendering, to allow one frame of overlap for maximum
// GPU utilization while preventing multiple frames from piling up and causing variable latency.
//
// This will block until at least one vsync has passed since the last
// call to WarpSwap to prevent applications with simple scenes from
// generating completely wasted frames.
//
// IMPORTANT: you must triple buffer the textures that you pass to WarpSwap
// to avoid flickering and performance problems.
//
// Calling from a thread other than the one that called ovr_EnterVrMode will be
// a fatal error.
void		ovr_WarpSwap( ovrMobile * ovr, const TimeWarpParms * parms );
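
// An illustrative per-frame flow under the triple buffering requirement above
// (a sketch only; NUM_BUFFERS, eyeTexId, and the parms setup are hypothetical):
//
//	enum { NUM_BUFFERS = 3 };			// triple buffer to avoid flicker
//	static int bufferIndex = 0;
//	// ... render both eye views into the textures in eyeTexId[bufferIndex] ...
//	TimeWarpParms parms;				// fill with poses and the eye texture ids
//	ovr_WarpSwap( ovr, &parms );		// blocks until the previous set is consumed
//	bufferIndex = ( bufferIndex + 1 ) % NUM_BUFFERS;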

// Warp swap pushing black to the screen.
void		ovr_WarpSwapBlack( ovrMobile * ovr );

// Warp swap showing a spinning loading icon.
void		ovr_WarpSwapLoadingIcon( ovrMobile * ovr );

// Called when the window surface is destroyed to make sure warp swap
// is never called without a valid window surface.
void		ovr_SurfaceDestroyed( ovrMobile * ovr );

// Handle power level state changes and Hmd events, such as
// mount/unmount dock/undock
void		ovr_HandleDeviceStateChanges( ovrMobile * ovr );

// Start up the platform UI for pass-through camera, reorient, exit, etc.
// The current app will be covered up by the platform UI, but will be
// returned to when it finishes.
//
// You must have the following in the AndroidManifest.xml file for this to work:
//<activity
//  android:name="com.oculusvr.vrlib.PlatformActivity"
//	android:theme="@android:style/Theme.Black.NoTitleBar.Fullscreen"
//	android:launchMode="singleTask"
//	android:screenOrientation="landscape"
//	android:configChanges="orientation|keyboardHidden|keyboard">
//</activity>

// Platform UI class name.
#define PUI_CLASS_NAME "com.oculusvr.vrlib.PlatformActivity"

// Platform UI command strings.
#define PUI_GLOBAL_MENU "globalMenu"
#define PUI_GLOBAL_MENU_TUTORIAL "globalMenuTutorial"
#define PUI_CONFIRM_QUIT "confirmQuit"
#define PUI_THROTTLED1	"throttled1"	// Warn that Power Save Mode has been activated
#define PUI_THROTTLED2	"throttled2"	// Warn that Minimum Mode has been activated
#define PUI_HMT_UNMOUNT	"HMT_unmount"	// the HMT has been taken off the head
#define PUI_HMT_MOUNT	"HMT_mount"		// the HMT has been placed on the head
#define PUI_WARNING		"warning"		// the HMT has been placed on the head and a warning message shows

void		ovr_StartPackageActivity( ovrMobile * ovr, const char * className, const char * commandString );

enum eExitType
{
	EXIT_TYPE_FINISH,			// This will finish the current activity.
	EXIT_TYPE_FINISH_AFFINITY,	// This will finish all activities on the stack.
	EXIT_TYPE_EXIT				// This calls ovr_Shutdown() and exit(0).
								// Must be called from the Java thread!
};
void		ovr_ExitActivity( ovrMobile * ovr, eExitType type );

// Handle Hmd Events such as mount/unmount, dock/undock
enum eHMTMountState
{
	HMT_MOUNT_NONE,			// nothing to do
	HMT_MOUNT_MOUNTED,		// the HMT has been placed on the head
	HMT_MOUNT_UNMOUNTED		// the HMT has been removed from the head
};

struct HMTMountState_t
{
	HMTMountState_t() : 
		MountState( HMT_MOUNT_NONE ) 
	{ 
	}

	explicit HMTMountState_t( eHMTMountState const mountState ) :
		MountState( mountState )
	{
	}

	eHMTMountState	MountState;
};

// Call to query the current mount state.
HMTMountState_t ovr_GetExternalHMTMountState();
// Call to reset the current mount state once it's been acted upon.
void ovr_ResetExternalHMTMountState();
// Call to allow the application to handle or not handle HMT mounting.
void ovr_SetExternalHMTMountHandling( bool const handleExternally );

//-----------------------------------------------------------------
// Functions present in the Oculus CAPI
//
// You should not use any other CAPI functions.
//-----------------------------------------------------------------

// There is a single ovrHmd that is initialized at ovr_OnLoad() time.
// This is necessary to handle multiple VR activities in a single address
// space, because the second one is started before the first one shuts
// down, so they would not be able to acquire and release the hmd without
// it being double-owned during the hand off.
extern	ovrHmd	OvrHmd;

/*
// Returns global, absolute high-resolution time in seconds. This is the same
// value as used in sensor messages.
double      ovr_GetTimeInSeconds();
// Returns sensor state reading based on the specified absolute system time.
// Pass absTime value of 0.0 to request the most recent sensor reading; in this case
// both PredictedPose and SamplePose will have the same value.
// ovrHmd_GetEyePredictedSensorState relies on this internally.
// This may also be used for more refined timing of FrontBuffer rendering logic, etc.
//
// allowSensorCreate is ignored on mobile.
ovrSensorState ovrHmd_GetSensorState( ovrHmd hmd, double absTime, bool allowSensorCreate );
// Recenters the orientation on the yaw axis.
void        ovrHmd_RecenterYaw( ovrHmd hmd );
*/

//-----------------------------------------------------------------
// Miscellaneous functions
//-----------------------------------------------------------------

enum eBatteryStatus
{
	BATTERY_STATUS_CHARGING,
	BATTERY_STATUS_DISCHARGING,
	BATTERY_STATUS_FULL,
	BATTERY_STATUS_NOT_CHARGING,
	BATTERY_STATUS_UNKNOWN
};

struct batteryState_t
{
    int             level;          // in range [0,100]
    int             temperature;    // in tenths of a degree Centigrade
    eBatteryStatus  status;
};

// While VrMode is active, we get battery state updates from Android.
// This can be used to pop up low battery and temperature warnings.
batteryState_t ovr_GetBatteryState();

// While VrMode is active, we get volume updates from Android.
int ovr_GetVolume();
double ovr_GetTimeSinceLastVolumeChange();

// returns the current WIFI signal level
int ovr_GetWifiSignalLevel();

enum eWifiState
{
	WIFI_STATE_DISABLED,
	WIFI_STATE_DISABLING,
	WIFI_STATE_ENABLED,
	WIFI_STATE_ENABLING,
	WIFI_STATE_UNKNOWN
};
eWifiState ovr_GetWifiState();

// returns the current cellular signal level
int ovr_GetCellularSignalLevel();

enum eCellularState
{
	CELLULAR_STATE_IN_SERVICE,
	CELLULAR_STATE_OUT_OF_SERVICE,
	CELLULAR_STATE_EMERGENCY_ONLY,
	CELLULAR_STATE_POWER_OFF
};
eCellularState ovr_GetCellularState();

// While VrMode is active, we get headset plugged/unplugged updates
// from Android.
bool ovr_GetHeadsetPluggedState();

bool ovr_GetPowerLevelStateThrottled();
bool ovr_GetPowerLevelStateMinimum();

void ovr_ResetClockLocks( ovrMobile * ovr );

// returns the value of a specific build string.  Valid names are:
enum eBuildString
{
	BUILDSTR_BRAND,
	BUILDSTR_DEVICE,
	BUILDSTR_DISPLAY,
	BUILDSTR_FINGERPRINT,
	BUILDSTR_HARDWARE,
	BUILDSTR_HOST,
	BUILDSTR_ID,
	BUILDSTR_MODEL,
	BUILDSTR_PRODUCT,
	BUILDSTR_SERIAL,
	BUILDSTR_TAGS,
	BUILDSTR_TYPE,
	BUILDSTR_MAX
};

char const * ovr_GetBuildString( eBuildString const id );

}	// extern "C"

#endif	// OVR_VrApi_h

http://www.gearvrf.org/bin/view/GearVRfDeveloperGuide/GearVRfDevGuide200Start


    // Append the collected logcat entries to a text file, one formatted line per entry:
    // type | zero-padded line number | creation date | message.
    private void SaveAppend()
    {
        // using ensures the writer is flushed and closed even if a write throws.
        using (var sWrite = new StreamWriter("LogCat_" + ".txt", true))
        {
            for (int i = 0; i < logsList.Count; i++)
            {
                sWrite.WriteLine(logsList[i].Type + " | " + String.Format("{0:0000000}", lineNo++) + " | " + logsList[i].CreationDate + " | " + logsList[i].Message);
            }
        }
    }