如何移动与手指一个正方形的OpenGL? [英] How to move a OpenGL square with the finger?

查看:125
本文介绍了如何移动与手指一个正方形的OpenGL?的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

其实我有一个面向 Android 1.5 的应用程序，其中包含一个 GLSurfaceView 类，在屏幕上显示一个简单的正方形多边形。

我想学习如何添加一个新功能：用手指触摸并移动这个正方形。也就是说，当用户触摸正方形并移动手指时，正方形应跟随手指移动，直到手指离开屏幕为止。

任何教程/代码示例/帮助都将不胜感激。

我的code:

 公共类MySurfaceView扩展GLSurfaceView实现渲染器{
私人上下文的背景下;
私人广场广场;
私人浮动xrot; // X旋转
私人浮动yrot; // Y轴旋转
私人浮动zrot; // Z旋转
私人浮动xspeed; // X旋转速度
私人浮动yspeed; // Y轴旋转速度
私人持股量Z = -1.15f; // Profundidad恩报EJEž
私人浮动oldX; //勇敢前代X,第rotación
私人浮动oldY; //勇敢前代Y,对rotación
私人最终浮动TOUCH_SCALE = 0.2F; // necesario对拉rotación

//在初始化code创建矩阵采集对象
私人MatrixGrabber毫克=新MatrixGrabber();

私人布尔firstTimeDone = FALSE; //真司拉aplicación雅公顷内幕交易inicializada。

公共MySurfaceView(上下文的背景下,位图图像){
    超(上下文);
    this.context =背景;
    setEGLConfigChooser(8,​​8,8,8,16,0); //丰多transparente
    。getHolder()和setFormat(PixelFormat.TRANSLUCENT); //丰多transparente
    // Transformamos ESTA化酶连接renderizadora
    this.setRenderer(本);
    //请求的焦点,第阙洛杉矶botones reaccionen
    this.requestFocus();
    this.setFocusableInTouchMode(真正的);
    方=新广场(形象);
}

公共无效onSurfaceCreated(GL10 GL,EGLConfig配置){
    gl.glDisable(GL10.GL_DITHER); //抖动,
    gl.glEnable(GL10.GL_TEXTURE_2D); //纹理映射ON
    gl.glShadeModel(GL10.GL_SMOOTH); //光滑着色
    gl.glClearDepthf(1.0F); //深度缓冲设置
    gl.glEnable(GL10.GL_DEPTH_TEST); //深度测试开启
    gl.glDepthFunc(GL10.GL_LEQUAL);
    gl.glClearColor(0,0,0,0); //丰多transparente
    gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT,GL10.GL_NICEST);
    // Cargamos拉textura德尔立方。
    square.loadGLTexture(GL,this.context);
}

公共无效onDrawFrame(GL10 GL){
    // Limpiamos pantallaÿ深度缓冲
    gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
    gl.glLoadIdentity();
    // Dibujado
    gl.glTranslatef(0.0,0.0,Z); //将z单位到屏幕
    gl.glScalef(0.8f,0.8f,0.8f); // Escalamos对阙quepa EN LA pantalla
    // Rotamos洛杉矶自我EJES。
    gl.glRotatef(xrot,1.0F,0.0,0.0); // X
    gl.glRotatef(yrot,0.0,1.0F,0.0); //ÿ
    gl.glRotatef(zrot,0.0,0.0,1.0F); //ž
    // Dibujamos埃尔夸德拉多
    square.draw(GL);
    // Factores德rotación。
    xrot + = xspeed;
    yrot + = yspeed;


    如果(!firstTimeDone)
    {
        ///////////////新的$ C $下缩放AR图像所需宽度/////////////////
        mg.getCurrentProjection(GL);
        mg.getCurrentModelView(GL);
        浮动[] modelMatrix =新的浮动[16];
        浮动[] projMatrix =新的浮动[16];
        modelMatrix = mg.mModelView;
        projMatrix = mg.mProjection;
        INT [] MVIEW =新INT [4];
        MVIEW [0] = 0;
        MVIEW [1] = 0;
        MVIEW [2] = 800; //宽度
        MVIEW [3] = 480; //高度
        浮动[] outputCoords =新的浮动[3];
        GLU.gluProject(-1.0F,-1.0F,Z,modelMatrix,0,projMatrix,0,MVIEW,0,outputCoords,0);

        INT I = 0;
        System.out.print(ⅰ);
       // firstTimeDone = TRUE;
    }
}

// SI埃尔表面形成层,resetea拉维斯塔,imagino阙ESTO婆娑宽多cambias德MODO纵向/横向Øsacas EL teclado费斯切拉连接Moviles公司TIPO的Droid。
公共无效onSurfaceChanged(GL10 GL,诠释的宽度,高度INT){
    如果(高== 0){
        高度= 1;
    }
    gl.glViewport(0,0,宽度,高度); //重置视口
    gl.glMatrixMode(GL10.GL_PROJECTION); //选择投影矩阵
    gl.glLoadIdentity(); //重置投影矩阵
    //宽高比德拉塔纳
    GLU.gluPerspective(GL,45.0f,(浮动)宽/(浮动)的高度,0.1F,100.0f);
    gl.glMatrixMode(GL10.GL_MODELVIEW); //选择模型视图矩阵
    gl.glLoadIdentity(); //重置模型变换矩阵

}

公共布尔的onTouchEvent(MotionEvent事件){
    浮X = event.getX();
    浮动Y = event.getY();
    开关(event.getAction())
    {
        案例MotionEvent.ACTION_MOVE:
            // Calculamos埃尔坎比奥
            浮DX = X  -  oldX;
            浮DY = Y  -  oldY;
            xrot + = DY * TOUCH_SCALE;
            yrot + = DX * TOUCH_SCALE;
            //Log.w("XXXXXX,ACTION_MOVE_NO_ZOOM);
            打破;
    }
    oldX = X;
    oldY = Y;
    返回true; //萨尔瓦多evento公顷内幕交易manejado
}

公共无效zoomIn(){
    Z = Z + 0.2F;
    如果(Z> -1.0F)
        Z = -1.0F;
}
公共无效zoomOut(){
    Z = Z-0.2F;
    如果(Z< -20.0f)
       Z = -20.0f;
}
公共无效rotateL(){
    zrot = zrot + 3.0F;
}
公共无效rotateR(){
    zrot = zrot-3.0F;
}
公共无效复位()
{
    xrot = 0;
    yrot = 0;
    zrot = 0;
    xspeed = 0;
    yspeed = 0;
    Z = -5.0f;
}
}
 

这是我的广场类:

 公共类方{
//缓冲区德顶点
私人FloatBuffer vertexBuffer;
//缓冲区德coordenadas德texturas
私人FloatBuffer textureBuffer;
// Puntero德texturas
私人INT []纹理=新INT [3];
//厄尔尼诺项目重新presentar
私人位图图像;
//Definición德顶点

私人浮动顶点[] =
{
    -1.0F,-1.0F,0.0,//左下
    1.0F,-1.0F,0.0,//右下
    -1.0F,1.0F,0.0,//左上
    1.0F,1.0F,0.0 //右上
};
/ *
私人浮动顶点[] =
{
-0.8f,-0.8​​f,0.0,//左下
0.8f,-0.8​​f,0.0,//右下
-0.8f,0.8f,0.0,//左上
0.8f,0.8f,0.0
};
* /
// Coordenadas(U,V) - 德拉斯texturas
/ *
私人浮纹[] =
{
    //贴图坐标的顶点
    0.0,0.0,
    0.0,1.0F,
    1.0F,0.0,
    1.0F,1.0F
};
* /
私人浮纹[] =
{
    //贴图坐标的顶点
    0.0,1.0F,
    1.0F,1.0F,
    0.0,0.0,
    1.0F,0.0
};
// Inicializamos洛杉矶缓冲区
市民广场(位图图像​​){
    ByteBuffer的byteBuf = ByteBuffer.allocateDirect(vertices.length * 4);
    byteBuf.order(ByteOrder.nativeOrder());
    vertexBuffer = byteBuf.asFloatBuffer();
    vertexBuffer.put(顶点);
    vertexBuffer.position(0);

    byteBuf = ByteBuffer.allocateDirect(texture.length * 4);
    byteBuf.order(ByteOrder.nativeOrder());
    textureBuffer = byteBuf.asFloatBuffer();
    textureBuffer.put(纹理);
    textureBuffer.position(0);

    this.image =图像;
}
// Funcion德dibujado
公共无效画(GL10 GL){
    gl.glFrontFace(GL10.GL_CCW);
    //gl.glEnable(GL10.GL_BLEND);
    //在这种情况下pviously生成的纹理绑定我们唯一的$ P $
    gl.glBindTexture(GL10.GL_TEXTURE_2D,纹理[0]);
    //指向我们的顶点缓冲
    gl.glVertexPointer(3,GL10.GL_FLOAT,0,vertexBuffer);
    gl.glTexCoordPointer(2,GL10.GL_FLOAT,0,textureBuffer);
    //启用顶点缓冲区
    gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
    gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
    //绘制顶点三角形带
    gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP,0,vertices.length / 3);
    临行前//禁用客户端状态
    gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
    gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
    //gl.glDisable(GL10.GL_BLEND);
}
// Carga德texturas
公共无效loadGLTexture(GL10 GL,上下文语境){
    // Generamos未puntero德texturas
    gl.glGenTextures(1,纹理,0);
    //Ÿ本身LO asignamos一个NUESTRO阵列
    gl.glBindTexture(GL10.GL_TEXTURE_2D,纹理[0]);
    // Creamos筛选现在去texturas
    gl.glTexParameterf(GL10.GL_TEXTURE_2D,GL10.GL_TEXTURE_MIN_FILTER,GL10.GL_NEAREST);
    gl.glTexParameterf(GL10.GL_TEXTURE_2D,GL10.GL_TEXTURE_MAG_FILTER,GL10.GL_LINEAR);
    // Diferentes parametros日textura posibles GL10.GL_CLAMP_TO_EDGE
    gl.glTexParameterf(GL10.GL_TEXTURE_2D,GL10.GL_TEXTURE_WRAP_S,GL10.GL_REPEAT);
    gl.glTexParameterf(GL10.GL_TEXTURE_2D,GL10.GL_TEXTURE_WRAP_T,GL10.GL_REPEAT);
    / *
    字符串的ImagePath =radiocd5.png;
    AssetManager mngr = context.getAssets();
    InputStream的是= NULL;
    尝试 {
        是= mngr.open(的ImagePath);
    }赶上(IOException异常E1){e1.printStackTrace(); }
    * /
    //获取纹理从Android资源目录
    InputStream的是= NULL;
    / *
    如果(item.equals(RIM))
        ,是= context.getResources()openRawResource(R.drawable.rueda);
    否则,如果(item.equals(选择))
        ,是= context.getResources()openRawResource(R.drawable.selector);
    * /
    / *
    ,是= context.getResources()openRawResource(RESOURCEID);
    点阵位图= NULL;
    尝试 {
        位= BitmapFactory.de codeStream(是);
    } 最后 {
        尝试 {
            is.close();
            是=无效;
        }赶上(IOException异常E){
        }
    }
    * /
    点阵位图=图像;
    // CON EL siguientecódigoredimensionamos拉斯imágenes阙肖恩·马斯GRANDES日​​256×256。
    INT东西方妇女网络= bitmap.getWidth();
    INT newH = bitmap.getHeight();
    浮动事实;
    如果(newH> 256 ||东西方妇女网络> 256)
    {
        如果(newH> 256)
        {
            其实=(浮点)255 /(浮点)newH; // porcentaje POR EL阙multiplicar对SER玉野256
            newH =(INT)(newH *事实); // Altura的reducida人porcentaje necesario
            东西方妇女网络=(INT)(东西方妇女网络*事实); // anchura reducida人porcentaje necesario
        }
        如果(东西方妇女网络> 256)
        {
            其实=(浮点)255 /(浮点)东西方妇女网络; // porcentaje POR EL阙multiplicar对SER玉野256
            newH =(INT)(newH *事实); // Altura的reducida人porcentaje necesario
            东西方妇女网络=(INT)(东西方妇女网络*事实); // anchura reducida人porcentaje necesario
        }
        位= Bitmap.createScaledBitmap(位图,东西方妇女网络,newH,真正的);
    }
    // CON EL siguientecódigotransformamosimágenes没有potencia德2间imágenespotencia德2(壶)
    //梅托埃尔位NOPOT恩未位POT对阙没有aparezcan texturas卡斯。
    INT nextPot = 256;
    INT H = bitmap.getHeight();
    INT W = bitmap.getWidth();
    INT offx =(nextPot-瓦特)/ 2; // DISTANCIA respecto一拉左派,对阙拉imagen画质quede centrada EN LA努埃瓦imagen画质POT
    INT offy =(nextPot-H)/ 2; // DISTANCIA respecto一个阿里巴,对阙拉imagen画质quede centrada EN LA努埃瓦imagen画质POT
    位图bitmap2 = Bitmap.createBitmap(nextPot,nextPot,Bitmap.Config.ARGB_8888); // CREA未位transparente格拉西亚斯人ARGB_8888
    帆布comboImage =新的Canvas(bitmap2);
    comboImage.drawBitmap(位图,offx,offy,NULL);
    comboImage.save();

    // Usamos的Andr​​oid GLUtils对espcificar UNA textura日2 dimensiones对NUESTRO位图
    GLUtils.texImage2D(GL10.GL_TEXTURE_2D,0,bitmap2,0);

    // Checkeamos SI EL GL上下文ES版1.1Ÿgeneramos洛杉矶贴图POR标志。思无,llamamos一个圣母propiaimplementación
    如果(GL的instanceof GL11){
        gl.glTexParameterf(GL11.GL_TEXTURE_2D,GL11.GL_GENERATE_MIPMAP,GL11.GL_TRUE);
        GLUtils.texImage2D(GL10.GL_TEXTURE_2D,0,bitmap2,0);
    } 其他 {
        buildMipmap(GL,bitmap2);
    }
    // Limpiamos洛杉矶位图
    bitmap.recycle();
    bitmap2.recycle();
}
//圣母implementación德MIPMAP。 Escalamos埃尔位原hacia瓦霍POR因素去2 Y LO asignamos科莫索尔NIVEL德的mipmap
私人无效buildMipmap(GL10 GL,位图位图){
    INT级别= 0;
    INT高= bitmap.getHeight();
    INT宽度= bitmap.getWidth();
    而(高度> = 1 ||宽度GT; = 1){
        GLUtils.texImage2D(GL10.GL_TEXTURE_2D,水平,位图,0);
        如果(高度== 1 ||宽度== 1){
            打破;
        }
        ++级;
        高度/ = 2;
        宽度/ = 2;
        位图bitmap2 = Bitmap.createScaledBitmap(位图,宽度,高度,真);
        bitmap.recycle();
        位= bitmap2;
    }
}
}
 

解决方案

你看在Android教程code?他们在OpenGL ES的1和2的东西非常相似,这种结合的例子。

在 OpenGL ES 1 的教程中，有一节专门讲解如何处理触摸事件：http://developer.android.com/resources/tutorials/opengl/opengl-es10.html#touch

所以，你需要把触摸移动（onTouchEvent 的 ACTION_MOVE）部分中的 glRotatef 调用改为 glTranslatef；

修改

看来你更感兴趣的坐标转换不是选择对象。所以,无论你触摸屏幕上,这就是图像去(而不是触摸和拖动图像,这将意味着选择)。 而你对winZ的问题让我觉得你想gluunproject。 如果是这样的话,你已经知道你winZ,因为你的目的是通过你的Z变量翻译机背。由于您的z是负的,为什么不试试呢?

假设你已经设置你的GLWrapper为您的活动你GLSurfaceView:

  mGLView.setGLWrapper(新GLWrapper(){
        公共GL包装(GL GL){
            返回新MatrixTrackingGL(GL);
        }

    });
 

然后,在你GLSurfaceView /渲染器的子类...

 公众持股量[] unproject(GL10 GL,浮法X,浮动Y){
    mMatrixGrabber.getCurrentState(GL);
    INT []视图= {0,0,this.getWidth(),this.getHeight()};
    浮动[] POS =新的浮动[4];
    浮动[]结果= NULL;
    INT RETVAL = GLU.gluUnProject(X,Y​​,-z,
            mMatrixGrabber.mModelView,0,
            mMatrixGrabber.mProjection,0,
            视图,0,
            POS,0);
    如果(RETVAL!= GL10.GL_TRUE){
        Log.e(unproject,GLU.gluErrorString(RETVAL));
    } 其他 {
        结果=新的浮动[3];
        结果[0] = POS [0] / POS [3];
        结果[1] = POS [1] / POS [3];
        结果[2] = POS [2] / POS [3];
        结果= POS机;
    }
    返回结果;
}
 

然后,你可以修改你的TouchEvent处理程序包含

 开关(event.getAction())
    {
        案例MotionEvent.ACTION_MOVE:
            // Calculamos埃尔坎比奥
            浮DX = X  -  oldX;
            浮DY = Y  -  oldY;
            xrot + = DY * TOUCH_SCALE;
            yrot + = DX * TOUCH_SCALE;
            //Log.w("XXXXXX,ACTION_MOVE_NO_ZOOM);
            感人= TRUE;
            打破;
        案例MotionEvent.ACTION_UP:
            xrot = 0;
            yrot = 0;
            zrot = 0;
            感人= FALSE;
            打破;
    }
 

和把这个下一节在你的画法的其他翻译/规模/旋转电话前:

 如果(触摸){
        浮动[]点= unproject(GL,oldX,(this.getHeight() -  oldY));
        如果(点== NULL){
            Log.e(抽奖,没有意义);
        } 其他 {
            gl.glTranslatef(点[0],点[1],0);
        }
    }
 

希望这给你你想要的结果。

Actually I have an application for Android 1.5 with a GLSurfaceView class that shows a simple square polygon on the screen.

I want to learn how to add a new functionality, the functionality of moving the square touching it with the finger. I mean that when the user touches the square and moves the finger, the square should stick to the finger, until the finger releases the screen.

Any tutorials/code examples/help will be apreciated.

My code:

/**
 * GLSurfaceView that renders a single textured square and lets the user
 * rotate it by dragging a finger across the screen. Also exposes simple
 * zoom/rotate/reset controls for external buttons.
 */
public class MySurfaceView extends GLSurfaceView implements Renderer {

    private Context context;
    private Square square;

    private float xrot;                 // rotation about the X axis, in degrees
    private float yrot;                 // rotation about the Y axis, in degrees
    private float zrot;                 // rotation about the Z axis, in degrees
    private float xspeed;               // per-frame increment applied to xrot
    private float yspeed;               // per-frame increment applied to yrot
    private float z = -1.15f;           // camera offset along the Z axis (negative = into the screen)
    private float oldX;                 // previous touch X, used to compute drag deltas
    private float oldY;                 // previous touch Y, used to compute drag deltas
    private final float TOUCH_SCALE = 0.2f;  // degrees of rotation per pixel dragged

    // Tracks the current modelview/projection matrices so points can be
    // projected to window coordinates (see onDrawFrame).
    private MatrixGrabber mg = new MatrixGrabber();

    private boolean firstTimeDone = false;  // true once the one-off projection code has run

    /**
     * @param context Android context, used later to load the texture.
     * @param image   bitmap to draw onto the square.
     */
    public MySurfaceView(Context context, Bitmap image) {
        super(context);
        this.context = context;
        setEGLConfigChooser(8, 8, 8, 8, 16, 0);          // RGBA8888 + 16-bit depth: allows a transparent background
        getHolder().setFormat(PixelFormat.TRANSLUCENT);  // translucent surface for the transparent background
        // This class is its own renderer.
        this.setRenderer(this);
        // Request focus so touch/key events reach this view.
        this.requestFocus();
        this.setFocusableInTouchMode(true);
        square = new Square(image);
    }

    /** One-time GL state setup: texturing, depth test, transparent clear color. */
    public void onSurfaceCreated(GL10 gl, EGLConfig config) {
        gl.glDisable(GL10.GL_DITHER);               // dithering OFF
        gl.glEnable(GL10.GL_TEXTURE_2D);            // texture mapping ON
        gl.glShadeModel(GL10.GL_SMOOTH);            // smooth shading
        gl.glClearDepthf(1.0f);                     // depth buffer setup
        gl.glEnable(GL10.GL_DEPTH_TEST);            // depth testing ON
        gl.glDepthFunc(GL10.GL_LEQUAL);
        gl.glClearColor(0, 0, 0, 0);                // fully transparent clear color
        gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT, GL10.GL_NICEST);
        // Load the square's texture.
        square.loadGLTexture(gl, this.context);
    }

    /** Per-frame rendering: clear, apply transforms, draw the square. */
    public void onDrawFrame(GL10 gl) {
        // Clear the screen and the depth buffer.
        gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
        gl.glLoadIdentity();
        gl.glTranslatef(0.0f, 0.0f, z);         // move z units into the screen
        gl.glScalef(0.8f, 0.8f, 0.8f);          // scale down so the square fits the viewport
        // Rotate about each axis.
        gl.glRotatef(xrot, 1.0f, 0.0f, 0.0f);   // X
        gl.glRotatef(yrot, 0.0f, 1.0f, 0.0f);   // Y
        gl.glRotatef(zrot, 0.0f, 0.0f, 1.0f);   // Z
        // Draw the square.
        square.draw(gl);
        // Apply the continuous rotation speeds.
        xrot += xspeed;
        yrot += yspeed;

        if (!firstTimeDone) {
            // One-off: project the square's bottom-left corner to window coordinates.
            mg.getCurrentProjection(gl);
            mg.getCurrentModelView(gl);
            // BUG FIX: the viewport was hard-coded to 800x480; use the view's
            // actual size so the projection is correct on every device.
            int[] mView = {0, 0, getWidth(), getHeight()};
            float[] outputCoords = new float[3];
            GLU.gluProject(-1.0f, -1.0f, z,
                    mg.mModelView, 0,
                    mg.mProjection, 0,
                    mView, 0,
                    outputCoords, 0);
            // BUG FIX: mark the one-off work as done so these allocations and
            // the projection no longer run on every single frame.
            firstTimeDone = true;
        }
    }

    /**
     * Resets the viewport and projection when the surface changes size,
     * e.g. on a portrait/landscape switch or when a hardware keyboard slides out.
     */
    public void onSurfaceChanged(GL10 gl, int width, int height) {
        if (height == 0) {
            height = 1;                         // avoid a divide-by-zero aspect ratio
        }
        gl.glViewport(0, 0, width, height);     // reset viewport
        gl.glMatrixMode(GL10.GL_PROJECTION);    // select projection matrix
        gl.glLoadIdentity();                    // reset projection matrix
        // Perspective projection with the window's aspect ratio.
        GLU.gluPerspective(gl, 45.0f, (float) width / (float) height, 0.1f, 100.0f);
        gl.glMatrixMode(GL10.GL_MODELVIEW);     // select modelview matrix
        gl.glLoadIdentity();                    // reset modelview matrix
    }

    /** Drag-to-rotate: converts finger movement deltas into X/Y rotation. */
    public boolean onTouchEvent(MotionEvent event) {
        float x = event.getX();
        float y = event.getY();
        switch (event.getAction()) {
            case MotionEvent.ACTION_MOVE:
                // Compute the movement since the last event.
                float dx = x - oldX;
                float dy = y - oldY;
                xrot += dy * TOUCH_SCALE;
                yrot += dx * TOUCH_SCALE;
                break;
        }
        oldX = x;
        oldY = y;
        return true; // event handled
    }

    /** Moves the camera closer; clamped so the square never passes z = -1. */
    public void zoomIn() {
        z = z + 0.2f;
        if (z > -1.0f)
            z = -1.0f;
    }

    /** Moves the camera away; clamped at z = -20. */
    public void zoomOut() {
        z = z - 0.2f;
        if (z < -20.0f)
            z = -20.0f;
    }

    /** Rotates the square 3 degrees counter-clockwise about Z. */
    public void rotateL() {
        zrot = zrot + 3.0f;
    }

    /** Rotates the square 3 degrees clockwise about Z. */
    public void rotateR() {
        zrot = zrot - 3.0f;
    }

    /** Resets all rotations and speeds. */
    public void reset() {
        xrot = 0;
        yrot = 0;
        zrot = 0;
        xspeed = 0;
        yspeed = 0;
        // NOTE(review): resets depth to -5.0f although the field initializer is
        // -1.15f and zoomOut clamps at -20.0f — confirm this is intentional.
        z = -5.0f;
    }
}

This is my square class:

/**
 * Textured square rendered as a triangle strip. Converts an arbitrary bitmap
 * into a 256x256 power-of-two texture (scaling down and centering as needed)
 * and uploads it with mipmaps.
 */
public class Square {
    // Vertex buffer.
    private FloatBuffer vertexBuffer;
    // Texture-coordinate buffer.
    private FloatBuffer textureBuffer;
    // GL texture name(s); only textures[0] is used.
    private int[] textures = new int[3];
    // Bitmap to display on the square.
    private Bitmap image;

    // Vertex definition: a 2x2 quad centered at the origin, in strip order.
    private float vertices[] =
    {
        -1.0f, -1.0f, 0.0f,     // bottom left
        1.0f, -1.0f, 0.0f,      // bottom right
        -1.0f, 1.0f, 0.0f,      // top left
        1.0f, 1.0f, 0.0f        // top right
    };

    // (u, v) texture coordinates, matching the vertex order above
    // (flipped vertically so the bitmap appears upright).
    private float texture[] =
    {
        0.0f, 1.0f,
        1.0f, 1.0f,
        0.0f, 0.0f,
        1.0f, 0.0f
    };

    /**
     * Fills the native-order vertex and texture-coordinate buffers.
     *
     * @param image bitmap that will later be uploaded as the texture.
     */
    public Square(Bitmap image) {
        ByteBuffer byteBuf = ByteBuffer.allocateDirect(vertices.length * 4);
        byteBuf.order(ByteOrder.nativeOrder());
        vertexBuffer = byteBuf.asFloatBuffer();
        vertexBuffer.put(vertices);
        vertexBuffer.position(0);

        byteBuf = ByteBuffer.allocateDirect(texture.length * 4);
        byteBuf.order(ByteOrder.nativeOrder());
        textureBuffer = byteBuf.asFloatBuffer();
        textureBuffer.put(texture);
        textureBuffer.position(0);

        this.image = image;
    }

    /** Draws the textured quad as a triangle strip. */
    public void draw(GL10 gl) {
        gl.glFrontFace(GL10.GL_CCW);
        // Bind the previously generated texture.
        gl.glBindTexture(GL10.GL_TEXTURE_2D, textures[0]);
        // Point to our vertex and texture-coordinate buffers.
        gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vertexBuffer);
        gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, textureBuffer);
        // Enable the client-side arrays.
        gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
        gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
        // Draw the vertices as a triangle strip.
        gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, vertices.length / 3);
        // Disable the client state before leaving.
        gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
        gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
    }

    /**
     * Creates the GL texture from {@code image}: scales it down to fit in
     * 256x256 if needed, centers it on a transparent 256x256 power-of-two
     * canvas, uploads it, and generates mipmaps.
     */
    public void loadGLTexture(GL10 gl, Context context) {
        // Generate one texture name and bind it.
        gl.glGenTextures(1, textures, 0);
        gl.glBindTexture(GL10.GL_TEXTURE_2D, textures[0]);
        // Texture filters.
        gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_NEAREST);
        gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_LINEAR);
        // Wrap mode (GL10.GL_CLAMP_TO_EDGE is a possible alternative).
        gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_WRAP_S, GL10.GL_REPEAT);
        gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_WRAP_T, GL10.GL_REPEAT);

        Bitmap bitmap = image;
        // Scale down any image larger than 256x256, preserving aspect ratio.
        int newW = bitmap.getWidth();
        int newH = bitmap.getHeight();
        float fact;
        if (newH > 256 || newW > 256) {
            if (newH > 256) {
                // BUG FIX: was 255, which produced a 255-px result instead of 256.
                fact = (float) 256 / (float) newH; // scale factor to bring height to 256
                newH = (int) (newH * fact);
                newW = (int) (newW * fact);
            }
            if (newW > 256) {
                // BUG FIX: was 255, which produced a 255-px result instead of 256.
                fact = (float) 256 / (float) newW; // scale factor to bring width to 256
                newH = (int) (newH * fact);
                newW = (int) (newW * fact);
            }
            bitmap = Bitmap.createScaledBitmap(bitmap, newW, newH, true);
        }

        // Embed the (possibly non-power-of-two) bitmap centered in a
        // transparent 256x256 power-of-two bitmap so no white borders appear.
        int nextPot = 256;
        int h = bitmap.getHeight();
        int w = bitmap.getWidth();
        int offx = (nextPot - w) / 2; // left offset to center the image
        int offy = (nextPot - h) / 2; // top offset to center the image
        Bitmap bitmap2 = Bitmap.createBitmap(nextPot, nextPot, Bitmap.Config.ARGB_8888); // ARGB_8888 gives a transparent bitmap
        Canvas comboImage = new Canvas(bitmap2);
        comboImage.drawBitmap(bitmap, offx, offy, null);

        // Upload the bitmap as a 2D texture via Android's GLUtils.
        GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, bitmap2, 0);

        // On GL 1.1 generate mipmaps via the flag; otherwise fall back to our
        // own implementation.
        if (gl instanceof GL11) {
            gl.glTexParameterf(GL11.GL_TEXTURE_2D, GL11.GL_GENERATE_MIPMAP, GL11.GL_TRUE);
            GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, bitmap2, 0);
        } else {
            buildMipmap(gl, bitmap2);
        }
        // Release the intermediate bitmaps.
        // NOTE(review): when no scaling happened, `bitmap` is the caller's
        // original `image`, which gets recycled here — confirm callers never
        // reuse the bitmap they passed in.
        bitmap.recycle();
        bitmap2.recycle();
    }

    /**
     * Software mipmap generation: repeatedly halves the bitmap and uploads
     * each size as the next mipmap level, down to 1x1.
     */
    private void buildMipmap(GL10 gl, Bitmap bitmap) {
        int level = 0;
        int height = bitmap.getHeight();
        int width = bitmap.getWidth();
        while (height >= 1 || width >= 1) {
            GLUtils.texImage2D(GL10.GL_TEXTURE_2D, level, bitmap, 0);
            // BUG FIX: stop only once BOTH dimensions reach 1; stopping when
            // either did left non-square textures with an incomplete chain.
            if (height == 1 && width == 1) {
                break;
            }
            level++;
            // BUG FIX: clamp each dimension at 1 so createScaledBitmap is
            // never asked for a 0-sized bitmap on non-square input.
            height = Math.max(height / 2, 1);
            width = Math.max(width / 2, 1);
            Bitmap bitmap2 = Bitmap.createScaledBitmap(bitmap, width, height, true);
            bitmap.recycle();
            bitmap = bitmap2;
        }
    }
}

解决方案

Have you looked at the Android tutorial code? They have something very similar to this with examples in OpenGL ES 1 and 2.

In the OpenGL ES 1 tutorial, there's a section just for handling touch events. http://developer.android.com/resources/tutorials/opengl/opengl-es10.html#touch

So you'd want to modify the AddMotion section from glrotatef command into gltranslatef;

edit

Looks like you're more interested in coordinate conversion than object selection. So, wherever you touch on the screen, that's where the image goes (as opposed to touching and dragging an image, which would imply selection). And your question about winZ makes me think you're trying gluunproject. If that's the case, you already know your winZ because you translate the camera back from the object by your "z" variable. Since your z is negative, why not try this?

Assuming you've set your GLWrapper for your GLSurfaceView in your activity:

    mGLView.setGLWrapper(new GLWrapper() {
        public GL wrap(GL gl) {
            return new MatrixTrackingGL(gl);
        }

    });

Then, in your GLSurfaceView/Renderer subclass...

/**
 * Unprojects the window coordinate (x, y) at depth -z back into object
 * coordinates using the current modelview/projection matrices.
 *
 * @param gl current GL context (matrices are read via mMatrixGrabber).
 * @param x  window-space X (pixels).
 * @param y  window-space Y (pixels, GL convention: origin at bottom).
 * @return the object-space {x, y, z} point, or null if gluUnProject failed.
 */
public float[] unproject(GL10 gl, float x, float y) {
    mMatrixGrabber.getCurrentState(gl);
    int[] view = {0, 0, this.getWidth(), this.getHeight()};
    float[] pos = new float[4];
    float[] result = null;
    int retval = GLU.gluUnProject(x, y, -z,
            mMatrixGrabber.mModelView, 0,
            mMatrixGrabber.mProjection, 0,
            view, 0,
            pos, 0);
    if (retval != GL10.GL_TRUE) {
        Log.e("unproject", GLU.gluErrorString(retval));
    } else {
        // Perspective division: convert homogeneous coords to 3D.
        result = new float[3];
        result[0] = pos[0] / pos[3];
        result[1] = pos[1] / pos[3];
        result[2] = pos[2] / pos[3];
        // BUG FIX: removed `result = pos;`, which threw away the division
        // above and returned the raw homogeneous 4-vector instead.
    }
    return result;
}

Then you can modify your TouchEvent handler to contain

    switch (event.getAction()) 
    {
        case MotionEvent.ACTION_MOVE:
            //Calculamos el cambio
            float dx = x - oldX;
            float dy = y - oldY;
            xrot += dy * TOUCH_SCALE;
            yrot += dx * TOUCH_SCALE;
            //Log.w("XXXXXX", "ACTION_MOVE_NO_ZOOM");
            touching = true;
            break;
        case MotionEvent.ACTION_UP:
            xrot = 0;
            yrot = 0;
            zrot = 0;
            touching = false;
            break;
    }

And put this next section in your draw method before the other translate/scale/rotation calls:

    if (touching) {
        float[] point = unproject(gl, oldX, (this.getHeight() - oldY));
        if (point == null) {
            Log.e("Draw", "No Point");
        } else {
            gl.glTranslatef(point[0], point[1], 0);
        }
    }

Hopefully this gives you the result you want.

这篇关于如何移动与手指一个正方形的OpenGL?的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆