Comparing 2D and 3D Modeling and Rendering

2D:
// In the main program
…
double xyz[6];
c.getOverallMCBoundingBox(xyz);
ModelView::setMCRegionOfInterest(xyz);

c.run();

3D:

// In the main program, we will do the same, but more:
…
double xyz[6];
c.getOverallMCBoundingBox(xyz);
set3DViewingInformation(xyz);

c.run();

where set3DViewingInformation(xyz) does the following (sketched in code after the list):

  1. the "ModelView::setMCRegionOfInterest(xyz)" call. (The current MC region of interest is used at render time much as it was in 2D; in 3D it specifies the (x, y) limits, in eye coordinates, of the view frustum.)
  2. initialization of the view orientation parameters:
    1. center ← midpoint of region of interest
    2. eye ← center + some vector
    3. up ← an orientation vector
  3. initialization of the projection parameters:
    1. viewing limits along the line of sight (ecZmin, ecZmax) will be initialized according to a heuristic. These values will be used when render is called to set the z limits of the view frustum.
    2. the z coordinate (in the eye coordinate system) of the projection plane (ecZpp) will be initialized according to a heuristic.
    3. projection type will be initialized (ORTHOGONAL, OBLIQUE, or PERSPECTIVE).
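
A minimal sketch of set3DViewingInformation, based on the steps above, follows. It is only illustrative: the ModelView setter names (setEyeCenterUp, setECZminZmax, setProjectionPlaneZ, setProjection), the cryph::AffPoint/cryph::AffVector usage, and the numeric heuristics are assumptions that may not match your framework exactly.

void set3DViewingInformation(double xyz[6])
{
    // 1. Establish the MC region of interest, exactly as in 2D.
    ModelView::setMCRegionOfInterest(xyz);

    // 2. View orientation: center at the midpoint of the region of
    //    interest; eye = center + "some vector"; up roughly world-up.
    double dx = xyz[1] - xyz[0], dy = xyz[3] - xyz[2], dz = xyz[5] - xyz[4];
    double maxDelta = dx;
    if (dy > maxDelta) maxDelta = dy;
    if (dz > maxDelta) maxDelta = dz;
    cryph::AffPoint center(0.5*(xyz[0]+xyz[1]), 0.5*(xyz[2]+xyz[3]), 0.5*(xyz[4]+xyz[5]));
    cryph::AffPoint eye = center + cryph::AffVector(0.0, 0.0, 2.0*maxDelta);
    cryph::AffVector up(0.0, 1.0, 0.0);
    ModelView::setEyeCenterUp(eye, center, up);

    // 3. Projection parameters, initialized with simple heuristics so the
    //    region of interest falls inside the view frustum.
    double distEyeCenter = 2.0*maxDelta;
    double ecZmax = -(distEyeCenter - maxDelta); // near z limit of frustum (EC)
    double ecZmin = -(distEyeCenter + maxDelta); // far z limit of frustum (EC)
    double ecZpp  = ecZmax;                      // projection plane at the near limit
    ModelView::setECZminZmax(ecZmin, ecZmax);
    ModelView::setProjectionPlaneZ(ecZpp);
    ModelView::setProjection(PERSPECTIVE);       // or ORTHOGONAL / OBLIQUE
}

Any heuristic that keeps the region of interest between ecZmin and ecZmax is acceptable here.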
void My2DModelView::defineModel()
{
    glGenVertexArrays(numVAOsNeeded, VAOs);
    // for each VAO, i:
    glBindVertexArray(VAOs[i]);

    glGenBuffers(numVBOsNeeded, VBOs);
    // For each per-vertex attribute, j, in current VAO, use
    // glBindBuffer, glBufferData, et al. to send the data to the GPU.
    // For example, suppose PVA j holds the vertex coordinates:
    glBindBuffer(GL_ARRAY_BUFFER, VBOs[j]);
    // assume we have created an array of 2D coordinate
    // data in "coords". Assume there are "nCoords" 2D points.
    glBufferData(GL_ARRAY_BUFFER, nCoords*sizeof(vec2), coords,
    		GL_STATIC_DRAW);
    glVertexAttribPointer(shaderIF->pvaLoc("mcPosition"), 2, GL_FLOAT,
    		GL_FALSE, 0, 0);
    glEnableVertexAttribArray(shaderIF->pvaLoc("mcPosition"));
    // END: "For each per-vertex attribute, j, in current VAO"
    // END: "for each VAO, i"
}
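
The pattern above, instantiated for a deliberately trivial model: a single triangle using one VAO and one VBO. The coordinate data here is arbitrary and purely illustrative.

void My2DModelView::defineModel()
{
    // three 2D points in model coordinates
    float coords[] = { 0.0f, 0.0f,   1.0f, 0.0f,   0.5f, 1.0f };
    int nCoords = 3;

    glGenVertexArrays(1, VAOs);
    glBindVertexArray(VAOs[0]);

    glGenBuffers(1, VBOs);
    glBindBuffer(GL_ARRAY_BUFFER, VBOs[0]);
    glBufferData(GL_ARRAY_BUFFER, nCoords*2*sizeof(float), coords, GL_STATIC_DRAW);
    glVertexAttribPointer(shaderIF->pvaLoc("mcPosition"), 2, GL_FLOAT, GL_FALSE, 0, 0);
    glEnableVertexAttribArray(shaderIF->pvaLoc("mcPosition"));
}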
void My3DModelView::defineModel()
{
    glGenVertexArrays(numVAOsNeeded, VAOs);
    // for each VAO, i:
    glBindVertexArray(VAOs[i]);

    glGenBuffers(numVBOsNeeded, VBOs);
    // For each per-vertex attribute, j, in current VAO, use
    // glBindBuffer, glBufferData, et al. to send the data to the GPU.
    // For example, suppose PVA j holds the vertex coordinates:
    glBindBuffer(GL_ARRAY_BUFFER, VBOs[j]);
    // assume we have created an array of 3D coordinate
    // data in "coords". Assume there are "nCoords" 3D points.
    glBufferData(GL_ARRAY_BUFFER, nCoords*sizeof(vec3), coords,
    		GL_STATIC_DRAW);
    glVertexAttribPointer(shaderIF->pvaLoc("mcPosition"), 3, GL_FLOAT,
    		GL_FALSE, 0, 0);
    glEnableVertexAttribArray(shaderIF->pvaLoc("mcPosition"));
    // END: "For each per-vertex attribute, j, in current VAO"
    // END: "for each VAO, i"
}
If the object whose geometry is being defined is a curved surface, then there will most likely be another set of glBindBuffer-glBufferData-glVertexAttribPointer-glEnableVertexAttribArray calls (using shaderIF->pvaLoc("mcNormal")) to define 3D normal vectors for the vertices. If instead the normals are constant for large sections of the coordinate arrays, this is often handled using glVertexAttrib* calls between successive calls to routines like glDrawArrays in render.
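
For example (a hedged sketch; the VBO index, data array, face normals, and vertex offsets here are hypothetical), the additional per-vertex normal PVA inside defineModel would look like:

    glBindBuffer(GL_ARRAY_BUFFER, VBOs[j+1]);
    glBufferData(GL_ARRAY_BUFFER, nCoords*sizeof(vec3), normals, GL_STATIC_DRAW);
    glVertexAttribPointer(shaderIF->pvaLoc("mcNormal"), 3, GL_FLOAT, GL_FALSE, 0, 0);
    glEnableVertexAttribArray(shaderIF->pvaLoc("mcNormal"));

while the constant-normal alternative appears at render time, for example:

    glBindVertexArray(VAOs[0]);
    // The mcNormal array is disabled (or was never enabled), so the current
    // generic attribute value set by glVertexAttrib3f is used for every
    // vertex of the following draw call:
    glDisableVertexAttribArray(shaderIF->pvaLoc("mcNormal"));
    glVertexAttrib3f(shaderIF->pvaLoc("mcNormal"), 0.0, 1.0, 0.0); // top face
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    glVertexAttrib3f(shaderIF->pvaLoc("mcNormal"), 1.0, 0.0, 0.0); // +x face
    glDrawArrays(GL_TRIANGLE_STRIP, 4, 4);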
void My2DModelView::render()
{
    // save the current GLSL program in use
    int savedPgm;
    glGetIntegerv(GL_CURRENT_PROGRAM, &savedPgm);
    glUseProgram(shaderIF->getShaderPgmID());

    // define the Window-Viewport map: MC to -1..+1 LDS:
    float scaleTrans[4];
    computeScaleTrans(scaleTrans);
    glUniform4fv(shaderIF->ppuLoc("scaleTrans"), 1, scaleTrans);

    renderMy2DModelView();

    glUseProgram(savedPgm);
}
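
A sketch of how computeScaleTrans can build the window-viewport map follows. It assumes the MC region of interest is available in a hypothetical array mcRegionOfInterest holding {xmin, xmax, ymin, ymax}, and it omits the aspect-ratio handling a production version would typically include.

void My2DModelView::computeScaleTrans(float* scaleTrans)
{
    double xmin = mcRegionOfInterest[0], xmax = mcRegionOfInterest[1];
    double ymin = mcRegionOfInterest[2], ymax = mcRegionOfInterest[3];

    // ldsX = sx*mcX + tx maps [xmin, xmax] to [-1, +1]
    double sx = 2.0 / (xmax - xmin);
    double tx = -(xmax + xmin) / (xmax - xmin);
    // ldsY = sy*mcY + ty maps [ymin, ymax] to [-1, +1]
    double sy = 2.0 / (ymax - ymin);
    double ty = -(ymax + ymin) / (ymax - ymin);

    scaleTrans[0] = static_cast<float>(sx);
    scaleTrans[1] = static_cast<float>(tx);
    scaleTrans[2] = static_cast<float>(sy);
    scaleTrans[3] = static_cast<float>(ty);
}

These four values are exactly the coefficients consumed by simple2d.vsh below.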
void My3DModelView::render()
{
    // save the current GLSL program in use
    int savedPgm;
    glGetIntegerv(GL_CURRENT_PROGRAM, &savedPgm);
    glUseProgram(shaderIF->getShaderPgmID());

    // mc_ec: View orientation and dynamic rotation
    // ec_lds: 3D-2D projection, map to LDS, dynamic zoom:
    cryph::Matrix4x4 mc_ec, ec_lds;
    getMatrices(mc_ec, ec_lds);
    float m[16];
    glUniformMatrix4fv(shaderIF->ppuLoc("mc_ec"), 1, false, mc_ec.extractColMajor(m));
    glUniformMatrix4fv(shaderIF->ppuLoc("ec_lds"), 1, false, ec_lds.extractColMajor(m));

    renderMy3DModelView();

    glUseProgram(savedPgm);
}
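
Conceptually, getMatrices just composes the pieces named in the comments above. The sketch below is only illustrative: the cryph factory methods (lookAt, orthogonal) and the member variables (dynamicRotation, eye, center, up, and the EC frustum limits) are assumptions and will not match any particular framework exactly.

void My3DModelView::getMatrices(cryph::Matrix4x4& mc_ec, cryph::Matrix4x4& ec_lds)
{
    // mc_ec: interactive (dynamic) rotations applied after the basic view
    //        orientation determined by (eye, center, up).
    mc_ec = dynamicRotation * cryph::Matrix4x4::lookAt(eye, center, up);

    // ec_lds: a projection built from the EC limits of the view frustum that
    //         simultaneously performs the window-viewport map into -1..+1
    //         logical device space (dynamic zoom can be folded in here too).
    ec_lds = cryph::Matrix4x4::orthogonal(ecXmin, ecXmax, ecYmin, ecYmax, ecZmin, ecZmax);
}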
#version 410 core

// simple2d.vsh - A simplistic vertex shader that illustrates ONE WAY
//                to handle transformations from 2D MC to LDS.

// Naming convention for variables holding coordinates:
// mc - model coordinates
// lds - logical device space

uniform vec4 scaleTrans;

// Per-vertex attributes
in vec2 mcPosition; // incoming vertex position in model coordinates

void main()
{
    // transform mc to lds (equivalent to the final line of
    // simple3d.vsh below)
    float ldsX = scaleTrans[0]*mcPosition[0] + scaleTrans[1];
    float ldsY = scaleTrans[2]*mcPosition[1] + scaleTrans[3];
    gl_Position = vec4(ldsX, ldsY, 0, 1);
}
#version 410 core

// simple3d.vsh - A simple 3D vertex shader that illustrates ONE WAY
//                to handle viewing transformations and communication
//                with the fragment shader (responsible for lighting model)

// Naming convention for variables holding coordinates:
// mc - model coordinates
// ec - eye coordinates
// lds - logical device space
// "p_" prefix on any of the preceding indicates the coordinates have been
//      embedded in projective space
// (gl_Position would have label p_lds)

// Transformation Matrices
uniform mat4 mc_ec,  // (dynamic rotations) * (ViewOrientation(E,C,up))
             ec_lds; // (W-V map) * (projection matrix)
             
// Per-vertex attributes
in vec3 mcPosition; // incoming vertex position in model coordinates
in vec3 mcNormal; // incoming normal vector in model coordinates

// Pass on data that the fragment shader will need for the lighting model:
out PVA
{
    vec3 ecPosition;
    vec3 ecUnitNormal;
} pvaOut;

void main ()
{
    // convert current vertex and its associated normal to eye coordinates
    vec4 p_ecPosition = mc_ec * vec4(mcPosition, 1.0);
    pvaOut.ecPosition = p_ecPosition.xyz/p_ecPosition.w;
    mat3 normalMatrix = transpose( inverse( mat3x3(mc_ec) ) );
    pvaOut.ecUnitNormal = normalize(normalMatrix * mcNormal);

    // apply the projection matrix to compute the projective space representation
    // of the 3D logical device space coordinates of the input vertex:
    gl_Position = ec_lds * p_ecPosition;
}
2D Fragment Shader: We have seen a wide variety of techniques that can be used to generate a fragment color.

3D Fragment Shader: We will study and implement mathematical models that simulate light-surface interaction to obtain a fragment color.
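
To make the 3D fragment shader description concrete, here is a deliberately minimal GLSL fragment shader that consumes the PVA block written by simple3d.vsh. It is only an illustration (one hard-coded directional light, a Lambert diffuse term, and an ambient fudge), not the lighting model to be developed; the light direction and reflectance values are arbitrary assumptions.

#version 410 core

// minimal.fsh - an illustrative fragment shader (not the full lighting model)

in PVA
{
    vec3 ecPosition;
    vec3 ecUnitNormal;
} pvaIn;

out vec4 fragmentColor;

void main()
{
    vec3 toLight = normalize(vec3(0.0, 0.0, 1.0)); // directional light in EC
    vec3 kd = vec3(0.8, 0.8, 0.8);                 // assumed diffuse reflectance

    // Re-normalize the interpolated normal, flip it toward the viewer,
    // then apply the Lambert cosine term.
    vec3 n = normalize(pvaIn.ecUnitNormal);
    if (n.z < 0.0)
        n = -n;
    float diffuse = max(dot(n, toLight), 0.0);

    fragmentColor = vec4(0.15*kd + diffuse*kd, 1.0);
}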