felix_driver.cpp


#include <iostream>
#include <fstream>
#include "felix_driver.H"
#include "Albany_CismSTKMeshStruct.hpp"
#include "Teuchos_ParameterList.hpp"
#include "Teuchos_RCP.hpp"
#include "Albany_Utils.hpp"
#include "Albany_SolverFactory.hpp"
#include "Teuchos_XMLParameterListHelpers.hpp"
#include <stk_mesh/base/FieldData.hpp>
#include "Piro_PerformSolve.hpp"
#include <stk_io/IossBridge.hpp>
#include <stk_io/MeshReadWriteUtils.hpp>
#include <stk_mesh/base/GetEntities.hpp>
#include <Ionit_Initializer.h>
#include "Albany_OrdinarySTKFieldContainer.hpp"
#include "Thyra_EpetraThyraWrappers.hpp"

// Uncomment the following line to write solutions and maps to Matrix Market files for debugging.
//#define WRITE_TO_MATRIX_MARKET

#ifdef WRITE_TO_MATRIX_MARKET
#include "EpetraExt_MultiVectorOut.h"
#include "EpetraExt_BlockMapOut.h"
#endif

Teuchos::RCP<Albany::CismSTKMeshStruct> meshStruct;
Teuchos::RCP<const Epetra_Comm> mpiComm;
Teuchos::RCP<Teuchos::ParameterList> appParams;
Teuchos::RCP<Teuchos::ParameterList> discParams;
Teuchos::RCP<Albany::SolverFactory> slvrfctry;
Teuchos::RCP<Thyra::ModelEvaluator<double> > solver;
//IK, 11/14/13: what is reducedComm for?
MPI_Comm comm, reducedComm;
bool interleavedOrdering;
int nNodes2D; //number of global nodes in the domain in 2D
int nNodesProc2D; //number of nodes on each processor in 2D
//vector used to renumber nodes on each processor from the Albany convention (horizontal levels first) to the CISM convention (vertical layers first)
std::vector<int> cismToAlbanyNodeNumberMap;


int rank, number_procs;
long cism_communicator;
int cism_process_count, my_cism_rank;
double dew, dns;
long * dimInfo;
int * dimInfoGeom;
long ewlb, ewub, nslb, nsub;
long ewn, nsn, upn, nhalo;
long global_ewn, global_nsn;
double * seconds_per_year_ptr, * gravity_ptr, * rho_ice_ptr, * rho_seawater_ptr;
double * thicknessDataPtr, *topographyDataPtr;
double * upperSurfaceDataPtr, * lowerSurfaceDataPtr;
double * floating_maskDataPtr, * ice_maskDataPtr, * lower_cell_locDataPtr;
long nCellsActive;
int nNodes, nElementsActive;
double* xyz_at_nodes_Ptr, *surf_height_at_nodes_Ptr, *beta_at_nodes_Ptr;
double *flwa_at_active_elements_Ptr;
int * global_node_id_owned_map_Ptr;
int * global_element_conn_active_Ptr;
int * global_element_id_active_owned_map_Ptr;
int * global_basal_face_conn_active_Ptr;
int * global_basal_face_id_active_owned_map_Ptr;
double *uVel_ptr;
double *vVel_ptr;
bool first_time_step = true;
Teuchos::RCP<Epetra_Map> node_map;


Teuchos::RCP<const Epetra_Vector>
epetraVectorFromThyra(
  const Teuchos::RCP<const Epetra_Comm> &comm,
  const Teuchos::RCP<const Thyra::VectorBase<double> > &thyra)
{
  Teuchos::RCP<const Epetra_Vector> result;
  if (Teuchos::nonnull(thyra)) {
    const Teuchos::RCP<const Epetra_Map> epetra_map = Thyra::get_Epetra_Map(*thyra->space(), comm);
    result = Thyra::get_Epetra_Vector(*epetra_map, thyra);
  }
  return result;
}
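
// Illustrative usage only (not part of the driver; `someThyraVector` is a
// hypothetical name standing in for a vector obtained from a solve):
//
//   Teuchos::RCP<const Thyra::VectorBase<double> > someThyraVector = ...;
//   Teuchos::RCP<const Epetra_Vector> ev = epetraVectorFromThyra(mpiComm, someThyraVector);
//   if (Teuchos::nonnull(ev)) ev->Print(std::cout); // prints owned entries on each rank
//
// The null check mirrors the helper itself: a null Thyra RCP passes through as a null Epetra RCP.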

Teuchos::RCP<const Epetra_MultiVector>
epetraMultiVectorFromThyra(
  const Teuchos::RCP<const Epetra_Comm> &comm,
  const Teuchos::RCP<const Thyra::MultiVectorBase<double> > &thyra)
{
  Teuchos::RCP<const Epetra_MultiVector> result;
  if (Teuchos::nonnull(thyra)) {
    const Teuchos::RCP<const Epetra_Map> epetra_map = Thyra::get_Epetra_Map(*thyra->range(), comm);
    result = Thyra::get_Epetra_MultiVector(*epetra_map, thyra);
  }
  return result;
}

void epetraFromThyra(
  const Teuchos::RCP<const Epetra_Comm> &comm,
  const Teuchos::Array<Teuchos::RCP<const Thyra::VectorBase<double> > > &thyraResponses,
  const Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Thyra::MultiVectorBase<double> > > > &thyraSensitivities,
  Teuchos::Array<Teuchos::RCP<const Epetra_Vector> > &responses,
  Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Epetra_MultiVector> > > &sensitivities)
{
  responses.clear();
  responses.reserve(thyraResponses.size());
  typedef Teuchos::Array<Teuchos::RCP<const Thyra::VectorBase<double> > > ThyraResponseArray;
  for (ThyraResponseArray::const_iterator it_begin = thyraResponses.begin(),
      it_end = thyraResponses.end(),
      it = it_begin;
      it != it_end;
      ++it) {
    responses.push_back(epetraVectorFromThyra(comm, *it));
  }

  sensitivities.clear();
  sensitivities.reserve(thyraSensitivities.size());
  typedef Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Thyra::MultiVectorBase<double> > > > ThyraSensitivityArray;
  for (ThyraSensitivityArray::const_iterator it_begin = thyraSensitivities.begin(),
      it_end = thyraSensitivities.end(),
      it = it_begin;
      it != it_end;
      ++it) {
    ThyraSensitivityArray::const_reference sens_thyra = *it;
    Teuchos::Array<Teuchos::RCP<const Epetra_MultiVector> > sens;
    sens.reserve(sens_thyra.size());
    for (ThyraSensitivityArray::value_type::const_iterator jt = sens_thyra.begin(),
        jt_end = sens_thyra.end();
        jt != jt_end;
        ++jt) {
      sens.push_back(epetraMultiVectorFromThyra(comm, *jt));
    }
    sensitivities.push_back(sens);
  }
}


extern "C" void felix_driver_();

//What is exec_mode??
void felix_driver_init(int argc, int exec_mode, FelixToGlimmer * ftg_ptr, const char * input_fname)
{

    // ---------------------------------------------
    // get communicator / communicator info from CISM
    // TO DO: ifdef to check if CISM and Albany have MPI?
    //#ifdef HAVE_MPI
    //#else
    //#endif
    // ---------------------------------------------
    // The following line needs to change...  It is for a serial run...
    cism_communicator = *(ftg_ptr -> getLongVar("communicator","mpi_vars"));
    cism_process_count = *(ftg_ptr -> getLongVar("process_count","mpi_vars"));
    my_cism_rank = *(ftg_ptr -> getLongVar("my_rank","mpi_vars"));
    //get the MPI_Comm handle from Fortran
    comm = MPI_Comm_f2c(cism_communicator);
    //MPI_Comm_size(comm, &cism_process_count);
    //MPI_Comm_rank(comm, &my_cism_rank);
    //convert comm to an Epetra_Comm
    //mpiComm = Albany::createEpetraCommFromMpiComm(reducedComm);
    mpiComm = Albany::createEpetraCommFromMpiComm(comm);

    if (mpiComm->MyPID() == 0) {
      std::cout << "In felix_driver..." << std::endl;
      std::cout << "Printing this from Albany...  This worked!  Yay!  Nov. 14 2013" << std::endl;
    }


    // ---------------------------------------------
    // get geometry info from CISM
    // IK, 11/14/13: these things may not be needed in Albany/FELIX...  for now they are passed anyway.
    // ---------------------------------------------

    if (mpiComm->MyPID() == 0) std::cout << "Getting geometry info from CISM..." << std::endl;
    dimInfo = ftg_ptr -> getLongVar("dimInfo","geometry");
    dew = *(ftg_ptr -> getDoubleVar("dew","numerics"));
    dns = *(ftg_ptr -> getDoubleVar("dns","numerics"));
    if (mpiComm->MyPID() == 0) std::cout << "In felix_driver: dew, dns = " << dew << "  " << dns << std::endl;
    dimInfoGeom = new int[dimInfo[0]+1];
    for (int i=0; i<=dimInfo[0]; i++) dimInfoGeom[i] = dimInfo[i];
    if (mpiComm->MyPID() == 0) {
      std::cout << "dimInfoGeom in felix_driver: " << std::endl;
      for (int i=0; i<=dimInfoGeom[0]; i++) std::cout << dimInfoGeom[i] << " ";
      std::cout << std::endl;
    }
    global_ewn = dimInfoGeom[2];
    global_nsn = dimInfoGeom[3];
    if (mpiComm->MyPID() == 0) std::cout << "In felix_driver: global_ewn = " << global_ewn << ", global_nsn = " << global_nsn << std::endl;
    ewlb = *(ftg_ptr -> getLongVar("ewlb","geometry"));
    ewub = *(ftg_ptr -> getLongVar("ewub","geometry"));
    nslb = *(ftg_ptr -> getLongVar("nslb","geometry"));
    nsub = *(ftg_ptr -> getLongVar("nsub","geometry"));
    nhalo = *(ftg_ptr -> getLongVar("nhalo","geometry"));
    ewn = *(ftg_ptr -> getLongVar("ewn","geometry"));
    nsn = *(ftg_ptr -> getLongVar("nsn","geometry"));
    upn = *(ftg_ptr -> getLongVar("upn","geometry"));
    std::cout << "In felix_driver: Proc #" << mpiComm->MyPID() << ", ewn = " << ewn << ", nsn = " << nsn << ", upn = " << upn << ", nhalo = " << nhalo << std::endl;


    // ---------------------------------------------
    // get constants from CISM
    // IK, 11/14/13: these things may not be needed in Albany/FELIX...  for now they are passed anyway.
    // ---------------------------------------------

    seconds_per_year_ptr = ftg_ptr -> getDoubleVar("seconds_per_year","constants");
    gravity_ptr = ftg_ptr -> getDoubleVar("gravity","constants");
    rho_ice_ptr = ftg_ptr -> getDoubleVar("rho_ice","constants");
    rho_seawater_ptr = ftg_ptr -> getDoubleVar("rho_seawater","constants");
    thicknessDataPtr = ftg_ptr -> getDoubleVar("thck","geometry");
    topographyDataPtr = ftg_ptr -> getDoubleVar("topg","geometry");
    upperSurfaceDataPtr = ftg_ptr -> getDoubleVar("usrf","geometry");
    lowerSurfaceDataPtr = ftg_ptr -> getDoubleVar("lsrf","geometry");
    floating_maskDataPtr = ftg_ptr -> getDoubleVar("floating_mask","geometry");
    ice_maskDataPtr = ftg_ptr -> getDoubleVar("ice_mask","geometry");
    lower_cell_locDataPtr = ftg_ptr -> getDoubleVar("lower_cell_loc","geometry");

    // ---------------------------------------------
    // get connectivity arrays from CISM
    // IK, 11/14/13: these things may not be needed in Albany/FELIX...  for now they are passed anyway.
    // ---------------------------------------------
    if (mpiComm->MyPID() == 0) std::cout << "In felix_driver: grabbing connectivity array pointers from CISM..." << std::endl;
    //IK, 11/13/13: check that connectivity derived types are transferred over from CISM to Albany/FELIX
    nCellsActive = *(ftg_ptr -> getLongVar("nCellsActive","connectivity"));
    std::cout << "In felix_driver: Proc #" << mpiComm->MyPID() << ", nCellsActive = " << nCellsActive << std::endl;
    xyz_at_nodes_Ptr = ftg_ptr -> getDoubleVar("xyz_at_nodes","connectivity");
    surf_height_at_nodes_Ptr = ftg_ptr -> getDoubleVar("surf_height_at_nodes","connectivity");
    beta_at_nodes_Ptr = ftg_ptr -> getDoubleVar("beta_at_nodes","connectivity");
    flwa_at_active_elements_Ptr = ftg_ptr -> getDoubleVar("flwa_at_active_elements","connectivity");
    global_node_id_owned_map_Ptr = ftg_ptr -> getInt4Var("global_node_id_owned_map","connectivity");
    global_element_conn_active_Ptr = ftg_ptr -> getInt4Var("global_element_conn_active","connectivity");
    global_element_id_active_owned_map_Ptr = ftg_ptr -> getInt4Var("global_element_id_active_owned_map","connectivity");
    global_basal_face_conn_active_Ptr = ftg_ptr -> getInt4Var("global_basal_face_conn_active","connectivity");
    global_basal_face_id_active_owned_map_Ptr = ftg_ptr -> getInt4Var("global_basal_face_id_active_owned_map","connectivity");
    if (mpiComm->MyPID() == 0) std::cout << "...done!" << std::endl;

    // ---------------------------------------------
    // get u and v velocity solution from Glimmer-CISM
    // IK, 11/26/13: need to concatenate these into a single solution vector to use as the initial condition for the Albany/FELIX solve
    // ---------------------------------------------
    if (mpiComm->MyPID() == 0) std::cout << "In felix_driver: grabbing pointers to u and v velocities in CISM..." << std::endl;
    uVel_ptr = ftg_ptr -> getDoubleVar("uvel", "velocity");
    vVel_ptr = ftg_ptr -> getDoubleVar("vvel", "velocity");
    if (mpiComm->MyPID() == 0) std::cout << "...done!" << std::endl;


    // ---------------------------------------------
    // create Albany mesh
    // ---------------------------------------------
    // Read input file, the name of which is provided in the Glimmer/CISM .config file.
    if (mpiComm->MyPID() == 0) std::cout << "In felix_driver: creating Albany mesh struct..." << std::endl;
    slvrfctry = Teuchos::rcp(new Albany::SolverFactory(input_fname, comm));
    discParams = Teuchos::sublist(Teuchos::rcp(&slvrfctry->getParameters(),false), "Discretization", true);
    Teuchos::RCP<Albany::StateInfoStruct> sis = Teuchos::rcp(new Albany::StateInfoStruct);
    Albany::AbstractFieldContainer::FieldContainerRequirements req;
    req.push_back("Surface Height");
    req.push_back("Temperature");
    req.push_back("Basal Friction");
    req.push_back("Thickness");
    req.push_back("Flow Factor");
    int neq = 2; //number of equations - 2 for FO Stokes
    //IK, 11/14/13, debug output: check that pointers that are passed from CISM are not null
    //std::cout << "DEBUG: xyz_at_nodes_Ptr:" << xyz_at_nodes_Ptr << std::endl;
    //std::cout << "DEBUG: surf_height_at_nodes_Ptr:" << surf_height_at_nodes_Ptr << std::endl;
    //std::cout << "DEBUG: beta_at_nodes_Ptr:" << beta_at_nodes_Ptr << std::endl;
    //std::cout << "DEBUG: flwa_at_active_elements_Ptr:" << flwa_at_active_elements_Ptr << std::endl;
    //std::cout << "DEBUG: global_node_id_owned_map_Ptr:" << global_node_id_owned_map_Ptr << std::endl;
    //std::cout << "DEBUG: global_element_conn_active_Ptr:" << global_element_conn_active_Ptr << std::endl;
    //std::cout << "DEBUG: global_basal_face_conn_active_Ptr:" << global_basal_face_conn_active_Ptr << std::endl;
    //std::cout << "DEBUG: global_basal_face_id_active_owned_map_Ptr:" << global_basal_face_id_active_owned_map_Ptr << std::endl;

    nNodes = (ewn-2*nhalo+1)*(nsn-2*nhalo+1)*upn; //number of nodes in mesh (on each processor)
    nElementsActive = nCellsActive*(upn-1); //number of 3D active elements in mesh
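    // Worked size check (hypothetical values, not read from CISM): with ewn = 10,
    // nsn = 8, nhalo = 2, and upn = 5 the owned grid is (10-4+1) x (8-4+1) = 7 x 5
    // columns of nodes, so nNodes = 7*5*5 = 175 local nodes; and with
    // nCellsActive = 20, nElementsActive = 20*(5-1) = 80 hexahedral elements.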

    meshStruct = Teuchos::rcp(new Albany::CismSTKMeshStruct(discParams, mpiComm, xyz_at_nodes_Ptr, global_node_id_owned_map_Ptr, global_element_id_active_owned_map_Ptr,
                                                            global_element_conn_active_Ptr, global_basal_face_id_active_owned_map_Ptr, global_basal_face_conn_active_Ptr,
                                                            beta_at_nodes_Ptr, surf_height_at_nodes_Ptr, flwa_at_active_elements_Ptr, nNodes, nElementsActive, nCellsActive));
    meshStruct->constructMesh(mpiComm, discParams, neq, req, sis, meshStruct->getMeshSpecs()[0]->worksetSize);

    interleavedOrdering = meshStruct->getInterleavedOrdering();
    Albany::AbstractSTKFieldContainer::VectorFieldType* solutionField;
    if (interleavedOrdering)
      solutionField = Teuchos::rcp_dynamic_cast<Albany::OrdinarySTKFieldContainer<true> >(meshStruct->getFieldContainer())->getSolutionField();
    else
      solutionField = Teuchos::rcp_dynamic_cast<Albany::OrdinarySTKFieldContainer<false> >(meshStruct->getFieldContainer())->getSolutionField();

    //Create node_map; global_node_id_owned_map_Ptr holds 1-based global node IDs, so node_map is 1-based
    node_map = Teuchos::rcp(new Epetra_Map(-1, nNodes, global_node_id_owned_map_Ptr, 0, *mpiComm));
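    // Illustrative note (hypothetical GIDs): the map is constructed with index base 0,
    // but the GIDs supplied in global_node_id_owned_map_Ptr start at 1. So if a rank
    // owns GIDs {1, 5, 9}, then node_map->LID(5) == 1 and node_map->LID(2) == -1
    // (not owned). The -1 first argument asks Epetra to compute the global node count.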

    if (mpiComm->MyPID() == 0) std::cout << "...done!" << std::endl;

    // ---------------------------------------------
    // Set restart solution to the one passed from CISM
    // ---------------------------------------------

    if (mpiComm->MyPID() == 0) std::cout << "In felix_driver: setting initial condition from CISM..." << std::endl;
    //Create vector used to renumber nodes on each processor from the Albany convention (horizontal levels first) to the CISM convention (vertical layers first)
    nNodes2D = (global_ewn + 1)*(global_nsn + 1); //number of global nodes in the domain in 2D
    nNodesProc2D = (nsn-2*nhalo+1)*(ewn-2*nhalo+1); //number of nodes on each processor in 2D
    cismToAlbanyNodeNumberMap.resize(upn*nNodesProc2D);
    for (int j=0; j<nsn-2*nhalo+1; j++) {
      for (int i=0; i<ewn-2*nhalo+1; i++) {
        for (int k=0; k<upn; k++) {
          int index = k + upn*i + j*(ewn-2*nhalo+1)*upn;
          cismToAlbanyNodeNumberMap[index] = k*nNodes2D + global_node_id_owned_map_Ptr[i + j*(ewn-2*nhalo+1)];
          //if (mpiComm->MyPID() == 0)
          //  std::cout << "index: " << index << ", cismToAlbanyNodeNumberMap: " << cismToAlbanyNodeNumberMap[index] << std::endl;
        }
      }
    }
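
    // Worked example of the renumbering above (hypothetical sizes, not read from CISM):
    // suppose upn = 3 vertical levels, ewn-2*nhalo+1 = 4 owned columns in x, and
    // nNodes2D = 100. For (i,j,k) = (1,0,2):
    //   index = 2 + 3*1 + 0 = 5
    //   cismToAlbanyNodeNumberMap[5] = 2*100 + global_node_id_owned_map_Ptr[1]
    // i.e. vertical level k selects a block of nNodes2D consecutive global IDs,
    // matching CISM's layers-first numbering convention.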

    //The way it worked out, uVel_ptr and vVel_ptr have more nodes than the nodes in the mesh passed to Albany/CISM for the solve.  In particular,
    //there is 1 row of halo elements in uVel_ptr and vVel_ptr.  To account for this, we copy uVel_ptr and vVel_ptr into std::vectors, which do not have the halo elements.
    std::vector<double> uvel_vec(upn*nNodesProc2D);
    std::vector<double> vvel_vec(upn*nNodesProc2D);
    int counter1 = 0;
    int counter2 = 0;
    int local_nodeID;
    for (int j=0; j<nsn-1; j++) {
      for (int i=0; i<ewn-1; i++) {
        for (int k=0; k<upn; k++) {
          if (j >= nhalo-1 && j < nsn-nhalo) {
            if (i >= nhalo-1 && i < ewn-nhalo) {
              local_nodeID = node_map->LID(cismToAlbanyNodeNumberMap[counter1]);
              uvel_vec[counter1] = uVel_ptr[counter2];
              vvel_vec[counter1] = vVel_ptr[counter2];
              counter1++;
            }
          }
          counter2++;
        }
      }
    }
    //Loop over all the elements to find which nodes are active.  For the active nodes, copy uvel and vvel from CISM into the Albany solution array to
    //use as the initial condition.
    //NOTE: there is some inefficiency here by looping over all the elements.  TO DO? pass only active nodes from Albany-CISM to improve this?
    for (int i=0; i<nElementsActive; i++) {
      for (int j=0; j<8; j++) {
        int node_GID = global_element_conn_active_Ptr[i + nElementsActive*j]; //node_GID is 1-based
        int node_LID = node_map->LID(node_GID); //node_LID is 0-based
        stk::mesh::Entity& node = *meshStruct->bulkData->get_entity(meshStruct->metaData->node_rank(), node_GID);
        double* sol = stk::mesh::field_data(*solutionField, node);
        sol[0] = uvel_vec[node_LID];
        sol[1] = vvel_vec[node_LID];
      }
    }
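    // Layout note (inferred from the indexing above, since the array comes from
    // Fortran and is therefore column-major): node j of element i lives at
    // global_element_conn_active_Ptr[i + nElementsActive*j]. For example, with a
    // hypothetical nElementsActive = 10, node 3 of element 2 sits at index 2 + 10*3 = 32.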

    if (mpiComm->MyPID() == 0) std::cout << "...done!" << std::endl;

    // clean up
    if (mpiComm->MyPID() == 0) std::cout << "exec mode = " << exec_mode << std::endl;

}

// The solve is done in the felix_driver_run function, and the solution is passed back to Glimmer-CISM
// IK, 12/3/13: time_inc_yr and cur_time_yr are not used here...
void felix_driver_run(FelixToGlimmer * ftg_ptr, double& cur_time_yr, double time_inc_yr)
{

    //IK, 12/9/13: how come FancyOStream prints on all processors??
    Teuchos::RCP<Teuchos::FancyOStream> out(Teuchos::VerboseObjectBase::getDefaultOStream());

    if (mpiComm->MyPID() == 0) std::cout << "In felix_driver_run, cur_time, time_inc = " << cur_time_yr << "   " << time_inc_yr << std::endl;
    // ---------------------------------------------------------------------------------------------------
    // Solve
    // ---------------------------------------------------------------------------------------------------

    if (mpiComm->MyPID() == 0) std::cout << "In felix_driver_run: starting the solve... " << std::endl;
    //Need to set HasRestartSolution such that uvel_Ptr and vvel_Ptr (u and v from Glimmer/CISM) are always set as the initial condition?
    meshStruct->setHasRestartSolution(!first_time_step);

    Teuchos::RCP<Albany::AbstractSTKMeshStruct> stkMeshStruct = meshStruct;
    discParams->set("STKMeshStruct",stkMeshStruct);
    Teuchos::RCP<Teuchos::ParameterList> paramList = Teuchos::rcp(&slvrfctry->getParameters(),false);
    //TO DO: add checking if first time step or not
    if (!first_time_step)
    {
      meshStruct->setRestartDataTime(paramList->sublist("Problem").get("Homotopy Restart Step", 1.));
      double homotopy = paramList->sublist("Problem").sublist("FELIX Viscosity").get("Glen's Law Homotopy Parameter", 1.0);
      if (meshStruct->restartDataTime() == homotopy)
        paramList->sublist("Problem").set("Solution Method", "Steady");
    }
    Teuchos::RCP<Albany::Application> app = Teuchos::rcp(new Albany::Application(mpiComm, paramList));
    solver = slvrfctry->createThyraSolverAndGetAlbanyApp(app, mpiComm, mpiComm);

    Teuchos::ParameterList solveParams;
    solveParams.set("Compute Sensitivities", true);
    Teuchos::Array<Teuchos::RCP<const Thyra::VectorBase<double> > > thyraResponses;
    Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Thyra::MultiVectorBase<double> > > > thyraSensitivities;
    Piro::PerformSolveBase(*solver, solveParams, thyraResponses, thyraSensitivities);

    const Epetra_Map& ownedMap(*app->getDiscretization()->getMap()); //owned map
    const Epetra_Map& overlapMap(*app->getDiscretization()->getOverlapMap()); //overlap map
    Epetra_Import import(overlapMap, ownedMap); //importer from ownedMap to overlapMap
    Epetra_Vector solutionOverlap(overlapMap); //overlapped solution
    solutionOverlap.Import(*app->getDiscretization()->getSolutionField(), import, Insert);
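    // Note on the two maps (standard Epetra usage): ownedMap partitions the dofs
    // uniquely across ranks, while overlapMap also contains the ghosted dofs shared
    // with neighboring ranks. The Epetra_Import above pulls owned solution values into
    // the overlapped layout, so each rank can read the solution at all of its local nodes.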
    if (mpiComm->MyPID() == 0) std::cout << "...done!" << std::endl;

#ifdef WRITE_TO_MATRIX_MARKET
    //For debug: write solution and maps to Matrix Market files
    EpetraExt::BlockMapToMatrixMarketFile("node_map.mm", *node_map);
    EpetraExt::BlockMapToMatrixMarketFile("map.mm", ownedMap);
    EpetraExt::BlockMapToMatrixMarketFile("overlap_map.mm", overlapMap);
    EpetraExt::MultiVectorToMatrixMarketFile("solution.mm", *app->getDiscretization()->getSolutionField());
#endif

    // ---------------------------------------------------------------------------------------------------
    // Compute sensitivities / responses and perform regression tests
    // IK, 12/9/13: how come this is turned off in the mpas branch?
    // ---------------------------------------------------------------------------------------------------

    if (mpiComm->MyPID() == 0) std::cout << "Computing responses and sensitivities..." << std::endl;
    int status = 0; // 0 = pass, failures are incremented
    Teuchos::Array<Teuchos::RCP<const Epetra_Vector> > responses;
    Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Epetra_MultiVector> > > sensitivities;
    epetraFromThyra(mpiComm, thyraResponses, thyraSensitivities, responses, sensitivities);

    const int num_p = solver->Np(); // Number of *vectors* of parameters
    const int num_g = solver->Ng(); // Number of *vectors* of responses

    *out << "Finished eval of first model: Params, Responses "
      << std::setprecision(12) << std::endl;

    const Thyra::ModelEvaluatorBase::InArgs<double> nominal = solver->getNominalValues();
    for (int i=0; i<num_p; i++) {
      const Teuchos::RCP<const Epetra_Vector> p_init = epetraVectorFromThyra(mpiComm, nominal.get_p(i));
      p_init->Print(*out << "\nParameter vector " << i << ":\n");
    }

    for (int i=0; i<num_g-1; i++) {
      const Teuchos::RCP<const Epetra_Vector> g = responses[i];
      bool is_scalar = true;

      if (app != Teuchos::null)
        is_scalar = app->getResponse(i)->isScalarResponse();

      if (is_scalar) {
        g->Print(*out << "\nResponse vector " << i << ":\n");

        if (num_p == 0) {
          // Just calculate regression data
          status += slvrfctry->checkSolveTestResults(i, 0, g.get(), NULL);
        } else {
          for (int j=0; j<num_p; j++) {
            const Teuchos::RCP<const Epetra_MultiVector> dgdp = sensitivities[i][j];
            if (Teuchos::nonnull(dgdp)) {
              dgdp->Print(*out << "\nSensitivities (" << i << "," << j << "):\n");
            }
            status += slvrfctry->checkSolveTestResults(i, j, g.get(), dgdp.get());
          }
        }
      }
    }

    *out << "\nNumber of Failed Comparisons: " << status << std::endl;
    if (mpiComm->MyPID() == 0) std::cout << "...done!" << std::endl;


    // ---------------------------------------------------------------------------------------------------
    // Copy solution back to Glimmer uvel and vvel arrays to be passed back
    // ---------------------------------------------------------------------------------------------------

    //std::cout << "overlapMap # global elements: " << overlapMap.NumGlobalElements() << std::endl;
    //std::cout << "overlapMap # my elements: " << overlapMap.NumMyElements() << std::endl;
    //std::cout << "overlapMap: " << overlapMap << std::endl;
    //std::cout << "map # global elements: " << ownedMap.NumGlobalElements() << std::endl;
    //std::cout << "map # my elements: " << ownedMap.NumMyElements() << std::endl;
    //std::cout << "node_map # global elements: " << node_map->NumGlobalElements() << std::endl;
    //std::cout << "node_map # my elements: " << node_map->NumMyElements() << std::endl;
    //std::cout << "node_map: " << *node_map << std::endl;

    if (mpiComm->MyPID() == 0) std::cout << "In felix_driver_run: copying Albany solution to uvel and vvel to send back to CISM... " << std::endl;
    //Epetra_Vectors to hold uvel and vvel to be passed to Glimmer/CISM
    Epetra_Vector uvel(*node_map, true);
    Epetra_Vector vvel(*node_map, true);


    if (interleavedOrdering == true) {
      for (int i=0; i<overlapMap.NumMyElements(); i++) {
        int global_dof = overlapMap.GID(i);
        double sol_value = solutionOverlap[i];
        int modulo = (global_dof % 2); //check if dof is for u or for v
        int vel_global_dof, vel_local_dof;
        if (modulo == 0) { //u dof
          vel_global_dof = global_dof/2 + 1; //add 1 because node_map is 1-based
          vel_local_dof = node_map->LID(vel_global_dof); //look up local id corresponding to global id in node_map
          //std::cout << "uvel: global_dof = " << global_dof << ", uvel_global_dof = " << vel_global_dof << ", uvel_local_dof = " << vel_local_dof << std::endl;
          uvel.ReplaceMyValues(1, &sol_value, &vel_local_dof);
        }
        else { //v dof
          vel_global_dof = (global_dof-1)/2 + 1; //add 1 because node_map is 1-based
          vel_local_dof = node_map->LID(vel_global_dof); //look up local id corresponding to global id in node_map
          vvel.ReplaceMyValues(1, &sol_value, &vel_local_dof);
        }
      }
    }
    else { //note: the case with non-interleaved ordering has not been tested...
      int numDofs = overlapMap.NumGlobalElements();
      for (int i=0; i<overlapMap.NumMyElements(); i++) {
        int global_dof = overlapMap.GID(i);
        double sol_value = solutionOverlap[i];
        int vel_global_dof, vel_local_dof;
        if (global_dof < numDofs/2) { //u dof
          vel_global_dof = global_dof + 1; //add 1 because node_map is 1-based
          vel_local_dof = node_map->LID(vel_global_dof); //look up local id corresponding to global id in node_map
          uvel.ReplaceMyValues(1, &sol_value, &vel_local_dof);
        }
        else { //v dof
          vel_global_dof = global_dof - numDofs/2 + 1; //add 1 because node_map is 1-based
          vel_local_dof = node_map->LID(vel_global_dof); //look up local id corresponding to global id in node_map
          vvel.ReplaceMyValues(1, &sol_value, &vel_local_dof);
        }
      }
    }
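
    // Worked example of the interleaved decoding above (dof numbering is 0-based):
    //   global_dof = 0 -> u at node 0/2+1 = 1
    //   global_dof = 1 -> v at node (1-1)/2+1 = 1
    //   global_dof = 6 -> u at node 6/2+1 = 4
    //   global_dof = 7 -> v at node (7-1)/2+1 = 4
    // i.e. dofs alternate u,v per node, and node IDs are shifted by 1 because
    // node_map uses CISM's 1-based global node numbering.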

#ifdef WRITE_TO_MATRIX_MARKET
    //For debug: write uvel and vvel to Matrix Market files
    EpetraExt::MultiVectorToMatrixMarketFile("uvel.mm", uvel);
    EpetraExt::MultiVectorToMatrixMarketFile("vvel.mm", vvel);
#endif

    //Copy uvel and vvel into uVel_ptr and vVel_ptr respectively (the arrays passed back to CISM) according to the numbering consistent w/ CISM.
    int counter1 = 0;
    int counter2 = 0;
    int local_nodeID;
    for (int j=0; j<nsn-1; j++) {
      for (int i=0; i<ewn-1; i++) {
        for (int k=0; k<upn; k++) {
          if (j >= nhalo-1 && j < nsn-nhalo) {
            if (i >= nhalo-1 && i < ewn-nhalo) {
              local_nodeID = node_map->LID(cismToAlbanyNodeNumberMap[counter1]);
              //if (mpiComm->MyPID() == 0)
              //  std::cout << "counter1:" << counter1 << ", cismToAlbanyNodeNumberMap[counter1]: " << cismToAlbanyNodeNumberMap[counter1] << ", local_nodeID: "
              //            << local_nodeID << ", uvel: " << uvel[local_nodeID] << std::endl;
              uVel_ptr[counter2] = uvel[local_nodeID];
              vVel_ptr[counter2] = vvel[local_nodeID];
              counter1++;
            }
          }
          else { //this else binds to the j-range check: rows outside the owned j-range are zeroed
            uVel_ptr[counter2] = 0.0;
            vVel_ptr[counter2] = 0.0;
          }
          counter2++;
        }
      }
    }

    if (mpiComm->MyPID() == 0) std::cout << "...done!" << std::endl;


    first_time_step = false;

}


//Clean up
//IK, 12/3/13: this is not called anywhere in the interface code...  used to be called (based on old bisicles interface code)?
void felix_driver_finalize(int amr_obj_index)
{

  std::cout << "In felix_driver_finalize: cleaning up..." << std::endl;
  std::cout << "done cleaning up!" << std::endl << std::endl;

}
