CVC3
00001 /////////////////////////////////////////////////////////////////////////////// 00002 /*! 00003 * \file search_fast.cpp 00004 * 00005 * Author: Mark Zavislak <zavislak@stanford.edu> 00006 * Undergraduate 00007 * Stanford University 00008 * 00009 * Created: Mon Jul 21 23:52:39 UTC 2003 00010 * 00011 * <hr> 00012 * 00013 * License to use, copy, modify, sell and/or distribute this software 00014 * and its documentation for any purpose is hereby granted without 00015 * royalty, subject to the terms and conditions defined in the \ref 00016 * LICENSE file provided with this distribution. 00017 * 00018 * <hr> 00019 */ 00020 /////////////////////////////////////////////////////////////////////////////// 00021 00022 #include "search_fast.h" 00023 #include "typecheck_exception.h" 00024 #include "search_rules.h" 00025 #include "command_line_flags.h" 00026 #include "cdmap.h" 00027 #include "decision_engine_dfs.h" 00028 //#include "decision_engine_caching.h" 00029 //#include "decision_engine_mbtf.h" 00030 #include "expr_transform.h" 00031 #include "assumptions.h" 00032 00033 00034 using namespace CVC3; 00035 using namespace std; 00036 00037 00038 //! When set to true, match Chaff behavior as close as possible 00039 #define followChaff false 00040 00041 00042 void SearchEngineFast::ConflictClauseManager::setRestorePoint() 00043 { 00044 TRACE("conflict clauses", 00045 "ConflictClauseManager::setRestorePoint(): scope=", 00046 d_se->scopeLevel(), ""); 00047 d_se->d_conflictClauseStack.push_back(new deque<ClauseOwner>()); 00048 d_se->d_conflictClauses = d_se->d_conflictClauseStack.back(); 00049 d_restorePoints.push_back(d_se->scopeLevel()); 00050 } 00051 00052 00053 void SearchEngineFast::ConflictClauseManager::notify() 00054 { 00055 if (d_restorePoints.size() > 0) { 00056 int scope = d_restorePoints.back(); 00057 if (scope > d_se->scopeLevel()) { 00058 TRACE("conflict clauses", 00059 "ConflictClauseManager::notify(): restore to scope ", scope, ""); 00060 d_restorePoints.pop_back(); 00061 while (d_se->d_conflictClauses->size() > 0) 00062 d_se->d_conflictClauses->pop_back(); 00063 delete d_se->d_conflictClauseStack.back(); 00064 d_se->d_conflictClauseStack.pop_back(); 00065 d_se->d_conflictClauses = d_se->d_conflictClauseStack.back(); 00066 } 00067 } 00068 } 00069 00070 00071 //! 
Constructor 00072 SearchEngineFast::SearchEngineFast(TheoryCore* core) 00073 : SearchImplBase(core), 00074 d_name("fast"), 00075 d_unitPropCount(core->getStatistics().counter("unit propagations")), 00076 d_circuitPropCount(core->getStatistics().counter("circuit propagations")), 00077 d_conflictCount(core->getStatistics().counter("conflicts")), 00078 d_conflictClauseCount(core->getStatistics().counter("conflict clauses")), 00079 d_clauses(core->getCM()->getCurrentContext()), 00080 d_unreportedLits(core->getCM()->getCurrentContext()), 00081 d_unreportedLitsHandled(core->getCM()->getCurrentContext()), 00082 d_nonLiterals(core->getCM()->getCurrentContext()), 00083 d_nonLiteralsSaved(core->getCM()->getCurrentContext()), 00084 d_simplifiedThm(core->getCM()->getCurrentContext()), 00085 d_nonlitQueryStart(core->getCM()->getCurrentContext()), 00086 d_nonlitQueryEnd(core->getCM()->getCurrentContext()), 00087 d_clausesQueryStart(core->getCM()->getCurrentContext()), 00088 d_clausesQueryEnd(core->getCM()->getCurrentContext()), 00089 d_conflictClauseManager(core->getCM()->getCurrentContext(), this), 00090 d_literalSet(core->getCM()->getCurrentContext()), 00091 d_useEnqueueFact(false), 00092 d_inCheckSAT(false), 00093 d_litsAlive(core->getCM()->getCurrentContext()), 00094 d_litsMaxScorePos(0), 00095 d_splitterCount(0), 00096 d_litSortCount(0), 00097 d_berkminFlag(false) 00098 { 00099 // if (core->getFlags()["de"].getString() == "caching") 00100 // d_decisionEngine = new DecisionEngineCaching(core, this); 00101 // else if (core->getFlags()["de"].getString() == "mbtf") 00102 // d_decisionEngine = new DecisionEngineMBTF(core, this); 00103 // else 00104 d_decisionEngine = new DecisionEngineDFS(core, this); 00105 00106 IF_DEBUG ( 00107 d_nonLiterals.setName("CDList[SearchEngineDefault.d_nonLiterals]"); 00108 d_clauses.setName("CDList[SearchEngineDefault.d_clauses]"); 00109 ) 00110 00111 d_conflictClauseStack.push_back(new deque<ClauseOwner>()); 00112 d_conflictClauses = d_conflictClauseStack.back(); 00113 } 00114 00115 //! Destructor 00116 /*! We own the proof rules (d_rules) and the variable manager (d_vm); 00117 delete them. */ 00118 SearchEngineFast::~SearchEngineFast() { 00119 for (unsigned i=0; i < d_circuits.size(); i++) 00120 delete d_circuits[i]; 00121 delete d_decisionEngine; 00122 for(size_t i=0, iend=d_conflictClauseStack.size(); i<iend; ++i) 00123 delete d_conflictClauseStack[i]; 00124 } 00125 00126 00127 /*! @brief Return a ref to the vector of watched literals. If no 00128 such vector exists, create it. */ 00129 /*! This function is normally used when the value of 'literal' 00130 * becomes false. The vector contains pointers to all clauses where 00131 * this literal occurs, and therefore, these clauses may cause unit 00132 * propagation. In any case, the watch pointers in these clauses 00133 * need to be updated. 
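 *
 * Each clause keeps two watch pointers, both on literals that are not
 * known to be false.  A clause only needs attention when one of its
 * watched literals becomes false: propagate() then either moves the
 * watch to another non-false literal, finds the clause satisfied by its
 * other watched literal, derives that literal by unit propagation when
 * it is the only one left unassigned, or reports a conflict when every
 * literal is false.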
00134 */ 00135 vector<std::pair<Clause, int> >& 00136 SearchEngineFast::wp(const Literal& literal) { 00137 // return d_wp[literal.getExpr()]; 00138 return literal.wp(); 00139 } 00140 00141 #ifdef _CVC3_DEBUG_MODE 00142 static void checkAssump(const Theorem& t, const Theorem& orig, 00143 const CDMap<Expr,Theorem>& assumptions) { 00144 const Assumptions& a(t.getAssumptionsRef()); 00145 const Assumptions::iterator iend = a.end(); 00146 if(!(!t.isAssump() || assumptions.count(t.getExpr()) > 0)) 00147 orig.printDebug(); 00148 DebugAssert((!t.isAssump() || assumptions.count(t.getExpr()) > 0), 00149 "checkAssump: found stray theorem:\n " 00150 + t.toString()); 00151 if(t.isAssump()) return; 00152 for (Assumptions::iterator i = a.begin(); i != iend; ++i) { 00153 if (!i->isFlagged()) { 00154 i->setFlag(); 00155 // comparing only TheoremValue pointers in .find() 00156 checkAssump(*i, orig, assumptions); 00157 } 00158 } 00159 } 00160 00161 00162 /*! @brief Check that assumptions in the result of checkValid() are a 00163 subset of assertions */ 00164 /*! Only defined in the debug build. 00165 * \ingroup SE_Default 00166 */ 00167 static void 00168 checkAssumpDebug(const Theorem& res, 00169 const CDMap<Expr,Theorem>& assumptions) { 00170 // FIXME: (jimz) will need to traverse graph 00171 00172 if(!res.withAssumptions()) return; 00173 res.clearAllFlags(); 00174 checkAssump(res, res, assumptions); 00175 } 00176 #endif 00177 00178 00179 00180 00181 //////////////////////////// 00182 // New Search Engine Code // 00183 //////////////////////////// 00184 00185 00186 QueryResult SearchEngineFast::checkSAT() 00187 { 00188 d_inCheckSAT=true; 00189 QueryResult result = UNSATISFIABLE; 00190 if (!bcp()) { // run an initial bcp 00191 DebugAssert(d_factQueue.empty(), "checkSAT()"); 00192 if (!fixConflict()) goto checkSATfinalize; 00193 } 00194 while (!d_core->outOfResources()) { 00195 if (split()) { // if we were able to split successfully 00196 // Run BCP 00197 while (!bcp()) { // while bcp finds conflicts 00198 DebugAssert(d_factQueue.empty(), "checkSAT()"); 00199 d_decisionEngine->goalSatisfied(); 00200 00201 // Try to fix those conflicts by rolling back contexts and 00202 // adding new conflict clauses to help in the future. 00203 if (!fixConflict()) goto checkSATfinalize; 00204 } 00205 } 00206 // Now we couldn't find a splitter. This may have been caused by 00207 // other reasons, so we allow them to be processed here. 00208 else { 00209 result = SATISFIABLE; 00210 break; 00211 } 00212 } 00213 checkSATfinalize: 00214 d_inCheckSAT = false; 00215 if (d_core->outOfResources()) result = ABORT; 00216 else if (result == SATISFIABLE && d_core->incomplete()) result = UNKNOWN; 00217 return result; 00218 } 00219 00220 00221 /* There are different heurisitics for splitters available. We would 00222 * normally try to use the new generic decision class, but initially 00223 * we want this just to work, and so we use a custom decision class 00224 * that knows about this particular search engine. We can realize 00225 * this same behavior using the existing mechanism by having a 00226 * decision subclass dynamic_cast the received SearchEngine pointer as 00227 * a SearchEngineFast and work from there. However, as of this time I 00228 * do not plan on supporting the nextClause() and nextNonClause() 00229 * functionality, as they don't make much sense in this kind of modern 00230 * solver. 
It may make more sense to have the solver and it's literal 00231 * splitting heuristic tightly connected, but leaving the nonLiteral 00232 * splitting heurisitic separate, since it is generally independent of 00233 * the search mechanism. 00234 * 00235 * By this point we've already guaranteed that we need to split: no 00236 * unit clauses, and no conflicts. The procedure is as follows. Ask 00237 * the decision engine for an expression to split on. The decision 00238 * engine dutifully returns an expression. We craft an assumption out 00239 * of this and assert it to the core (after making sure d_assumptions 00240 * has a copy). 00241 * 00242 * If no splitters are available, we still have to let the decision 00243 * procedures have a final chance at showing the context is 00244 * inconsistent by calling checkSATcore(). If we get a false out of 00245 * this, we have to continue processing, so the context is left 00246 * unchanged, no splitter is chosen, and we return true to let the 00247 * bcp+conflict processing have another chance. If we get true, then 00248 * the problem is SAT and we are done, so we return false. 00249 * 00250 * Otherwise, we obtain the splitter, then ship it back off to the 00251 * decision engine for processing. 00252 */ 00253 00254 bool SearchEngineFast::split() 00255 { 00256 TRACE_MSG("search basic", "Choosing splitter"); 00257 Expr splitter = findSplitter(); 00258 if (splitter.isNull()) { 00259 TRACE_MSG("search basic", "No splitter"); 00260 bool res(d_core->inconsistent() || !d_core->checkSATCore()); 00261 if(!res) { 00262 d_splitterCount = 0; // Force resorting splitters next time 00263 res = !bcp(); 00264 } 00265 return res; 00266 } 00267 Literal l(newLiteral(splitter)); 00268 Theorem simp; 00269 if(l.getValue() != 0) { 00270 // The literal is valid at a lower scope than it has been derived, 00271 // and therefore, it was lost after a scope pop. Reassert it here. 00272 simp = l.deriveTheorem(); 00273 d_literals.push_back((l.getValue() == 1)? l : !l); 00274 d_core->addFact(simp); 00275 return true; 00276 } 00277 else { 00278 simp = d_core->simplify(splitter); 00279 Expr e = simp.getRHS(); 00280 if(e.isBoolConst()) { 00281 IF_DEBUG(debugger.counter("splitter simplified to TRUE or FALSE")++;) 00282 if(e.isTrue()) simp = d_commonRules->iffTrueElim(simp); 00283 else { 00284 if(splitter.isNot()) 00285 simp = d_commonRules->notNotElim(d_commonRules->iffFalseElim(simp)); 00286 else 00287 simp = d_commonRules->iffFalseElim(simp); 00288 } 00289 TRACE("search full", "Simplified candidate: ", splitter.toString(), ""); 00290 TRACE("search full", " to: ", 00291 simp.getExpr().toString(), ""); 00292 // Send this literal to DPs and enqueue it for BCP 00293 d_core->addFact(simp); 00294 addLiteralFact(simp); 00295 DebugAssert(l.getValue()!=0, "SearchFast::split(): l = "+l.toString()); 00296 return true; 00297 } 00298 } 00299 00300 TRACE("search terse", "Asserting splitter: #" 00301 +int2string(d_core->getStatistics().counter("splitters"))+": ", 00302 splitter, ""); 00303 d_decisionEngine->pushDecision(splitter); 00304 return true; 00305 } 00306 00307 //! Total order on literals for the initial sort 00308 /*! Used for debugging, to match Chaff's behavior as close as possible 00309 and track any discrepancies or inefficiencies. 
*/ 00310 // static bool compareInitial(const Literal& l1, const Literal& l2) { 00311 // Expr lit1 = l1.getVar().getExpr(); 00312 // Expr lit2 = l2.getVar().getExpr(); 00313 // if(lit1.hasOp() && lit2.hasOp()) { 00314 // int i = atoi(&(lit1.getOp().getName().c_str()[2])); 00315 // int j = atoi(&(lit2.getOp().getName().c_str()[2])); 00316 // return (i < j); 00317 // } 00318 // else 00319 // return (l1.score() > l2.score()); 00320 // } 00321 00322 //! Ordering on literals, used to sort them by score 00323 static inline bool compareLits(const Literal& l1, 00324 const Literal& l2) 00325 { 00326 return (l1.score() > l2.score()); 00327 } 00328 00329 IF_DEBUG(static string lits2str(const vector<Literal>& lits) { 00330 ostringstream ss; 00331 ss << "\n["; 00332 for(vector<Literal>::const_iterator i=lits.begin(), iend=lits.end(); 00333 i!=iend; ++i) 00334 ss << *i << "\n "; 00335 ss << "\n]"; 00336 return ss.str(); 00337 }) 00338 00339 00340 /*! 00341 * Recompute the scores for all known literals. This is a relatively 00342 * expensive procedure, so it should not be called too often. 00343 * Currently, it is called once per 100 splitters. 00344 */ 00345 void SearchEngineFast::updateLitScores(bool firstTime) 00346 { 00347 TRACE("search literals", "updateLitScores(size=", d_litsByScores.size(), 00348 ") {"); 00349 unsigned count, score; 00350 00351 if (firstTime && followChaff) { 00352 ::stable_sort(d_litsByScores.begin(), d_litsByScores.end(), compareLits); 00353 } 00354 00355 for(size_t i=0; i< d_litsByScores.size(); ++i) { 00356 // Reading by value, since we'll be modifying the attributes. 00357 Literal lit = d_litsByScores[i]; 00358 // First, clean up the unused literals 00359 while(lit.count()==0 && i+1 < d_litsByScores.size()) { 00360 TRACE("search literals", "Removing lit["+int2string(i)+"] = ", lit, 00361 " from d_litsByScores"); 00362 // Remove this literal from the list 00363 lit.added()=false; 00364 lit = d_litsByScores.back(); 00365 d_litsByScores[i] = lit; 00366 d_litsByScores.pop_back(); 00367 } 00368 // Take care of the last literal in the vector 00369 if(lit.count()==0 && i+1 == d_litsByScores.size()) { 00370 TRACE("search literals", "Removing last lit["+int2string(i)+"] = ", lit, 00371 " from d_litsByScores"); 00372 lit.added()=false; 00373 d_litsByScores.pop_back(); 00374 break; // Break out of the loop -- no more literals to process 00375 } 00376 00377 TRACE("search literals", "Updating lit["+int2string(i)+"] = ", lit, " {"); 00378 00379 DebugAssert(lit == d_litsByScores[i], "lit = "+lit.toString()); 00380 DebugAssert(lit.added(), "lit = "+lit.toString()); 00381 DebugAssert(lit.count()>0, "lit = "+lit.toString()); 00382 00383 count = lit.count(); 00384 unsigned& countPrev = lit.countPrev(); 00385 int& scoreRef = lit.score(); 00386 00387 score = scoreRef/2 + count - countPrev; 00388 scoreRef = score; 00389 countPrev = count; 00390 00391 TRACE("search literals", "Updated lit["+int2string(i)+"] = ", lit, " }"); 00392 // Literal neglit(!lit); 00393 00394 // count = neglit.count(); 00395 // unsigned& negcountPrev = neglit.countPrev(); 00396 // unsigned& negscoreRef = neglit.score(); 00397 00398 // negscore = negscoreRef/2 + count - negcountPrev; 00399 // negscoreRef = negscore; 00400 // negcountPrev = count; 00401 00402 // if(negscore > score) d_litsByScores[i] = neglit; 00403 } 00404 ::stable_sort(d_litsByScores.begin(), d_litsByScores.end(), compareLits); 00405 d_litsMaxScorePos = 0; 00406 d_litSortCount=d_litsByScores.size(); 00407 TRACE("search splitters","updateLitScores => ", 
lits2str(d_litsByScores),""); 00408 TRACE("search literals", "updateLitScores(size=", d_litsByScores.size(), 00409 ") => }"); 00410 } 00411 00412 void SearchEngineFast::updateLitCounts(const Clause& c) 00413 { 00414 TRACE("search literals", "updateLitCounts(", CompactClause(c), ") {"); 00415 for(unsigned i=0; i<c.size(); ++i) { 00416 // Assign by value to modify the attributes 00417 Literal lit = c[i]; 00418 DebugAssert(lit.getExpr().isAbsLiteral(),"Expected literal"); 00419 // Only add the literal if it wasn't added before. The literal 00420 // counts are taken care of in Clause constructors/destructors. 00421 // if(!lit.added() || lit.count() != lit.countPrev()) 00422 d_litSortCount--; 00423 00424 if(!lit.added()) { 00425 TRACE("search literals", "adding literal ", lit, " to d_litsByScores"); 00426 d_litsByScores.push_back(lit); 00427 lit.added()=true; 00428 } 00429 } 00430 if(d_litSortCount < 0) { 00431 ::stable_sort(d_litsByScores.begin(), d_litsByScores.end(), compareLits); 00432 d_litSortCount=d_litsByScores.size(); 00433 } 00434 TRACE("search splitters","updateLitCounts => ", lits2str(d_litsByScores),""); 00435 TRACE_MSG("search literals", "updateLitCounts => }"); 00436 } 00437 00438 Expr SearchEngineFast::findSplitter() 00439 { 00440 TRACE_MSG("splitters", "SearchEngineFast::findSplitter() {"); 00441 Expr splitter; // Null by default 00442 unsigned i; 00443 00444 // if we have a conflict clause, pick the one inside with the 00445 // best ac(z) score (from the most recent conflict clause) 00446 // if (d_berkminFlag && !d_conflictClauses.empty()) 00447 if (d_berkminFlag && !d_conflictClauses->empty()) 00448 { 00449 unsigned sCount = 0; 00450 std::deque<ClauseOwner>::reverse_iterator foundUnsat = d_conflictClauses->rend(); 00451 for (std::deque<ClauseOwner>::reverse_iterator i = d_conflictClauses->rbegin(); 00452 i != d_conflictClauses->rend(); 00453 ++i) { 00454 ++sCount; 00455 if (!((Clause)*i).sat(true)) { 00456 foundUnsat = i; 00457 break; 00458 } 00459 } 00460 if (foundUnsat != d_conflictClauses->rend()) { 00461 Clause &topClause = *foundUnsat; 00462 int numLits = topClause.size(); 00463 int bestScore = 0; 00464 int bestLit = -1; 00465 unsigned numChoices = 0; 00466 for (int i = 0; i < numLits; ++i) { 00467 const Literal& lit = topClause[i]; 00468 if (lit.getValue() != 0) continue; 00469 if (bestLit == -1) bestLit = i; 00470 ++numChoices; 00471 int s = lit.score(); 00472 if (s > bestScore) { 00473 bestLit = i; 00474 bestScore = s; 00475 } 00476 } 00477 if (bestLit != -1) { 00478 splitter = topClause[bestLit].getExpr(); 00479 IF_DEBUG(debugger.counter("BerkMin heuristic")++;) 00480 TRACE("splitters", "SearchEngineFast::findSplitter()[berkmin] => ", 00481 splitter, " }"); 00482 return splitter; 00483 } 00484 } 00485 } 00486 00487 /* 00488 // Search for DP-specific splitters 00489 for(CDMapOrdered<Splitter,bool>::iterator i=d_dpSplitters.begin(), 00490 iend=d_dpSplitters.end(); i!=iend; ++i) { 00491 Expr e((*i).first.expr); 00492 if(e.isBoolConst() || d_core->find(e).getRHS().isBoolConst()) 00493 continue; 00494 return e; 00495 } 00496 */ 00497 00498 for (int i = d_nonLiterals.size()-1; i >= 0; --i) { 00499 const Expr& e = d_nonLiterals[i].get().getExpr(); 00500 if (e.isTrue()) continue; 00501 // if (d_nonLiteralSimplified[thm.getExpr()]) continue; 00502 DebugAssert(!e.isBoolConst(), "Expected non-bool const"); 00503 DebugAssert(d_core->simplifyExpr(e) == e, 00504 "Expected non-literal to be simplified:\n e = " 00505 +e.toString()+"\n simplify(e) = " 00506 
+d_core->simplifyExpr(e).toString()); 00507 splitter = d_decisionEngine->findSplitter(e); 00508 //DebugAssert(!splitter.isNull(), 00509 // "findSplitter: can't find splitter in non-literal: " 00510 // + e.toString()); 00511 if (splitter.isNull()) continue; 00512 IF_DEBUG(debugger.counter("splitters from non-literals")++;) 00513 TRACE("splitters", "SearchEngineFast::findSplitter()[non-lit] => ", 00514 splitter, " }"); 00515 return splitter; 00516 } 00517 00518 // Update the scores: we are about to choose a splitter based on them 00519 if (d_splitterCount <= 0) { 00520 updateLitScores(false); 00521 // d_splitterCount = d_litsByScores.size(); 00522 // if(d_splitterCount > 100) 00523 d_splitterCount = 0x10; 00524 } else 00525 d_splitterCount--; 00526 // pick splitter based on score 00527 for (i=d_litsMaxScorePos; i<d_litsByScores.size(); ++i) { 00528 const Literal& splitterLit = d_litsByScores[i]; 00529 TRACE("search splitters", "d_litsByScores["+int2string(i)+"] = ", 00530 splitterLit,""); 00531 //if (d_core->simplifyExpr(splitter).isBoolConst()) continue; 00532 if(splitterLit.getValue() != 0) continue; 00533 splitter = splitterLit.getExpr(); 00534 // Skip auxiliary CNF vars 00535 if(!isGoodSplitter(splitter)) continue; 00536 d_litsMaxScorePos = i+1; 00537 IF_DEBUG(debugger.counter("splitters from literals")++;) 00538 TRACE("splitters", "d_litsMaxScorePos: ", d_litsMaxScorePos, ""); 00539 TRACE("splitters", "SearchEngineFast::findSplitter()[lit] => ", 00540 splitter, " }"); 00541 return splitter; 00542 } 00543 TRACE_MSG("splitters", 00544 "SearchEngineFast::findSplitter()[not found] => Null }"); 00545 return Expr(); 00546 } 00547 00548 00549 void SearchEngineFast::recordFact(const Theorem& thm) 00550 { 00551 Literal l(newLiteral(thm.getExpr())); 00552 if(l.getValue() == 0) { 00553 l.setValue(thm, thm.getScope()); 00554 IF_DEBUG(debugger.counter("recordFact adds unreported lit")++;) 00555 d_unreportedLits.insert(l.getExpr(),thm,thm.getScope()); 00556 } else if (l.getValue() == 1 && l.getScope() > thm.getScope()) { 00557 // Cannot do this, it will trigger DebugAssert 00558 // l.setValue(thm,thm.getScope()); 00559 IF_DEBUG(debugger.counter("recordFact adds unreported lit")++;) 00560 d_unreportedLits.insert(l.getExpr(),thm,thm.getScope()); 00561 } else if(l.getValue() < 0) { // Contradiction, bcp will die anyway 00562 if(l.isNegative()) 00563 setInconsistent(d_commonRules->contradictionRule(l.deriveTheorem(), thm)); 00564 else 00565 setInconsistent(d_commonRules->contradictionRule(thm, l.deriveTheorem())); 00566 } 00567 //else if (thm.getScope() < scopeLevel()) 00568 // d_unreportedLits.insert(l.getExpr(),l,thm.getScope()); 00569 00570 } 00571 00572 #ifdef _CVC3_DEBUG_MODE 00573 void SearchEngineFast::fullCheck() 00574 { 00575 for (unsigned i = 0; 00576 i < d_clauses.size(); 00577 ++i) { 00578 if (!((Clause)d_clauses[i]).sat()) { 00579 bool sat = false; 00580 const Clause &theClause = d_clauses[i]; 00581 unsigned numLits = theClause.size(); 00582 unsigned numChoices = 0; 00583 for (unsigned j = 0; !sat && j < numLits; ++j) { 00584 if (theClause[j].getValue() == 0) 00585 ++numChoices; 00586 else if (theClause[j].getValue() == 1) 00587 sat = true; 00588 } 00589 if (sat) continue; 00590 if (numChoices <= 1 || !theClause.wpCheck()) { 00591 CVC3::debugger.getOS() << CompactClause(theClause) << endl; 00592 CVC3::debugger.getOS() << theClause.toString() << endl; 00593 } 00594 DebugAssert(numChoices > 1, "BCP postcondition violated: unsat or unit clause(s)"); 00595 DebugAssert(theClause.wpCheck(), 
"Watchpointers broken"); 00596 } 00597 } 00598 00599 if (!d_conflictClauses->empty()) 00600 { 00601 for (std::deque<ClauseOwner>::reverse_iterator i = d_conflictClauses->rbegin(); 00602 i != d_conflictClauses->rend(); 00603 ++i) { 00604 if (!((Clause)*i).sat()) { 00605 bool sat = false; 00606 Clause &theClause = *i; 00607 unsigned numLits = theClause.size(); 00608 unsigned numChoices = 0; 00609 for (unsigned j = 0; !sat && j < numLits; ++j) { 00610 if (theClause[j].getValue() == 0) 00611 ++numChoices; 00612 else if (theClause[j].getValue() == 1) 00613 sat = true; 00614 } 00615 if (sat) continue; 00616 if (numChoices <= 1 || !theClause.wpCheck()) { 00617 CVC3::debugger.getOS() << CompactClause(theClause) << endl; 00618 CVC3::debugger.getOS() << theClause.toString() << endl; 00619 } 00620 00621 DebugAssert(numChoices > 1, "BCP postcondition violated: unsat or unit conflict clause(s)"); 00622 DebugAssert(theClause.wpCheck(), "Watchpointers broken"); 00623 00624 } 00625 } 00626 } 00627 } 00628 #endif 00629 00630 00631 void SearchEngineFast::clearLiterals() { 00632 TRACE_MSG("search literals", "clearLiterals()"); 00633 d_literals.clear(); 00634 } 00635 00636 00637 bool SearchEngineFast::bcp() 00638 { 00639 TRACE("search bcp", "bcp@"+int2string(scopeLevel())+"(#lits=", 00640 d_literals.size(), ") {"); 00641 IF_DEBUG(TRACE_MSG("search bcp", "literals=[\n"); 00642 for(size_t i=0,iend=d_literals.size(); i<iend; i++) 00643 TRACE("search bcp", " ", d_literals[i], ","); 00644 TRACE_MSG("search bcp", "]");) 00645 DebugAssert(d_factQueue.empty(), "bcp(): start"); 00646 bool newInfo = true; 00647 /* 00648 CDMap<Expr,Theorem>::iterator i = d_unreportedLits.begin(), 00649 iend = d_unreportedLits.end(); 00650 for (; i != iend; ++i) { 00651 if (d_unreportedLitsHandled[(*i).first]) 00652 continue; 00653 Theorem thm((*i).second); 00654 Literal l(newLiteral(thm.getExpr())); 00655 DebugAssert(l.getValue() != -1, "Bad unreported literal: "+l.toString()); 00656 if(l.getValue() == 0) l.setValue(thm, scopeLevel()); 00657 IF_DEBUG(debugger.counter("re-assert unreported lits")++;) 00658 DebugAssert(l.getExpr().isAbsLiteral(), 00659 "bcp(): pushing non-literal to d_literals:\n " 00660 +l.getExpr().toString()); 00661 // The literal may be set to 1, but not on the list; push it here 00662 // explicitly 00663 d_literals.push_back(l); 00664 //recordFact((*i).second.getTheorem()); 00665 enqueueFact(thm); 00666 d_unreportedLitsHandled[(*i).first] = true; 00667 } 00668 */ 00669 while (newInfo) { 00670 IF_DEBUG(debugger.counter("BCP: while(newInfo)")++;) 00671 TRACE_MSG("search bcp", "while(newInfo) {"); 00672 newInfo = false; 00673 while(!d_core->inconsistent() && d_literals.size() > 0) { 00674 for(unsigned i=0; !d_core->inconsistent() && i<d_literals.size(); ++i) { 00675 Literal l = d_literals[i]; 00676 TRACE("search props", "Propagating literal: [", l.toString(), "]"); 00677 DebugAssert(l.getValue() == 1, "Bad literal is d_literals: " 00678 +l.toString()); 00679 d_litsAlive.push_back(l); 00680 // Use the watch pointers to find more literals to assert. Repeat 00681 // until conflict. Once conflict found, call unsatClause() to 00682 // assert the contradiction. 
00683 std::vector<std::pair<Clause, int> >& wps = wp(l); 00684 TRACE("search props", "Appears in ", wps.size(), " clauses."); 00685 for(unsigned j=0; j<wps.size(); ++j) { 00686 const Clause& c = wps[j].first; 00687 int k = wps[j].second; 00688 DebugAssert(c.watched(k).getExpr() == (!l).getExpr(), 00689 "Bad watched literal:\n l = "+l.toString() 00690 +"\n c[k] = "+c.watched(k).toString()); 00691 if(c.deleted()) { // Remove the clause from the list 00692 if(wps.size() > 1) { 00693 wps[j] = wps.back(); 00694 --j; 00695 } 00696 wps.pop_back(); 00697 } else if(true || !c.sat()) { 00698 // Call BCP to update the watch pointer 00699 bool wpUpdated; 00700 bool conflict = !propagate(c, k, wpUpdated); 00701 // Delete the old watch pointer from the list 00702 if(wpUpdated) { 00703 if(wps.size() > 1) { 00704 wps[j] = wps.back(); 00705 --j; 00706 } 00707 wps.pop_back(); 00708 } 00709 if (conflict) { 00710 clearFacts(); 00711 DebugAssert(d_factQueue.empty(), "bcp(): conflict"); 00712 TRACE_MSG("search bcp", "bcp[conflict] => false }}"); 00713 return false; 00714 } 00715 } 00716 } 00717 00718 vector<Circuit*>& cps = d_circuitsByExpr[l.getExpr()]; 00719 for (vector<Circuit*>::iterator it = cps.begin(), end = cps.end(); 00720 it < end; it++) 00721 { 00722 if (!(*it)->propagate(this)) { 00723 clearFacts(); 00724 DebugAssert(d_factQueue.empty(), "bcp(): conflict-2"); 00725 TRACE_MSG("search bcp", "bcp[circuit propagate] => false }}"); 00726 return false; 00727 } 00728 } 00729 } 00730 // Finished with BCP* (without DPs). 00731 clearLiterals(); 00732 // Now, propagate the facts to DPs and repeat ((BCP*); DP) step 00733 if(!d_core->inconsistent()) commitFacts(); 00734 } 00735 if (d_core->inconsistent()) { 00736 d_conflictTheorem = d_core->inconsistentThm(); 00737 clearFacts(); 00738 TRACE_MSG("search bcp", "bcp[DP conflict] => false }}"); 00739 return false; 00740 } 00741 else 00742 TRACE("search basic", "Processed ", d_literals.size(), " propagations"); 00743 IF_DEBUG(fullCheck();) 00744 clearLiterals(); 00745 00746 bool dfs_heuristic = (d_core->getFlags()["de"].getString() == "dfs"); 00747 TRACE("search dfs", "DFS is ", (dfs_heuristic? "on" : "off"), 00748 " (de = "+d_core->getFlags()["de"].getString()+") {"); 00749 // When DFS heuristic is used, simplify the nonliterals only until 00750 // there is a completely simplified one on top of the stack, or 00751 // all of the non-literals are gone. Start from the end of the 00752 // list (top of the stack), since that's where the splitter is 00753 // most likely chosen. This way we are likely to hit something 00754 // that simplifies very soon. 
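    // The loop below walks d_nonLiterals from the top of the stack.  An
    // entry that simplifies to a new formula replaces the stored theorem
    // (existentials are skolemized first, and literal results are
    // enqueued as facts); with the dfs heuristic the scan stops as soon
    // as one entry changes or is found already fully simplified.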
00755 00756 size_t origSize = d_nonLiterals.size(); 00757 bool done(false); 00758 for(int i=origSize-1; !done && !d_core->inconsistent() && i>=0; --i) { 00759 const Theorem& thm = d_nonLiterals[i].get(); 00760 const Expr& e = thm.getExpr(); 00761 TRACE("search dfs", "Simplifying non-literal", e, ""); 00762 if (e.isTrue()) { 00763 // if (d_nonLiteralSimplified[thm.getExpr()]) { 00764 IF_DEBUG(debugger.counter("BCP: simplified non-literals: skipped [stale]")++;) 00765 TRACE_MSG("search bcp", "}[continue]// end of while(newInfo)"); 00766 continue; 00767 } 00768 IF_DEBUG(debugger.counter("BCP: simplified non-literals")++;) 00769 Theorem simpThm = simplify(thm); 00770 Expr simp = simpThm.getExpr(); 00771 if(simp != e) { 00772 IF_DEBUG(debugger.counter("BCP: simplified non-literals: changed")++;) 00773 newInfo = true; 00774 // d_nonLiteralSimplified[thm.getExpr()] = true; 00775 if (!simp.isFalse()) { 00776 while (simp.isExists()) { 00777 simpThm = d_commonRules->skolemize(simpThm); 00778 simp = simpThm.getExpr(); 00779 } 00780 if (simp.isAbsLiteral()) { 00781 enqueueFact(simpThm); 00782 commitFacts(); 00783 } 00784 d_nonLiterals[i] = simpThm; 00785 if(dfs_heuristic) { 00786 // Something has changed, time to stop this loop. If we 00787 // also get a new non-literal on top of the stack, and no 00788 // new literals, then stop the entire BCP (since that 00789 // non-literal is guaranteed to be fully simplified). 00790 done = true; 00791 if(d_nonLiterals.size() > origSize && d_literals.empty()) 00792 newInfo = false; 00793 } 00794 } else 00795 setInconsistent(simpThm); 00796 } else if (dfs_heuristic) done = true; 00797 } 00798 TRACE("search dfs", "End of non-literal simplification: newInfo = ", 00799 (newInfo? "true" : "false"), " }}"); 00800 if (d_core->inconsistent()) { 00801 d_conflictTheorem = d_core->inconsistentThm(); 00802 DebugAssert(d_factQueue.empty(), "bcp(): inconsistent (nonLits)"); 00803 TRACE_MSG("search bcp", "bcp[nonCNF conflict] => false }}"); 00804 return false; 00805 } 00806 TRACE_MSG("search bcp", "}// end of while(newInfo)"); 00807 } 00808 IF_DEBUG(fullCheck();) 00809 DebugAssert(d_factQueue.empty(), "bcp(): end"); 00810 TRACE_MSG("search bcp", "bcp => true }"); 00811 return true; 00812 } 00813 00814 00815 // True if successfully propagated. False if conflict. 00816 bool SearchEngineFast::propagate(const Clause &c, int idx, bool& wpUpdated) 00817 { 00818 TRACE("search propagate", "propagate(", CompactClause(c), 00819 ", idx = "+int2string(idx)+") {"); 00820 DebugAssert(idx==0 || idx==1, "propagate(): bad index = "+int2string(idx)); 00821 // The current watched literal must be set to FALSE, unless the 00822 // clause is of size 1 00823 DebugAssert((c.size() == 1) || c.watched(idx).getValue() < 0, 00824 "propagate(): this literal must be FALSE: c.watched(" 00825 +int2string(idx)+")\n c = "+c.toString()); 00826 wpUpdated = false; 00827 int lit = c.wp(idx), otherLit = c.wp(1-idx); 00828 int dir = c.dir(idx); 00829 int size = c.size(); 00830 while(true) { 00831 TRACE_MSG("search propagate", "propagate: lit="+int2string(lit) 00832 +", otherLit="+int2string(otherLit)+", dir="+int2string(dir)); 00833 lit += dir; 00834 if(lit < 0 || lit >= size) { // hit the edge 00835 if(dir == c.dir(idx)) { 00836 // Finished first half of the clause, do the other half 00837 lit = c.wp(idx); 00838 dir = -dir; 00839 continue; 00840 } 00841 // All literals are false, except for the other watched literal. 00842 // Check its value. 
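        // Three possible outcomes: if the other watched literal is also
        // false, the whole clause is falsified and a conflict theorem is
        // derived; if it is unassigned, the clause has become unit and
        // that literal is derived by unit propagation; if it is already
        // true, the clause is simply marked satisfied.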
00843 Literal l(c[otherLit]); 00844 if(l.getValue() < 0) { // a conflict 00845 //Literal ff(newLiteral(d_vcl->getEM()->falseExpr())); 00846 //ff.setValue(1, c, -1); 00847 //d_lastBCPConflict = ff; 00848 vector<Theorem> thms; 00849 for (unsigned i = 0; i < c.size(); ++i) 00850 thms.push_back(c[i].getTheorem()); 00851 d_conflictTheorem = d_rules->conflictRule(thms,c.getTheorem()); 00852 TRACE("search propagate", "propagate[", CompactClause(c), 00853 "] => false }"); 00854 return false; 00855 } 00856 else if(l.getValue() == 0) { 00857 DebugAssert(c.size() > 1 && l.getExpr().isAbsLiteral(), "BCP: Expr should be literal"); 00858 d_unitPropCount++; 00859 c.markSat(); 00860 // Let's prove the new literal instead of playing assumption games 00861 unitPropagation(c,otherLit); 00862 //l.setValue(1, c, otherLit); 00863 //d_core->addFact(createAssumption(l)); 00864 TRACE("search propagate", "propagate[", CompactClause(c), 00865 "] => true }"); 00866 return true; 00867 } 00868 else { 00869 c.markSat(); 00870 TRACE("search propagate", "propagate[", CompactClause(c), 00871 "] => true }"); 00872 return true; 00873 } 00874 } 00875 // If it's the other watched literal, skip it 00876 if(lit == otherLit) continue; 00877 00878 Literal l(c[lit]); 00879 int val(l.getValue()); 00880 // if it is false, keep looking 00881 if(val < 0) continue; 00882 // OPTIMIZATION: if lit is TRUE, mark the clause SAT and give up. 00883 // FIXME: this is different from Chaff. Make sure it doesn't harm 00884 // the performance. 00885 if(val > 0) { 00886 c.markSat(); 00887 // return true; 00888 } 00889 00890 // Now the value of 'lit' is unknown. Set the watch pointer to 00891 // this literal, if it is indeed a literal (it may be any formula 00892 // in a pseudo-clause), and update the direction. 
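    // Record the new watch position and direction, then register this
    // clause on the watch list of the new literal's negation, so the
    // clause is revisited if that literal ever becomes false.  The
    // caller removes the stale watch entry once wpUpdated is signalled.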
00893 c.wp(idx, lit); 00894 c.dir(idx, dir); 00895 DebugAssert(c.watched(idx).getValue() >= 0, 00896 "Bad watched literals in clause:\n" 00897 +CompactClause(c).toString()); 00898 // Get the expression of the literal's inverse 00899 Literal inv(!c[lit]); 00900 // If it is indeed a literal, update the watch pointers 00901 DebugAssert(inv.getExpr().isAbsLiteral(), "Expr should be literal: inv = " 00902 +inv.getExpr().toString()); 00903 // Add the new watched literal to the watch pointer list 00904 pair<Clause, int> p(c, idx); 00905 wp(inv).push_back(p); 00906 00907 // Signal to remove the old watch pointer 00908 wpUpdated = true; 00909 TRACE("search propagate", "propagate[", CompactClause(c), 00910 "] => true }"); 00911 return true; 00912 } 00913 } 00914 00915 void SearchEngineFast::unitPropagation(const Clause &c, unsigned idx) 00916 { 00917 vector<Theorem> thms; 00918 for (unsigned i = 0; i < c.size(); ++i) 00919 if (i != idx) { 00920 thms.push_back(c[i].getTheorem()); 00921 DebugAssert(!thms.back().isNull(), 00922 "unitPropagation(idx = "+int2string(idx)+", i = " 00923 +int2string(i)+",\n"+c.toString()+")"); 00924 } 00925 Theorem thm(d_rules->unitProp(thms,c.getTheorem(),idx)); 00926 enqueueFact(thm); // d_core->addFact(thm); 00927 // recordFact(thm); 00928 00929 DebugAssert(thm.isAbsLiteral(), 00930 "unitPropagation(): pushing non-literal to d_literals:\n " 00931 +thm.getExpr().toString()); 00932 Literal l(newLiteral(thm.getExpr())); 00933 DebugAssert(l.getValue() == 1, "unitPropagation: bad literal: " 00934 +l.toString()); 00935 d_literals.push_back(l); 00936 } 00937 00938 00939 bool SearchEngineFast::fixConflict() 00940 { 00941 TRACE_MSG("search basic", "FixConflict"); 00942 Theorem res, conf; 00943 d_conflictCount++; 00944 TRACE("conflicts", "found conflict # ", d_conflictCount, ""); 00945 IF_DEBUG(if(debugger.trace("impl graph verbose")) { 00946 d_conflictTheorem.printDebug(); 00947 }) 00948 00949 if (scopeLevel() == d_bottomScope) 00950 return false; 00951 else if(d_conflictTheorem.getScope() <= d_bottomScope) { 00952 d_decisionEngine->popTo(d_bottomScope); 00953 d_litsMaxScorePos = 0; // from decision engine 00954 clearLiterals(); 00955 return false; 00956 } 00957 00958 traceConflict(d_conflictTheorem); 00959 00960 if (d_lastConflictScope <= d_bottomScope) 00961 return false; 00962 00963 // If we have unit conflict clauses, then we have to bounce back to 00964 // the original scope and assert them. 00965 if(d_unitConflictClauses.size() > 0) { 00966 TRACE_MSG("search basic", "Found unit conflict clause"); 00967 d_decisionEngine->popTo(d_bottomScope); 00968 d_litsMaxScorePos = 0; // from decision engine 00969 clearLiterals(); 00970 for (vector<Clause>::reverse_iterator i = d_unitConflictClauses.rbegin(); 00971 i != d_unitConflictClauses.rend(); 00972 ++i) { 00973 //IF_DEBUG(checkAssumpDebug(i->getTheorem(), d_assumptions);) 00974 // The theorem of *i is, most likely, (OR lit); rewrite it to just `lit' 00975 Theorem thm = i->getTheorem(); 00976 if(thm.getExpr().isOr()) 00977 thm = d_commonRules->iffMP(thm, d_commonRules->rewriteOr(thm.getExpr())); 00978 enqueueFact(thm); 00979 commitFacts(); // Make sure facts propagate to DPs 00980 } 00981 d_unitConflictClauses.clear(); 00982 return true; // Let bcp take care of the rest. 00983 } 00984 00985 // Otherwise, we need to make our failure driven assertion. 00986 DebugAssert(!d_lastConflictClause.isNull(), ""); 00987 00988 // We need to calculate the backtracking level. 
We do this by 00989 // examining the decision levels of all the literals involved in the 00990 // last conflict clause. 00991 00992 Clause &c = d_lastConflictClause; 00993 Literal unit_lit; 00994 unsigned idx=0; 00995 unsigned current_dl = d_lastConflictScope; 00996 unsigned back_dl = d_bottomScope; 00997 for (unsigned i = 0; i < c.size(); ++i) { 00998 unsigned dl = c[i].getVar().getScope(); 00999 if (dl < current_dl) { 01000 if (dl > back_dl) 01001 back_dl = dl; 01002 } 01003 else { 01004 DebugAssert(unit_lit.getExpr().isNull(), 01005 "Only one lit from the current decision level is allowed.\n" 01006 "current_dl=" 01007 +int2string(current_dl)+", scopeLevel=" 01008 +int2string(scopeLevel()) 01009 +"\n l1 = " 01010 +unit_lit.toString() 01011 +"\n l2 = "+c[i].toString() 01012 +"\nConflict clause: "+c.toString()); 01013 unit_lit = c[i]; 01014 idx = i; 01015 } 01016 } 01017 01018 01019 // Now we have the backtracking decision level. 01020 DebugAssert(!unit_lit.getExpr().isNull(),"Need to have an FDA in " 01021 "conflict clause: "+c.toString()); 01022 d_decisionEngine->popTo(back_dl); 01023 d_litsMaxScorePos = 0; // from decision engine 01024 clearLiterals(); 01025 unitPropagation(c,idx); 01026 commitFacts(); // Make sure facts propagate to DPs 01027 return true; 01028 } 01029 01030 01031 void SearchEngineFast::enqueueFact(const Theorem& thm) { 01032 // d_core->addFact(thm); 01033 TRACE("search props", "SearchEngineFast::enqueueFact(", 01034 thm.getExpr(), ") {"); 01035 if(thm.isAbsLiteral()) { 01036 addLiteralFact(thm); 01037 } 01038 d_factQueue.push_back(thm); 01039 TRACE_MSG("search props", "SearchEngineFast::enqueueFact => }"); 01040 } 01041 01042 01043 void SearchEngineFast::setInconsistent(const Theorem& thm) { 01044 TRACE_MSG("search props", "SearchEngineFast::setInconsistent()"); 01045 d_factQueue.clear(); 01046 IF_DEBUG(debugger.counter("conflicts from SAT solver")++;) 01047 d_core->setInconsistent(thm); 01048 } 01049 01050 void SearchEngineFast::commitFacts() { 01051 for(vector<Theorem>::iterator i=d_factQueue.begin(), iend=d_factQueue.end(); 01052 i!=iend; ++i) { 01053 TRACE("search props", "commitFacts(", i->getExpr(), ")"); 01054 if(d_useEnqueueFact) 01055 d_core->enqueueFact(*i); 01056 else 01057 d_core->addFact(*i); 01058 } 01059 d_factQueue.clear(); 01060 } 01061 01062 01063 void SearchEngineFast::clearFacts() { 01064 TRACE_MSG("search props", "clearFacts()"); 01065 d_factQueue.clear(); 01066 } 01067 01068 01069 void SearchEngineFast::addNewClause(Clause &c) 01070 { 01071 DebugAssert(c.size() > 1, "New clauses should have size > 1"); 01072 d_clauses.push_back(c); 01073 updateLitCounts(c); 01074 // Set up the watch pointers to this clause: find two unassigned 01075 // literals (otherwise we shouldn't even receive it as a clause) 01076 size_t i=0, iend=c.size(); 01077 for(; i<iend && c[i].getValue() != 0; ++i); 01078 DebugAssert(i<iend, "SearchEngineFast::addNewClause:\n" 01079 "no unassigned literals in the clause:\nc = " 01080 +CompactClause(c).toString()); 01081 c.wp(0,i); ++i; 01082 for(; i<iend && c[i].getValue() != 0; ++i); 01083 DebugAssert(i<iend, "SearchEngineFast::addNewClause:\n" 01084 "Only one unassigned literal in the clause:\nc = " 01085 +CompactClause(c).toString()); 01086 c.wp(1,i); 01087 // Add the watched pointers to the appropriate lists 01088 for(int i=0; i<=1; i++) { 01089 Literal l(!c.watched(i)); 01090 // Add the pointer to l's list 01091 pair<Clause, int> p(c, i); 01092 wp(l).push_back(p); 01093 } 01094 } 01095 01096 inline bool TheoremEq(const Theorem& 
t1, const Theorem& t2) { 01097 DebugAssert(!t1.isNull() && !t2.isNull(), 01098 "analyzeUIPs() Null Theorem found"); 01099 return t1.getExpr() == t2.getExpr(); 01100 } 01101 01102 01103 //! Auxiliary function used in analyzeUIPs() 01104 /*! It processes a node and populates the relevant sets used in the 01105 * algorithm. 01106 * \sa analyzeUIPs() for more detail). 01107 */ 01108 static void processNode(const Theorem& thm, 01109 vector<Theorem>& lits, 01110 vector<Theorem>& gamma, 01111 vector<Theorem>& fringe, 01112 int& pending) { 01113 // Decrease the fan-out count 01114 int fanOutCount(thm.getCachedValue() - 1); 01115 thm.setCachedValue(fanOutCount); 01116 bool wasFlagged(thm.isFlagged()); 01117 thm.setFlag(); 01118 DebugAssert(fanOutCount >= 0, 01119 "analyzeUIPs(): node visited too many times: " 01120 +thm.toString()); 01121 if(fanOutCount == 0) { 01122 if(thm.getExpandFlag()) { 01123 if(wasFlagged) { 01124 TRACE("impl graph", "pending.erase(", thm.getExpr(), ")"); 01125 DebugAssert(pending > 0, 01126 "analyzeUIPs(): pending set shouldn't be empty here"); 01127 pending--; 01128 } 01129 TRACE("impl graph", "fringe.insert(", thm.getExpr(), ")"); 01130 fringe.push_back(thm); 01131 } else if(thm.getLitFlag()) { 01132 DebugAssert(thm.isAbsLiteral(), "analyzeUIPs(): bad flag on " 01133 +thm.toString()); 01134 if(wasFlagged) { 01135 TRACE("impl graph", "pending.erase(", thm.getExpr(), ")"); 01136 DebugAssert(pending > 0, 01137 "analyzeUIPs(): pending set shouldn't be empty here"); 01138 pending--; 01139 } 01140 TRACE("impl graph", "lits.insert(", thm.getExpr(), ")"); 01141 lits.push_back(thm); 01142 } else { 01143 if(!wasFlagged) { 01144 TRACE("impl graph", "gamma.insert(", thm.getExpr(), ")"); 01145 gamma.push_back(thm); 01146 } else { 01147 TRACE("impl graph", "already in gamma: ", thm.getExpr(), ""); 01148 } 01149 } 01150 } else { // Fan-out count is non-zero 01151 if(thm.getExpandFlag()) { 01152 // Too early to expand; stash in pending 01153 if(!wasFlagged) { 01154 pending++; 01155 TRACE("impl graph", "pending.insert(", thm.getExpr(), ")"); 01156 } else { 01157 TRACE("impl graph", "already in pending: ", thm.getExpr(), ""); 01158 } 01159 } else if(thm.getLitFlag()) { 01160 // It's a literal which goes into pending 01161 if(!wasFlagged) { 01162 pending++; 01163 TRACE("impl graph", "pending.insert(", thm.getExpr(), ")"); 01164 } else { 01165 TRACE("impl graph", "already in pending: ", thm.getExpr(), ""); 01166 } 01167 } else { 01168 if(!wasFlagged) { 01169 TRACE("impl graph", "gamma.insert(", thm.getExpr(), ")"); 01170 gamma.push_back(thm); 01171 } else { 01172 TRACE("impl graph", "already in gamma: ", thm.getExpr(), ""); 01173 } 01174 } 01175 } 01176 // FIXME: uniquify theorems in lits, gamma, and fringe by 01177 // expression; the smallest scope theorem should supersede all the 01178 // duplicates. Question: can we do it safely, without breaking the 01179 // UIP algorithm? 01180 } 01181 01182 /*! 01183 <strong>Finding UIPs (Unique Implication Pointers)</strong> 01184 01185 This is basically the same as finding hammocks of the subset of the 01186 implication graph composed of only the nodes from the current scope. 01187 A hammock is a portion of the graph which has a single source and/or 01188 sink such that removing that single node makes the graph 01189 disconnected. 01190 01191 Conceptually, the algorithm maintains four sets of nodes: literals 01192 (named <em>lits</em>), gamma, fringe, and pending. 
Literals are 01193 nodes whose expressions will become literals in the conflict clause 01194 of the current hammock, and the nodes in gamma are assumptions from 01195 which such conflict clause theorem is derived. Nodes in fringe are 01196 intermediate nodes which are ready to be "expanded" (see the 01197 algorithm description below). The pending nodes are those which are 01198 not yet ready to be processed (they later become literal or fringe 01199 nodes). 01200 01201 These sets are maintained as vectors, and are updated in such a way 01202 that the nodes in the vectors never repeat. The exception is the 01203 pending set, for which only a size counter is maintained. A node 01204 belongs to the pending set if it has been visited (isFlagged() 01205 method), and its fan-out count is non-zero (stored in the cache, 01206 getCachedValue()). In other words, pending nodes are those that have 01207 been visited, but not sufficient number of times. 01208 01209 Also, fringe is maintained as a pair of vectors. One vector is 01210 always the current fringe, and the other one is built when the 01211 current is processed. When processing of the current fringe is 01212 finished, it is cleared, and the other vector becomes the current 01213 fringe (that is, they swap roles). 01214 01215 A node is expanded if it is marked for expansion (getExpandFlag()). 01216 If its fan-out count is not yet zero, it is added to the set of 01217 pending nodes. 01218 01219 If a node has a literal flag (getLitFlag()), it goes into literals 01220 when its fan-out count reaches zero. Since this will be the last 01221 time the node is visited, it is added to the vector only once. 01222 01223 A node neither marked for expansion nor with the literal flag goes 01224 into the gamma set. It is added the first time the node is visited 01225 (isFlagged() returns false), and therefore, is added to the vector 01226 only once. This is an important distinction from the other sets, 01227 since a gamma-node may be used by several conflict clauses. 01228 01229 Clearing the gamma set after each UIP requires both clearing the 01230 vector and resetting all flags (clearAllFlags()). 01231 01232 <strong>The algorithm</strong> 01233 01234 <ol> 01235 01236 <li> Initially, the fringe contains exactly the predecessors of 01237 falseThm from the current scope which are ready to be expanded 01238 (fan-out count is zero). All the other predecessors of falseThm 01239 go to the appropriate sets of literals, gamma, and pending. 01240 01241 <li> If fringe.size() <= 1 and the set of pending nodes is empty, then 01242 the element in the fringe (if it's non-empty) is a UIP. Generate 01243 a conflict clause from the UIP and the literals (using gamma as 01244 the set of assumptions), empty the sets, and continue with the 01245 next step. Note, that the UIP remains in the fringe, and will be 01246 expanded in the next step. 01247 01248 The important exception: if the only element in the fringe is 01249 marked for expansion, then this is a false UIP (the SAT solver 01250 doesn't know about this node), and it should not appear in the 01251 conflict clause. In this case, simply proceed to step 3 as if 01252 nothing happened. 01253 01254 <li> If fringe.size()==0, stop (the set of pending nodes must also be 01255 empty at this point). Otherwise, for *every* node in the fringe, 01256 decrement the fan-out for each of its predecessors, and empty the 01257 fringe. 
Take the predecessors from the current scope, and add 01258 those to the fringe for which fan-out count is zero, and remove 01259 them from the set of pending nodes. Add the other predecessors 01260 from the current scope to the set of pending nodes. Add the 01261 remaining predecessors (not from the current scope) to the 01262 literals or gamma, as appropriate. Continue with step 2. 01263 01264 </ol> 01265 */ 01266 void SearchEngineFast::analyzeUIPs(const Theorem &falseThm, int conflictScope) 01267 { 01268 TRACE("impl graph", "analyzeUIPs(scope = ", conflictScope, ") {"); 01269 vector<Theorem> fringe[2]; // 2-element array of vectors (new & curr. fringe) 01270 unsigned curr(0), next(1); 01271 01272 int pending(0); 01273 vector<Theorem> lits; 01274 vector<Theorem> gamma; 01275 01276 Theorem start = falseThm; 01277 d_lastConflictClause = Clause(); 01278 d_lastConflictScope = conflictScope; 01279 start.clearAllFlags(); 01280 01281 TRACE("search full", "Analysing UIPs at level: ", conflictScope, ""); 01282 01283 // Initialize all the sets 01284 const Assumptions& a=start.getAssumptionsRef(); 01285 for(Assumptions::iterator i=a.begin(), iend=a.end(); i!=iend; ++i) { 01286 processNode(*i, lits, gamma, fringe[curr], pending); 01287 } 01288 01289 while (true) { 01290 TRACE_MSG("impl graph", "analyzeUIPs(): fringe size = " 01291 +int2string(fringe[curr].size()) 01292 +", pending size = "+int2string(pending)); 01293 // Wrap up a conflict clause if: 01294 // (1) There are no pending nodes 01295 // (2) The fringe has at most one element 01296 // (3) If fringe is not empty, its node should not be flagged for expansion 01297 if(fringe[curr].size() <= 1 && pending == 0 01298 && (fringe[curr].size() == 0 01299 || !fringe[curr].back().getExpandFlag())) { 01300 // Found UIP or end of graph. Generate conflict clause. 
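      // The UIP (if any) joins the literal set, a conflict clause theorem
      // is derived from 'lits' with 'gamma' as the assumptions, and the
      // resulting clause gets its watch pointers on the two literals from
      // the highest decision levels.  Unit clauses are stashed in
      // d_unitConflictClauses for later propagation; larger clauses go
      // into d_conflictClauses.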
01301 if(fringe[curr].size() > 0) 01302 lits.push_back(fringe[curr].back()); 01303 IF_DEBUG(if(debugger.trace("impl graph")) { 01304 ostream& os = debugger.getOS(); 01305 os << "Creating conflict clause:" 01306 << "\n start: " << start.getExpr() 01307 << "\n Lits: [\n"; 01308 for(size_t i=0; i<lits.size(); i++) 01309 os << " " << lits[i].getExpr() << "\n"; 01310 os << "]\n Gamma: [\n"; 01311 for(size_t i=0; i<gamma.size(); i++) 01312 os << " " << gamma[i].getExpr() << "\n"; 01313 os << "]" << endl; 01314 }) 01315 // Derive a theorem for the conflict clause 01316 Theorem clause = d_rules->conflictClause(start, lits, gamma); 01317 d_conflictClauseCount++; 01318 // Generate the actual clause and set it up 01319 Clause c(d_core, d_vm, clause, d_bottomScope, __FILE__, __LINE__); 01320 updateLitCounts(c); 01321 if (c.size() > 1) { 01322 // Set the watched pointers to the two literals with the 01323 // highest scopes 01324 int firstLit = 0; 01325 int secondLit = 1; 01326 int firstDL = c[0].getScope(); 01327 int secondDL = c[1].getScope(); 01328 // Invariant: firstDL >= secondDL 01329 if(firstDL < secondDL) { 01330 firstLit=1; secondLit=0; 01331 int tmp(secondDL); 01332 secondDL=firstDL; firstDL=tmp; 01333 } 01334 for(unsigned i = 2; i < c.size(); ++i) { 01335 int cur = c[i].getScope(); 01336 if(cur >= firstDL) { 01337 secondLit=firstLit; secondDL=firstDL; 01338 firstLit=i; firstDL=cur; 01339 } else if(cur > secondDL) { 01340 secondLit=i; secondDL=cur; 01341 } 01342 } 01343 01344 c.wp(0, firstLit); 01345 c.wp(1, secondLit); 01346 01347 // Add the watch pointers to the d_wp lists 01348 for(int i=0; i<=1; i++) { 01349 // Negated watched literal 01350 Literal l(!c.watched(i)); 01351 // Add the pointer to l's list 01352 pair<Clause, int> p(c, i); 01353 wp(l).push_back(p); 01354 } 01355 } 01356 TRACE("conflict clauses", 01357 "Conflict clause #"+int2string(d_conflictClauseCount) 01358 +": ", CompactClause(c), ""); 01359 if(c.size() == 1) { 01360 // Unit clause: stash it for later unit propagation 01361 TRACE("conflict clauses", "analyzeUIPs(): unit clause: ", 01362 CompactClause(c), ""); 01363 d_unitConflictClauses.push_back(c); 01364 } 01365 else { 01366 TRACE("conflict clauses", "analyzeUIPs(): conflict clause ", 01367 CompactClause(c), ""); 01368 DebugAssert(c.getScope() <= d_bottomScope, 01369 "Conflict clause should be at bottom scope."); 01370 d_conflictClauses->push_back(c); 01371 if (d_lastConflictClause.isNull()) { 01372 d_lastConflictClause = c; 01373 // IF_DEBUG(for(unsigned i=0; i<c.size(); ++i) 01374 // DebugAssert(c[i].getValue() == -1, "Bad conflict clause: " 01375 // +c.toString());) 01376 } 01377 } 01378 if(fringe[curr].size() > 0) { 01379 // This was a UIP. Leave it in the fringe for later expansion. 01380 IF_DEBUG(debugger.counter("UIPs")++;) 01381 start = fringe[curr].back(); 01382 lits.clear(); 01383 gamma.clear(); 01384 start.clearAllFlags(); 01385 } else { 01386 // No more conflict clauses, we are done. This is the only 01387 // way this function can return. 
01388 TRACE_MSG("impl graph", "analyzeUIPs => }"); 01389 return; 01390 } 01391 } 01392 // Now expand all the nodes in the fringe 01393 for(vector<Theorem>::iterator i=fringe[curr].begin(), 01394 iend=fringe[curr].end(); 01395 i!=iend; ++i) { 01396 const Assumptions& a=i->getAssumptionsRef(); 01397 for(Assumptions::iterator j=a.begin(), jend=a.end(); j!=jend; ++j) { 01398 processNode(*j, lits, gamma, fringe[next], pending); 01399 } 01400 } 01401 // Swap the current and next fringes 01402 fringe[curr].clear(); 01403 curr = 1 - curr; 01404 next = 1 - next; 01405 IF_DEBUG(if(pending > 0 && fringe[curr].size()==0) 01406 falseThm.printDebug();) 01407 DebugAssert(pending == 0 || fringe[curr].size() > 0, 01408 "analyzeUIPs(scope = " 01409 +int2string(conflictScope)+"): empty fringe"); 01410 } 01411 } 01412 01413 01414 //////////////////////////////// 01415 // End New Search Engine Code // 01416 //////////////////////////////// 01417 01418 01419 //! Redefine the counterexample generation. 01420 /*! FIXME: for now, it just dumps all the assumptions (same as 01421 * getAssumptions()). Eventually, it will simplify the related 01422 * formulas to TRUE, merge all the generated assumptions into 01423 * d_lastCounterExample, and call the parent's method. */ 01424 void SearchEngineFast::getCounterExample(std::vector<Expr>& assertions) { 01425 // This will not add anything, since the counterexample is empty, 01426 // but will check if we are allowed to be called 01427 SearchImplBase::getCounterExample(assertions); 01428 getAssumptions(assertions); 01429 } 01430 01431 01432 //! Notify the search engine about a new non-literal fact. 01433 /*! It should be called by TheoryCore::assertFactCore(). 01434 * 01435 * If the fact is an AND, we split it into individual conjuncts and 01436 * add them individually. 01437 * 01438 * If the fact is an OR, we check whether it's a CNF clause; that is, 01439 * whether all disjuncts are literals. If they are, we add it as a 01440 * CNF clause. 01441 * 01442 * Otherwise add the fact to d_nonLiterals as it is. 01443 */ 01444 void 01445 SearchEngineFast::addNonLiteralFact(const Theorem& thm) { 01446 TRACE("search", "addNonLiteralFact(", thm, ") {"); 01447 TRACE("search", "d_nonLiteralsSaved.size()=",d_nonLiteralsSaved.size(), 01448 "@"+int2string(scopeLevel())); 01449 //IF_DEBUG(checkAssumpDebug(thm, d_assumptions);) 01450 const Expr& e(thm.getExpr()); 01451 if(d_nonLiteralsSaved.count(e) > 0) { 01452 // We've seen this non-literal before. 
01453 TRACE_MSG("search", "addNonLiteralFact[skipping] => }"); 01454 IF_DEBUG(debugger.counter("skipped repeated non-literals")++;) 01455 return; 01456 } 01457 // Save the non-literal 01458 d_nonLiteralsSaved[e]=thm; 01459 bool isCNFclause(false); 01460 // Split conjunctions into individual assertions and add them to the 01461 // appropriate lists 01462 01463 int k = e.getKind(); 01464 if (k == AND_R || k == IFF_R || k == ITE_R) 01465 { 01466 d_circuits.push_back(new Circuit(this, thm)); 01467 } 01468 01469 else if(e.isAnd()) { 01470 for(int i=0, iend=e.arity(); i<iend; ++i) { 01471 Theorem t_i(d_commonRules->andElim(thm, i)); 01472 // Call enqueueFact(), not addFact(), since we are called from 01473 // addFact() here 01474 if(e[i].isAbsLiteral()) { 01475 d_core->enqueueFact(t_i); 01476 } 01477 else addNonLiteralFact(t_i); 01478 } 01479 } else { 01480 int unsetLits(0); // Count the number of unset literals 01481 size_t unitLit(0); // If the #unsetLits==1, this is the only unset literal 01482 vector<Theorem> thms; // collect proofs of !l_i for unit prop. 01483 if(e.isOr()) { 01484 isCNFclause = true; 01485 for(int i=0; isCNFclause && i<e.arity(); ++i) { 01486 isCNFclause=e[i].isAbsLiteral(); 01487 if(isCNFclause) { 01488 // Check the value of the literal 01489 Literal l(newLiteral(e[i])); 01490 if(l.getValue() > 0) // The entire clause is true; ignore it 01491 return; 01492 if(l.getValue() == 0) { // Found unset literal 01493 unsetLits++; unitLit = i; 01494 } else // Literal is false, collect the theorem for it 01495 thms.push_back(l.deriveTheorem()); 01496 } 01497 } 01498 } 01499 if(isCNFclause) { 01500 DebugAssert(e.arity() > 1, "Clause should have more than one literal"); 01501 // Check if clause is unit or unsat 01502 if(unsetLits==0) { // Contradiction 01503 TRACE("search", "contradictory clause:\n", 01504 CompactClause(Clause(d_core, d_vm, thm, scopeLevel())),""); 01505 setInconsistent(d_rules->conflictRule(thms, thm)); 01506 } else if(unsetLits==1) { // Unit clause: propagate literal 01507 TRACE("search", "unit clause, unitLit = "+int2string(unitLit)+":\n", 01508 CompactClause(Clause(d_core, d_vm, thm, scopeLevel())),""); 01509 d_core->enqueueFact(d_rules->unitProp(thms, thm, unitLit)); 01510 } else { // Wrap up the clause 01511 Clause c(d_core, d_vm, thm, scopeLevel(), __FILE__, __LINE__); 01512 IF_DEBUG(debugger.counter("CNF clauses added")++;) 01513 TRACE("search", "addNonLiteralFact: adding CNF: ", c, ""); 01514 addNewClause(c); 01515 } 01516 } else { 01517 TRACE("search", "addNonLiteralFact: adding non-literal: ", thm, ""); 01518 IF_DEBUG(debugger.counter("added non-literals")++;) 01519 d_nonLiterals.push_back(SmartCDO<Theorem>(d_core->getCM()->getCurrentContext(), thm)); 01520 // d_nonLiteralSimplified[thm.getExpr()] = false; 01521 } 01522 } 01523 TRACE_MSG("search", "addNonLiteralFact => }"); 01524 } 01525 01526 01527 //! Notify the search engine about a new literal fact. 01528 /*! 
01529 void
01530 SearchEngineFast::addLiteralFact(const Theorem& thm) {
01531   TRACE("search", "addLiteralFact(", thm, ")");
01532   // Save the value of the flag to restore it later
01533   bool useEF(d_useEnqueueFact);
01534   d_useEnqueueFact=true;
01535 
01536   DebugAssert(thm.isAbsLiteral(),
01537               "addLiteralFact: not a literal: " + thm.toString());
01538   //IF_DEBUG(checkAssumpDebug(thm, d_assumptions);)
01539   Literal l(newLiteral(thm.getExpr()));
01540   TRACE("search", "addLiteralFact: literal = ", l, "");
01541   // Only add the literal if it doesn't have any value; otherwise it's
01542   // either a contradiction, or it must already be in the list
01543   // FIXME: why did we need thm.getScope() != 0 ???
01544   if ((l.getValue() == 0 /* && thm.getScope() != 0 */)
01545       /* || (l.getValue() == 1 && l.getScope() > thm.getScope()) */) {
01546     l.setValue(thm, scopeLevel());
01547     DebugAssert(l.getExpr().isAbsLiteral(),
01548                 "addLiteralFact(): pushing non-literal to d_literals:\n "
01549                 +l.getExpr().toString());
01550     DebugAssert(l.getValue() == 1, "addLiteralFact(): l = "+l.toString());
01551     d_literals.push_back(l);
01552     d_literalSet.insert(l.getExpr(),l);
01553     // Immediately propagate the literal with BCP, unless the SAT
01554     // solver is already running
01555     if(!d_inCheckSAT) bcp();
01556 
01557     //   if (thm.getScope() != scopeLevel()) {
01558     //     IF_DEBUG(debugger.counter("addLiteralFact adds unreported lit")++;)
01559     //     d_unreportedLits.insert(l.getExpr(),thm,thm.getScope());
01560     //   }
01561   } else if(l.getValue() < 0) { // Contradiction, bcp will die anyway
01562     if(l.isNegative())
01563       setInconsistent(d_commonRules->contradictionRule(l.deriveTheorem(), thm));
01564     else
01565       setInconsistent(d_commonRules->contradictionRule(thm, l.deriveTheorem()));
01566   }
01567   d_useEnqueueFact=useEF;
01568 }
01569 
01570 /*! @brief Redefine newIntAssumption(): we need to add the new theorem
01571  * to the appropriate Literal */
01572 Theorem
01573 SearchEngineFast::newIntAssumption(const Expr& e) {
01574   Theorem thm(SearchImplBase::newIntAssumption(e));
01575   DebugAssert(thm.isAssump(), "Splitter should be an assumption:" + thm.toString());
01576   TRACE("search full", "Splitter: ", thm.toString(), "");
01577   const Expr& expr = thm.getExpr();
01578   Literal l(newLiteral(expr));
01579   if(l.getValue() == 0) {
01580     l.setValue(thm, scopeLevel());
01581     if(l.getExpr().isAbsLiteral()) {
01582       DebugAssert(l.getValue() == 1, "newIntAssumption(): l = "+l.toString());
01583       d_literals.push_back(l);
01584     }
01585     else
01586       d_litsAlive.push_back(l);
01587   }
01588 
01589 
01590   return thm;
01591 }
01592 
01593 bool
01594 SearchEngineFast::isAssumption(const Expr& e) {
01595   return (SearchImplBase::isAssumption(e)
01596           || (d_nonLiteralsSaved.count(e) > 0));
01597 }
01598 
01599 
01600 void
01601 SearchEngineFast::addSplitter(const Expr& e, int priority) {
01602   // SearchEngine::addSplitter(e, priority);
01603   DebugAssert(e.isAbsLiteral(), "SearchEngineFast::addSplitter("+e.toString()+")");
01604   Literal lit(newLiteral(e));
01605   d_dpSplitters.push_back(Splitter(lit));
01606   if(priority != 0) {
01607     d_litSortCount--;
01608     lit.score() += priority*10;
01609   }
01610   if(!lit.added()) {
01611     TRACE("search literals", "addSplitter(): adding literal ", lit, " to d_litsByScores");
01612     d_litsByScores.push_back(lit);
01613     lit.added()=true;
01614     if(priority == 0) d_litSortCount--;
01615   }
01616   if(d_litSortCount < 0) {
01617     ::stable_sort(d_litsByScores.begin(), d_litsByScores.end(), compareLits);
01618     d_litSortCount=d_litsByScores.size();
01619   }
01620   TRACE("search splitters","addSplitter => ", lits2str(d_litsByScores),"");
01621 }
01622 
01623 
01624 QueryResult SearchEngineFast::checkValidMain(const Expr& e2)
01625 {
01626   // Propagate the literals asserted before checkValid()
01627   for(CDMap<Expr,Literal>::iterator i=d_literalSet.begin(),
01628         iend=d_literalSet.end(); i!=iend; ++i)
01629     d_literals.push_back((*i).second);
01630 
01631   // Run the SAT solver
01632   QueryResult result = checkSAT();
01633 
01634   Theorem res;
01635   if (result == UNSATISFIABLE)
01636     res = d_conflictTheorem;
01637   else if (result == SATISFIABLE) {
01638     // Set counter-example
01639     vector<Expr> a;
01640     unsigned i;
01641     Theorem thm;
01642 
01643     d_lastCounterExample.clear();
01644     for (i=d_nonlitQueryStart; i < d_nonlitQueryEnd; ++i) {
01645       thm = d_nonLiterals[i].get();
01646       DebugAssert(thm.getExpr().isTrue(),
01647                   "original nonLiteral doesn't simplify to true");
01648       thm.getLeafAssumptions(a);
01649       for (vector<Expr>::iterator i=a.begin(), iend=a.end(); i != iend; ++i) {
01650         d_lastCounterExample[*i] = true;
01651       }
01652       a.clear();
01653     }
01654     for (i=d_clausesQueryStart; i < d_clausesQueryEnd; ++i) {
01655       thm = simplify(((Clause)d_clauses[i]).getTheorem());
01656       DebugAssert(thm.getExpr().isTrue(),
01657                   "original clause doesn't simplify to true");
01658       thm.getLeafAssumptions(a);
01659       for (vector<Expr>::iterator i=a.begin(), iend=a.end(); i != iend; ++i) {
01660         d_lastCounterExample[*i] = true;
01661       }
01662       a.clear();
01663     }
01664   }
01665   else return result;
01666 
01667   processResult(res, e2);
01668 
01669   if (result == UNSATISFIABLE) {
01670     d_core->getCM()->popto(d_bottomScope);
01671     d_litsMaxScorePos = 0; // from decision engine
01672 
01673     // Clear data structures
01674     d_unitConflictClauses.clear();
01675     clearLiterals();
01676     clearFacts();
01677 
01678     Theorem e_iff_e2(d_commonRules->iffContrapositive(d_simplifiedThm));
01679     d_lastValid =
01680       d_commonRules->iffMP(d_lastValid, d_commonRules->symmetryRule(e_iff_e2));
01681     IF_DEBUG(checkAssumpDebug(d_lastValid, d_assumptions);)
01682     TRACE_MSG("search terse", "checkValid => true}");
01683     TRACE("search", "checkValid => true; theorem = ", d_lastValid, "}");
01684     d_core->getCM()->pop();
01685   }
01686   else {
01687     TRACE_MSG("search terse", "checkValid => false}");
01688     TRACE_MSG("search", "checkValid => false; }");
01689     DebugAssert(d_unitConflictClauses.size() == 0, "checkValid(): d_unitConflictClauses postcondition violated");
01690     DebugAssert(d_literals.size() == 0, "checkValid(): d_literals postcondition violated");
01691     DebugAssert(d_factQueue.empty(), "checkValid(): d_factQueue postcondition violated");
01692   }
01693   return result;
01694 }
01695 
01696 
01697 QueryResult SearchEngineFast::checkValidInternal(const Expr& e)
01698 {
01699   DebugAssert(d_unitConflictClauses.size() == 0, "checkValid(): d_unitConflictClauses precondition violated");
01700   DebugAssert(d_factQueue.empty(), "checkValid(): d_factQueue precondition violated");
01701 
01702   TRACE("search", "checkValid(", e, ") {");
01703   TRACE_MSG("search terse", "checkValid() {");
01704 
01705   if (!e.getType().isBool()) {
01706     throw TypecheckException
01707       ("checking validity of a non-boolean expression:\n\n "
01708        + e.toString()
01709        + "\n\nwhich has the following type:\n\n "
01710        + e.getType().toString());
01711   }
01712 
01713   // A successful query should leave the context unchanged
01714   d_core->getCM()->push();
01715   d_conflictClauseManager.setRestorePoint();
01716   d_bottomScope = scopeLevel();
01717 
01718   // First, simplify the NEGATION of the given expression: that's what
01719   // we'll assert
01720   d_simplifiedThm = d_core->getExprTrans()->preprocess(e.negate());
01721   TRACE("search", "checkValid: simplifiedThm = ", d_simplifiedThm, "");
01722 
01723   const Expr& not_e2 = d_simplifiedThm.get().getRHS();
01724   Expr e2 = not_e2.negate();
01725 
01726   // Assert not_e2 if it's not already asserted
01727   TRACE_MSG("search terse", "checkValid: Asserting !e: ");
01728   TRACE("search", "checkValid: Asserting !e: ", not_e2, "");
01729   Theorem not_e2_thm;
01730   d_nonlitQueryStart = d_nonLiterals.size();
01731   d_clausesQueryStart = d_clauses.size();
01732   if(d_assumptions.count(not_e2) == 0) {
01733     not_e2_thm = newUserAssumption(not_e2);
01734   } else {
01735     not_e2_thm = d_assumptions[not_e2];
01736   }
01737   // d_core->addFact(not_e2_thm);
01738   d_nonlitQueryEnd = d_nonLiterals.size();
01739   d_clausesQueryEnd = d_clauses.size();
01740 
01741   // Reset the splitter counter. This will force a call to
01742   // updateLitScores() the first time we need to find a splitter, and
01743   // clean out junk from the previous calls to checkValid().
01744   d_splitterCount=0;
01745 
01746   return checkValidMain(e2);
01747 }
01748 
01749 
01750 QueryResult SearchEngineFast::restartInternal(const Expr& e)
01751 {
01752   DebugAssert(d_unitConflictClauses.size() == 0, "restart(): d_unitConflictClauses precondition violated");
01753   DebugAssert(d_factQueue.empty(), "restart(): d_factQueue precondition violated");
01754 
01755   TRACE("search", "restart(", e, ") {");
01756   TRACE_MSG("search terse", "restart() {");
01757 
01758   if (!e.getType().isBool()) {
01759     throw TypecheckException
01760       ("argument to restart is a non-boolean expression:\n\n "
01761        + e.toString()
01762        + "\n\nwhich has the following type:\n\n "
01763        + e.getType().toString());
01764   }
01765 
01766   if (d_bottomScope == 0) {
01767     throw Exception("Call to restart with no current query");
01768   }
01769   d_core->getCM()->popto(d_bottomScope);
01770 
01771   Expr e2 = d_simplifiedThm.get().getRHS().negate();
01772 
01773   TRACE_MSG("search terse", "restart: Asserting e: ");
01774   TRACE("search", "restart: Asserting e: ", e, "");
01775   if(d_assumptions.count(e) == 0) {
01776     d_core->addFact(newUserAssumption(e));
01777   }
01778 
01779   return checkValidMain(e2);
01780 }
01781 
01782 /*!
01783  * The purpose of this method is to mark up the assumption graph of
01784  * the FALSE Theorem (conflictThm) for the later UIP analysis. The
01785  * required flags for each assumption in the graph are:
01786  *
01787  * <strong>ExpandFlag:</strong> whether to "expand" the node or not;
01788  * that is, whether to traverse its own assumptions or to treat the
01789  * current node as a final assumption (either as a conflict clause
01790  * literal, or a context assumption from \f$\Gamma\f$)
01791  *
01792  * <strong>LitFlag:</strong> the node (actually, its inverse) is a
01793  * literal of the conflict clause
01794  *
01795  * <strong>CachedValue:</strong> the "fanout" count, how many nodes in
01796  * the assumption graph are derived from the current node.
01797  *
01798  * INVARIANTS (after the method returns):
01799  *
01800  * -# The current scope is the "true" conflict scope,
01801  *    i.e. scopeLevel() == conflictThm.getScope()
01802  * -# The literals marked with LitFlag (CC literals) are known to the
01803  *    SAT solver, i.e. their Literal class has a value == 1
01804  * -# The only CC literal from the current scope is the latest splitter
01805  *
01806  * ASSUMPTIONS:
01807  *
01808  * -# The Theorem scope of an assumption is the same as its Literal scope;
01809  *    i.e. if thm is a splitter, then
01810  *    thm.getScope() == newLiteral(thm.getExpr()).getScope()
01811  *
01812  * Algorithm:
01813  *
01814  * First, backtrack to the scope level of the conflict. Then,
01815  * traverse the implication graph until we hit either a literal known
01816  * to the SAT solver at a lower scope:
01817  * newLiteral(e).getScope()<scopeLevel(), a splitter (assumption), or a
01818  * fact from the bottom scope. The literals from the first two
01819  * categories are marked with LitFlag (they'll comprise the conflict
01820  * clause), and the bottom scope facts will be the assumptions in the
01821  * conflict clause's theorem.
01822  *
01823  * The traversed nodes are cached by the .isFlagged() flag, and
01824  * subsequent hits only increase the fanout count of the node.
01825  *
01826  * Notice that there can only be one literal in the conflict clause
01827  * from the current scope. Also, even if some intermediate literals
01828  * were delayed by the DPs and reported to the SAT solver at or below
01829  * the conflict scope (which may be below their "true" Theorem scope),
01830  * they will be skipped, and their assumptions will be used.
01831  *
01832  * In other words, it is safe to backtrack to the "true" conflict
01833  * level, since, in the worst case, we traverse the graph all the way
01834  * to the splitters, and make a conflict clause out of those. The
01835  * hope is, however, that this wouldn't happen too often.
01836  */
01837 void SearchEngineFast::traceConflict(const Theorem& conflictThm) {
01838   TRACE("impl graph", "traceConflict(", conflictThm.getExpr(), ") {");
01839 
01840   // Always process conflict at its true scope or the bottom scope,
01841   // whichever is greater.
01842   DebugAssert(conflictThm.getScope() <= scopeLevel(),
01843               "conflictThm.getScope()="+int2string(conflictThm.getScope())
01844               +", scopeLevel()="+int2string(scopeLevel()));
01845   if(conflictThm.getScope() < scopeLevel()) {
01846     int scope(conflictThm.getScope());
01847     if(scope < d_bottomScope) scope = d_bottomScope;
01848     d_decisionEngine->popTo(scope);
01849   }
01850 
01851   if(scopeLevel() <= d_bottomScope) {
01852     // This is a top-level conflict, nothing to analyze.
01853     TRACE_MSG("impl graph", "traceConflict[bottom scope] => }");
01854     return;
01855   }
01856 
01857   // DFS stack
01858   vector<Theorem> stack;
01859   // Max assumption scope for the contradiction
01860   int maxScope(d_bottomScope);
01861   // Collect potential top-level splitters
01862   IF_DEBUG(vector<Theorem> splitters;)
01863   TRACE("impl graph", "traceConflict: maxScope = ", maxScope, "");
01864 
01865   conflictThm.clearAllFlags();
01866   conflictThm.setExpandFlag(true);
01867   conflictThm.setCachedValue(0);
01868 
01869   const Assumptions& assump = conflictThm.getAssumptionsRef();
01870   for(Assumptions::iterator i=assump.begin(),iend=assump.end();i!=iend;++i) {
01871     TRACE("impl graph", "traceConflict: adding ", *i, "");
01872     stack.push_back(*i);
01873   }
01874 
01875   // Do the non-recursive DFS, mark up the assumption graph
01876   IF_DEBUG(Literal maxScopeLit;)
01877   while(stack.size() > 0) {
01878     Theorem thm(stack.back());
01879     stack.pop_back();
01880     TRACE("impl graph", "traceConflict: while() { thm = ", thm, "");
01881     if (thm.isFlagged()) {
01882       // We've seen this theorem before. Update fanout count.
01883       thm.setCachedValue(thm.getCachedValue() + 1);
01884       TRACE("impl graph", "Found again: ", thm.getExpr().toString(), "");
01885       TRACE("impl graph", "With fanout: ", thm.getCachedValue(), "");
01886     }
01887     else {
01888       // This is a new theorem. Process it.
01889       thm.setCachedValue(1);
01890       thm.setFlag();
01891       thm.setLitFlag(false); // Clear this flag (it may be set later)
01892       bool expand(false);
01893       int scope = thm.getScope();
01894       bool isAssump = thm.isAssump();
01895 
01896       IF_DEBUG({
01897         int s = scope;
01898         if(thm.isAbsLiteral()) {
01899           Literal l(newLiteral(thm.getExpr()));
01900           if(l.getValue() == 1) s = l.getScope();
01901         }
01902         // maxScope will be reset: clear the splitters
01903         if(s > maxScope) splitters.clear();
01904       })
01905 
01906       if(thm.isAbsLiteral()) {
01907         Literal l(newLiteral(thm.getExpr()));
01908         bool isTrue(l.getValue()==1);
01909         if(isTrue) scope = l.getScope();
01910         if(!isAssump && (!isTrue || scope == scopeLevel()))
01911           expand=true;
01912         else if(scope > d_bottomScope) {// Found a literal of a conflict clause
01913           IF_DEBUG(if(scope >= maxScope) splitters.push_back(thm);)
01914           thm.setLitFlag(true);
01915         }
01916       } else {
01917         DebugAssert(scope <= d_bottomScope || !isAssump,
01918                     "SearchEngineFast::traceConflict: thm = "
01919                     +thm.toString());
01920         if(!isAssump && scope > d_bottomScope)
01921           expand=true;
01922       }
01923 
01924       if(scope > maxScope) {
01925         maxScope = scope;
01926         IF_DEBUG(maxScopeLit = newLiteral(thm.getExpr());)
01927         TRACE("impl graph", "traceConflict: maxScope = ", maxScope, "");
01928       }
01929 
01930       if(expand) {
01931         DebugAssert(!thm.isAssump(),
01932                     "traceConflict: thm = "+thm.toString());
01933         thm.setExpandFlag(true);
01934         const Assumptions& assump = thm.getAssumptionsRef();
01935         for(Assumptions::iterator i=assump.begin(),iend=assump.end();
01936             i!=iend; ++i) {
01937           TRACE("impl graph", "traceConflict: adding ", *i, "");
01938           stack.push_back(*i);
01939         }
01940       } else
01941         thm.setExpandFlag(false);
01942     }
01943     TRACE_MSG("impl graph", "traceConflict: end of while() }");
01944   }
01945   IF_DEBUG(if(maxScope != scopeLevel()) conflictThm.printDebug();)
01946   DebugAssert(maxScope == scopeLevel(), "maxScope="+int2string(maxScope)
01947               +", scopeLevel()="+int2string(scopeLevel())
01948               +"\n maxScopeLit = "+maxScopeLit.toString());
01949   IF_DEBUG(
01950     if(!(maxScope == d_bottomScope || splitters.size() == 1)) {
01951       conflictThm.printDebug();
01952       ostream& os = debugger.getOS();
01953       os << "\n\nsplitters = [";
01954       for(size_t i=0; i<splitters.size(); ++i)
01955         os << splitters[i] << "\n";
01956       os << "]" << endl;
01957     }
01958   )
01959   DebugAssert(maxScope == d_bottomScope || splitters.size() == 1,
01960               "Wrong number of splitters found at maxScope="
01961               +int2string(maxScope)
01962               +": "+int2string(splitters.size())+" splitters.");
01963   d_lastConflictScope = maxScope;
01964   analyzeUIPs(conflictThm, maxScope);
01965   TRACE_MSG("impl graph", "traceConflict => }");
01966 }
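/*
 * [Editorial illustration -- not part of the original search_fast.cpp]
 * traceConflict() above walks the assumption graph of the conflicting
 * theorem with a non-recursive DFS: nodes already seen only get their
 * "fanout" count bumped, while new nodes are marked and classified as
 * either expandable (their own premises are pushed onto the stack) or
 * final (conflict-clause literals / bottom-scope assumptions). The sketch
 * below shows just that traversal pattern on a hypothetical, simplified
 * Node type; it is an assumption made for illustration only and is not
 * the CVC3 Theorem class or its flag API.
 */
#include <vector>

namespace conflict_trace_sketch {

struct Node {
  std::vector<Node*> assumptions; // premises this node was derived from
  int  fanout  = 0;               // how many derived nodes reached this one
  bool visited = false;           // analogue of Theorem::isFlagged()
  bool expand  = false;           // analogue of the ExpandFlag
};

// Mark up the graph rooted at 'conflict': every reachable node gets a
// fanout count; nodes for which shouldExpand() holds have their premises
// traversed, all others are treated as leaves of the analysis.
template <class Pred>
void markAssumptionGraph(Node& conflict, Pred shouldExpand)
{
  std::vector<Node*> stack(conflict.assumptions.begin(),
                           conflict.assumptions.end());
  while (!stack.empty()) {
    Node* n = stack.back();
    stack.pop_back();
    ++n->fanout;
    if (n->visited) continue;      // seen before: only the fanout changes
    n->visited = true;
    n->expand  = shouldExpand(*n);
    if (n->expand)                 // expandable: descend into its premises
      stack.insert(stack.end(), n->assumptions.begin(), n->assumptions.end());
  }
}

} // namespace conflict_trace_sketch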