Tune search at very long time control
This patch is the result of tuning done by user @candirufish over 150k games.

Since the tuned values were really interesting and touched heuristics
that are known for their non-linear scaling, I decided to run a
fixed-length LTC match, even though the STC test was really bad (which
was expected). After seeing the results of the LTC match, I also ran a
VLTC (very long time control) SPRT test, which passed.
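
As a quick sanity check on the SPRT bounds quoted in the results below: they
follow from Wald's sequential test with error rates alpha = beta = 0.05. This
is only a sketch of that arithmetic, not fishtest's actual code (which also
models the pentanomial game-pair distribution):

    // Sketch: derive the (-2.94, 2.94) LLR stopping bounds from Wald's SPRT
    // with alpha = beta = 0.05. Illustrative only, not fishtest code.
    #include <cmath>
    #include <cstdio>

    int main() {
        const double alpha = 0.05, beta = 0.05;       // type I / type II error rates
        double upper = std::log((1 - beta) / alpha);  // ~ +2.944: accept H1 (patch is good)
        double lower = std::log(beta / (1 - alpha));  // ~ -2.944: accept H0 (patch is not good)
        std::printf("LLR bounds: (%.2f, %.2f)\n", lower, upper);
        return 0;
    }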

The main difference is in extensions: this patch allows many more
singular/double extensions, both by allowing them at lower depths and by
using smaller margins (see the sketch below).
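
For context, a self-contained toy illustration of the relaxed gate, with the
old values in comments and the tuned values inline. This is a simplification
of the hunks in the diff below; the real code has additional TT-bound,
exclusion and move-ordering conditions, and the types here are reduced to
plain ints:

    // Toy sketch of the singular/double extension decision (values from the
    // diff below; old values in comments). Not a drop-in replacement.
    using Value = int;
    using Depth = int;

    // Returns 0 (no extension), 1 (singular) or 2 (double extension), given
    // `value`, the score of a reduced search over all moves except the ttMove.
    int singular_extension(bool PvNode, bool ttPv, Depth depth, Value ttValue,
                           Value value, int doubleExtensions) {
        if (depth < 4 + 2 * (PvNode && ttPv))      // was: depth < 6 + ...
            return 0;
        Value singularBeta = ttValue - 3 * depth;  // was: ttValue - 4 * depth
        if (value >= singularBeta)
            return 0;                              // ttMove is not singular
        if (   !PvNode
            && value < singularBeta - 26           // was: singularBeta - 52
            && doubleExtensions <= 8)
            return 2;                              // double extension
        return 1;                                  // singular extension
    }

Lower depth thresholds and smaller margins mean the ttMove is tested for
singularity earlier and passes the singular (and double-extension) test more
often.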

Failed STC:
https://tests.stockfishchess.org/tests/view/620d66643ec80158c0cd3b46
LLR: -2.94 (-2.94,2.94) <0.00,2.50>
Total: 4968 W: 1194 L: 1398 D: 2376
Ptnml(0-2): 47, 633, 1294, 497, 13

Performed well at LTC in a fixed-length match:
https://tests.stockfishchess.org/tests/view/620d66823ec80158c0cd3b4a
ELO: 3.36 +-1.8 (95%) LOS: 100.0%
Total: 30000 W: 7966 L: 7676 D: 14358
Ptnml(0-2): 36, 2936, 8755, 3248, 25

Passed VLTC SPRT test:
https://tests.stockfishchess.org/tests/view/620da11a26f5b17ec884f939
LLR: 2.96 (-2.94,2.94) <0.50,3.00>
Total: 4400 W: 1326 L: 1127 D: 1947
Ptnml(0-2): 13, 309, 1348, 526, 4
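
As a sanity check on the LTC match above, the central Elo value can be
reproduced from the raw W/L/D totals with the standard logistic model. This
is a sketch; fishtest's reported error bars come from a pentanomial model
over game pairs:

    #include <cmath>
    #include <cstdio>

    int main() {
        // LTC fixed-length match totals from above
        double W = 7966, L = 7676, D = 14358;
        double score = (W + D / 2) / (W + L + D);       // 0.50483
        double elo = -400 * std::log10(1 / score - 1);  // ~3.36, matching the report
        std::printf("score = %.5f, elo = %.2f\n", score, elo);
        return 0;
    }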

closes #3937

Bench: 6318903
Vizvezdenec authored and snicolet committed Feb 17, 2022
1 parent 3ec6e1d commit 84b1940
Showing 1 changed file with 44 additions and 44 deletions: src/search.cpp
@@ -63,15 +63,15 @@ namespace {

   // Futility margin
   Value futility_margin(Depth d, bool improving) {
-    return Value(147 * (d - improving));
+    return Value(168 * (d - improving));
   }

   // Reductions lookup table, initialized at startup
   int Reductions[MAX_MOVES]; // [depth or moveNumber]

   Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
     int r = Reductions[d] * Reductions[mn];
-    return (r + 1627 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 992);
+    return (r + 1463 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 1010);
   }

   constexpr int futility_move_count(bool improving, Depth depth) {
@@ -80,7 +80,7 @@ namespace {

   // History and stats update bonus, based on depth
   int stat_bonus(Depth d) {
-    return std::min((8 * d + 281) * d - 241 , 1949);
+    return std::min((9 * d + 270) * d - 311 , 2145);
   }

   // Add a small random component to draw evaluations to avoid 3-fold blindness
@@ -157,7 +157,7 @@ namespace {
 void Search::init() {

   for (int i = 1; i < MAX_MOVES; ++i)
-      Reductions[i] = int((21.14 + std::log(Threads.size()) / 2) * std::log(i));
+      Reductions[i] = int((20.81 + std::log(Threads.size()) / 2) * std::log(i));
 }

@@ -303,10 +303,10 @@ void Thread::search() {

   multiPV = std::min(multiPV, rootMoves.size());

-  complexityAverage.set(211, 1);
+  complexityAverage.set(202, 1);

   trend = SCORE_ZERO;
-  optimism[ us] = Value(33);
+  optimism[ us] = Value(39);
   optimism[~us] = -optimism[us];

   int searchAgainCounter = 0;
@@ -349,16 +349,16 @@ void Thread::search() {
          if (rootDepth >= 4)
          {
              Value prev = rootMoves[pvIdx].averageScore;
-             delta = Value(19) + int(prev) * prev / 18321;
+             delta = Value(16) + int(prev) * prev / 19178;
              alpha = std::max(prev - delta,-VALUE_INFINITE);
              beta  = std::min(prev + delta, VALUE_INFINITE);

              // Adjust trend and optimism based on root move's previousScore
-             int tr = sigmoid(prev, 4, 11, 92, 119, 1);
+             int tr = sigmoid(prev, 3, 8, 90, 125, 1);
              trend = (us == WHITE ?  make_score(tr, tr / 2)
                                   : -make_score(tr, tr / 2));

-             int opt = sigmoid(prev, 9, 18, 115, 12250, 187);
+             int opt = sigmoid(prev, 8, 17, 144, 13966, 183);
              optimism[ us] = Value(opt);
              optimism[~us] = -optimism[us];
          }
@@ -459,17 +459,17 @@ void Thread::search() {
              && !Threads.stop
              && !mainThread->stopOnPonderhit)
          {
-             double fallingEval = (66 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
-                                      +  6 * (mainThread->iterValue[iterIdx] - bestValue)) / 809.70;
+             double fallingEval = (69 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
+                                      +  6 * (mainThread->iterValue[iterIdx] - bestValue)) / 781.4;
              fallingEval = std::clamp(fallingEval, 0.5, 1.5);

              // If the bestMove is stable over several iterations, reduce time accordingly
-             timeReduction = lastBestMoveDepth + 8 < completedDepth ? 1.73 : 0.94;
-             double reduction = (1.66 + mainThread->previousTimeReduction) / (2.35 * timeReduction);
+             timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.63 : 0.73;
+             double reduction = (1.56 + mainThread->previousTimeReduction) / (2.20 * timeReduction);
              double bestMoveInstability = 1.073 + std::max(1.0, 2.25 - 9.9 / rootDepth)
                                                 * totBestMoveChanges / Threads.size();
              int complexity = mainThread->complexityAverage.value();
-             double complexPosition = std::clamp(1.0 + (complexity - 293) / 1525.0, 0.5, 1.5);
+             double complexPosition = std::clamp(1.0 + (complexity - 326) / 1618.1, 0.5, 1.5);

              double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;

@@ -490,7 +490,7 @@
              }
              else if (   Threads.increaseDepth
                       && !mainThread->ponder
-                      && Time.elapsed() > totalTime * 0.49)
+                      && Time.elapsed() > totalTime * 0.43)
                       Threads.increaseDepth = false;
              else
                       Threads.increaseDepth = true;
@@ -766,7 +766,7 @@ namespace {
     // margin and the improving flag are used in various pruning heuristics.
     improvement =   (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
                   : (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
-                  :                                    184;
+                  :                                    175;

     improving = improvement > 0;
     complexity = abs(ss->staticEval - (us == WHITE ? eg_value(pos.psq_score()) : -eg_value(pos.psq_score())));
@@ -777,8 +777,8 @@ namespace {
     // If eval is really low check with qsearch if it can exceed alpha, if it can't,
     // return a fail low.
     if (   !PvNode
-        && depth <= 6
-        && eval < alpha - 486 - 314 * depth * depth)
+        && depth <= 7
+        && eval < alpha - 348 - 258 * depth * depth)
     {
         value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
         if (value < alpha)
@@ -791,24 +791,24 @@ namespace {
         && depth < 8
         && eval - futility_margin(depth, improving) - (ss-1)->statScore / 256 >= beta
         && eval >= beta
-        && eval < 22266) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
+        && eval < 26305) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
         return eval;

     // Step 9. Null move search with verification search (~22 Elo)
     if (   !PvNode
         && (ss-1)->currentMove != MOVE_NULL
-        && (ss-1)->statScore < 15075
+        && (ss-1)->statScore < 14695
         && eval >= beta
         && eval >= ss->staticEval
-        && ss->staticEval >= beta - 18 * depth - improvement / 19 + 215 + complexity / 30
+        && ss->staticEval >= beta - 15 * depth - improvement / 15 + 198 + complexity / 28
         && !excludedMove
         && pos.non_pawn_material(us)
         && (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
     {
         assert(eval - beta >= 0);

         // Null move dynamic reduction based on depth, eval and complexity of position
-        Depth R = std::min(int(eval - beta) / 184, 4) + depth / 3 + 4 - (complexity > 799);
+        Depth R = std::min(int(eval - beta) / 147, 5) + depth / 3 + 4 - (complexity > 753);

         ss->currentMove = MOVE_NULL;
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -844,7 +844,7 @@ namespace {
         }
     }

-    probCutBeta = beta + 204 - 52 * improving;
+    probCutBeta = beta + 179 - 46 * improving;

     // Step 10. ProbCut (~4 Elo)
     // If we have a good enough capture and a reduced search returns a value
@@ -920,7 +920,7 @@ namespace {
 moves_loop: // When in check, search starts here

     // Step 12. A small Probcut idea, when we are in check (~0 Elo)
-    probCutBeta = beta + 401;
+    probCutBeta = beta + 481;
     if (   ss->inCheck
         && !PvNode
         && depth >= 2
@@ -1014,14 +1014,14 @@ namespace {
              if (   !pos.empty(to_sq(move))
                  && !givesCheck
                  && !PvNode
-                 && lmrDepth < 7
+                 && lmrDepth < 6
                  && !ss->inCheck
-                 && ss->staticEval + 424 + 138 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
-                  + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 7 < alpha)
+                 && ss->staticEval + 281 + 179 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+                  + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
                  continue;

              // SEE based pruning (~9 Elo)
-             if (!pos.see_ge(move, Value(-214) * depth))
+             if (!pos.see_ge(move, Value(-203) * depth))
                  continue;
          }
          else
@@ -1040,11 +1040,11 @@ namespace {
              // Futility pruning: parent node (~9 Elo)
              if (   !ss->inCheck
                  && lmrDepth < 11
-                 && ss->staticEval + 147 + 125 * lmrDepth + history / 64 <= alpha)
+                 && ss->staticEval + 122 + 138 * lmrDepth + history / 60 <= alpha)
                  continue;

              // Prune moves with negative SEE (~3 Elo)
-             if (!pos.see_ge(move, Value(-23 * lmrDepth * lmrDepth - 31 * lmrDepth)))
+             if (!pos.see_ge(move, Value(-25 * lmrDepth * lmrDepth - 20 * lmrDepth)))
                  continue;
          }
      }
@@ -1059,15 +1059,15 @@ namespace {
          // a reduced search on all the other moves but the ttMove and if the
          // result is lower than ttValue minus a margin, then we will extend the ttMove.
          if (   !rootNode
-             && depth >= 6 + 2 * (PvNode && tte->is_pv())
+             && depth >= 4 + 2 * (PvNode && tte->is_pv())
              && move == ttMove
              && !excludedMove // Avoid recursive singular search
           /* && ttValue != VALUE_NONE Already implicit in the next condition */
              && abs(ttValue) < VALUE_KNOWN_WIN
              && (tte->bound() & BOUND_LOWER)
              && tte->depth() >= depth - 3)
          {
-             Value singularBeta = ttValue - 4 * depth;
+             Value singularBeta = ttValue - 3 * depth;
              Depth singularDepth = (depth - 1) / 2;

              ss->excludedMove = move;
@@ -1080,7 +1080,7 @@ namespace {

                  // Avoid search explosion by limiting the number of double extensions
                  if (   !PvNode
-                     && value < singularBeta - 52
+                     && value < singularBeta - 26
                      && ss->doubleExtensions <= 8)
                      extension = 2;
              }
@@ -1100,15 +1100,15 @@ namespace {

          // Check extensions (~1 Elo)
          else if (   givesCheck
-                  && depth > 8
-                  && abs(ss->staticEval) > 81)
+                  && depth > 9
+                  && abs(ss->staticEval) > 71)
              extension = 1;

          // Quiet ttMove extensions (~0 Elo)
          else if (   PvNode
                   && move == ttMove
                   && move == ss->killers[0]
-                  && (*contHist[0])[movedPiece][to_sq(move)] >= 7546)
+                  && (*contHist[0])[movedPiece][to_sq(move)] >= 5491)
              extension = 1;
      }

@@ -1170,18 +1170,18 @@ namespace {
                          + (*contHist[0])[movedPiece][to_sq(move)]
                          + (*contHist[1])[movedPiece][to_sq(move)]
                          + (*contHist[3])[movedPiece][to_sq(move)]
-                         - 4123;
+                         - 4334;

              // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
-             r -= ss->statScore / 17417;
+             r -= ss->statScore / 15914;

              // In general we want to cap the LMR depth search at newDepth. But if reductions
              // are really negative and movecount is low, we allow this move to be searched
              // deeper than the first move (this may lead to hidden double extensions).
              int deeper =   r >= -1                  ? 0
-                         : moveCount <= 5            ? 2
-                         : PvNode && depth > 3       ? 1
-                         : cutNode && moveCount <= 7 ? 1
+                         : moveCount <= 4            ? 2
+                         : PvNode && depth > 4       ? 1
+                         : cutNode && moveCount <= 8 ? 1
                          :                             0;

              Depth d = std::clamp(newDepth - r, 1, newDepth + deeper);
@@ -1190,7 +1190,7 @@ namespace {

              // If the son is reduced and fails high it will be re-searched at full depth
              doFullDepthSearch = value > alpha && d < newDepth;
-             doDeeperSearch = value > (alpha + 76 + 11 * (newDepth - d));
+             doDeeperSearch = value > (alpha + 78 + 11 * (newDepth - d));
              didLMR = true;
          }
          else
@@ -1342,7 +1342,7 @@ namespace {
          // or fail low was really bad
          bool extraBonus =    PvNode
                            || cutNode
-                           || bestValue < alpha - 71 * depth;
+                           || bestValue < alpha - 70 * depth;

          update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus));
      }
@@ -1473,7 +1473,7 @@ namespace {
          if (PvNode && bestValue > alpha)
              alpha = bestValue;

-         futilityBase = bestValue + 139;
+         futilityBase = bestValue + 118;
      }

      const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
