VLTC search tune
Search parameters were tuned using 152k games at the 180+1.8 time control.
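Tunes like this are typically run with fishtest's SPSA tuner, though the commit message does not name the method. The sketch below only illustrates the shape of a single SPSA iteration on a toy objective: the start values 116/472 and the optimum 114/435 are borrowed from two of the constants changed in this diff, while the step sizes and the strength() stand-in are made up. None of it is fishtest or Stockfish code.

```cpp
// Minimal SPSA sketch (assumption: the tune was SPSA-based, as is usual on
// fishtest; the commit does not say). Everything here is illustrative only.
#include <cstdio>
#include <random>
#include <vector>

int main() {
    std::mt19937 rng(42);
    std::bernoulli_distribution coin(0.5);

    std::vector<double> theta = {116.0, 472.0};  // current parameter values
    std::vector<double> c     = {4.0, 16.0};     // perturbation sizes
    std::vector<double> a     = {0.2, 0.8};      // learning rates

    // Toy stand-in for the measured strength of a parameter vector; in a real
    // tune this would come from a batch of games.
    auto strength = [](const std::vector<double>& t) {
        return -0.01 * (t[0] - 114.0) * (t[0] - 114.0)
               - 0.001 * (t[1] - 435.0) * (t[1] - 435.0);
    };

    for (int iter = 0; iter < 20000; ++iter) {
        std::vector<int>    delta(theta.size());
        std::vector<double> plus = theta, minus = theta;
        for (size_t i = 0; i < theta.size(); ++i) {
            delta[i] = coin(rng) ? 1 : -1;   // random +/-1 direction per parameter
            plus[i]  += c[i] * delta[i];
            minus[i] -= c[i] * delta[i];
        }
        double result = strength(plus) - strength(minus); // which side did better?
        for (size_t i = 0; i < theta.size(); ++i)         // nudge every parameter
            theta[i] += a[i] * result * delta[i] / c[i];  // towards the better side
    }
    std::printf("tuned: %.1f %.1f\n", theta[0], theta[1]); // approaches 114, 435
}
```

The point is that every parameter is perturbed and updated together from one noisy comparison per iteration, which is what makes tuning dozens of search constants within a few hundred thousand games feasible.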

Passed VLTC:
https://tests.stockfishchess.org/tests/view/65a7a81979aa8af82b973a20
LLR: 2.94 (-2.94,2.94) <0.00,2.00>
Total: 117338 W: 29244 L: 28848 D: 59246
Ptnml(0-2): 24, 12474, 33267, 12890, 14
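For reference, Ptnml(0-2) lists how many game pairs (each opening played once with either color) scored 0, 0.5, 1, 1.5 and 2 points for the patch, so the game totals above can be cross-checked from it. A minimal check using the VLTC counts, written only for illustration (it is not fishtest code):

```cpp
// Recover the per-game totals of the VLTC run from its pentanomial pair counts.
#include <cstdio>

int main() {
    const long long ptnml[5] = {24, 12474, 33267, 12890, 14}; // pairs scoring 0..2

    long long pairs  = 0;
    double    points = 0.0;
    for (int i = 0; i < 5; ++i) {
        pairs  += ptnml[i];
        points += 0.5 * i * ptnml[i];   // a pair in bucket i is worth i/2 points
    }

    // Prints 117338 games and 58867.0 points, matching W + D/2 = 29244 + 29623
    // from the W/L/D line above.
    std::printf("games %lld, points %.1f, score %.5f\n",
                2 * pairs, points, points / (2 * pairs));
}
```

The same check applies to the VVLTC and STC runs below.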

Passed VVLTC:
https://tests.stockfishchess.org/tests/view/65ab246679aa8af82b977982
LLR: 2.94 (-2.94,2.94) <0.50,2.50>
Total: 28164 W: 7239 L: 6957 D: 13968
Ptnml(0-2): 3, 2651, 8490, 2937, 1

STC Elo estimate:
https://tests.stockfishchess.org/tests/view/65ac7c0979aa8af82b9792a6
Elo: -0.53 ± 2.0 (95%) LOS: 30.4%
Total: 30000 W: 7688 L: 7734 D: 14578
Ptnml(0-2): 102, 3617, 7614, 3559, 108
nElo: -1.03 ± 3.9 (95%) PairsRatio: 0.99
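The raw Elo estimate and the PairsRatio reported here can be reproduced from the counts above; the ±2.0 error bar and nElo additionally need the pentanomial variance, which is omitted from this sketch (illustrative only, not fishtest code):

```cpp
// Reproduce the reported STC Elo estimate and PairsRatio from the raw counts.
#include <cmath>
#include <cstdio>

int main() {
    const double W = 7688, L = 7734, D = 14578;            // games above
    const double ptnml[5] = {102, 3617, 7614, 3559, 108};  // pairs scoring 0..2

    double games = W + L + D;
    double score = (W + 0.5 * D) / games;                  // mean points per game
    double elo   = -400.0 * std::log10(1.0 / score - 1.0); // logistic Elo model

    double pairsRatio = (ptnml[3] + ptnml[4])              // pairs scoring > 1
                      / (ptnml[0] + ptnml[1]);             // pairs scoring < 1

    // Prints Elo -0.53 and PairsRatio 0.99, as reported.
    std::printf("score %.5f, Elo %.2f, PairsRatio %.2f\n", score, elo, pairsRatio);
}
```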

closes #5003

Bench: 1235377
XInTheDark authored and Disservin committed Jan 21, 2024
1 parent a901474 commit a6fd17f
Showing 2 changed files with 39 additions and 39 deletions.
74 changes: 37 additions & 37 deletions src/search.cpp
@@ -55,7 +55,7 @@ namespace {

// Futility margin
Value futility_margin(Depth d, bool noTtCutNode, bool improving) {
- Value futilityMult = 116 - 44 * noTtCutNode;
+ Value futilityMult = 114 - 47 * noTtCutNode;
return (futilityMult * d - 3 * futilityMult / 2 * improving);
}

@@ -66,15 +66,15 @@ constexpr int futility_move_count(bool improving, Depth depth) {
// Add correctionHistory value to raw staticEval and guarantee evaluation does not hit the tablebase range
Value to_corrected_static_eval(Value v, const Worker& w, const Position& pos) {
auto cv = w.correctionHistory[pos.side_to_move()][pawn_structure_index<Correction>(pos)];
- v += cv * std::abs(cv) / 16384;
+ v += cv * std::abs(cv) / 14095;
return std::clamp(int(v), VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
}

// History and stats update bonus, based on depth
- int stat_bonus(Depth d) { return std::min(268 * d - 352, 1153); }
+ int stat_bonus(Depth d) { return std::min(265 * d - 349, 1112); }

// History and stats update malus, based on depth
- int stat_malus(Depth d) { return std::min(400 * d - 354, 1201); }
+ int stat_malus(Depth d) { return std::min(482 * d - 326, 1172); }

// Add a small random component to draw evaluations to avoid 3-fold blindness
Value value_draw(size_t nodes) { return VALUE_DRAW - 1 + Value(nodes & 0x2); }
@@ -334,12 +334,12 @@ void Search::Worker::iterative_deepening() {

// Reset aspiration window starting size
Value avg = rootMoves[pvIdx].averageScore;
- delta = Value(9) + int(avg) * avg / 14847;
+ delta = Value(9) + int(avg) * avg / 13181;
alpha = std::max(avg - delta, -VALUE_INFINITE);
beta = std::min(avg + delta, int(VALUE_INFINITE));

// Adjust optimism based on root move's averageScore (~4 Elo)
- optimism[us] = 121 * avg / (std::abs(avg) + 109);
+ optimism[us] = 132 * avg / (std::abs(avg) + 98);
optimism[~us] = -optimism[us];

// Start with a small aspiration window and, in the case of a fail
@@ -769,7 +769,7 @@ Value Search::Worker::search(
// Use static evaluation difference to improve quiet move ordering (~9 Elo)
if (((ss - 1)->currentMove).is_ok() && !(ss - 1)->inCheck && !priorCapture)
{
- int bonus = std::clamp(-13 * int((ss - 1)->staticEval + ss->staticEval), -1652, 1546);
+ int bonus = std::clamp(-13 * int((ss - 1)->staticEval + ss->staticEval), -1680, 1406);
bonus = bonus > 0 ? 2 * bonus : bonus / 2;
thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus;
if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
@@ -790,7 +790,7 @@ Value Search::Worker::search(
// If eval is really low check with qsearch if it can exceed alpha, if it can't,
// return a fail low.
// Adjust razor margin according to cutoffCnt. (~1 Elo)
- if (eval < alpha - 472 - (284 - 165 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
+ if (eval < alpha - 435 - (327 - 167 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
@@ -799,17 +799,17 @@ Value Search::Worker::search(

// Step 8. Futility pruning: child node (~40 Elo)
// The depth condition is important for mate finding.
- if (!ss->ttPv && depth < 9
+ if (!ss->ttPv && depth < 11
&& eval - futility_margin(depth, cutNode && !ss->ttHit, improving)
- - (ss - 1)->statScore / 337
+ - (ss - 1)->statScore / 327
>= beta
- && eval >= beta && eval < 29008 // smaller than TB wins
+ && eval >= beta && eval < 27734 // smaller than TB wins
&& (!ttMove || ttCapture))
return beta > VALUE_TB_LOSS_IN_MAX_PLY ? (eval + beta) / 2 : eval;

// Step 9. Null move search with verification search (~35 Elo)
- if (!PvNode && (ss - 1)->currentMove != Move::null() && (ss - 1)->statScore < 17496
- && eval >= beta && eval >= ss->staticEval && ss->staticEval >= beta - 23 * depth + 304
+ if (!PvNode && (ss - 1)->currentMove != Move::null() && (ss - 1)->statScore < 17787
+ && eval >= beta && eval >= ss->staticEval && ss->staticEval >= beta - 22 * depth + 313
&& !excludedMove && pos.non_pawn_material(us) && ss->ply >= thisThread->nmpMinPly
&& beta > VALUE_TB_LOSS_IN_MAX_PLY)
{
@@ -863,7 +863,7 @@ Value Search::Worker::search(
if (cutNode && depth >= 8 && !ttMove)
depth -= 2;

- probCutBeta = beta + 163 - 67 * improving;
+ probCutBeta = beta + 173 - 73 * improving;

// Step 11. ProbCut (~10 Elo)
// If we have a good enough capture (or queen promotion) and a reduced search returns a value
@@ -923,7 +923,7 @@ Value Search::Worker::search(
moves_loop: // When in check, search starts here

// Step 12. A small Probcut idea, when we are in check (~4 Elo)
- probCutBeta = beta + 425;
+ probCutBeta = beta + 427;
if (ss->inCheck && !PvNode && ttCapture && (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 4 && ttValue >= probCutBeta
&& std::abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY)
@@ -1006,15 +1006,15 @@ Value Search::Worker::search(
{
Piece capturedPiece = pos.piece_on(move.to_sq());
int futilityEval =
- ss->staticEval + 238 + 305 * lmrDepth + PieceValue[capturedPiece]
+ ss->staticEval + 277 + 298 * lmrDepth + PieceValue[capturedPiece]
+ thisThread->captureHistory[movedPiece][move.to_sq()][type_of(capturedPiece)]
/ 7;
if (futilityEval < alpha)
continue;
}

// SEE based pruning for captures and checks (~11 Elo)
- if (!pos.see_ge(move, -187 * depth))
+ if (!pos.see_ge(move, -203 * depth))
continue;
}
else
@@ -1026,18 +1026,18 @@ Value Search::Worker::search(
+ thisThread->pawnHistory[pawn_structure_index(pos)][movedPiece][move.to_sq()];

// Continuation history based pruning (~2 Elo)
- if (lmrDepth < 6 && history < -3752 * depth)
+ if (lmrDepth < 6 && history < -4195 * depth)
continue;

- history += 2 * thisThread->mainHistory[us][move.from_to()];
+ history += 69 * thisThread->mainHistory[us][move.from_to()] / 32;

- lmrDepth += history / 7838;
+ lmrDepth += history / 6992;
lmrDepth = std::max(lmrDepth, -1);

// Futility pruning: parent node (~13 Elo)
- if (!ss->inCheck && lmrDepth < 14
- && ss->staticEval + (bestValue < ss->staticEval - 57 ? 124 : 71)
- + 118 * lmrDepth
+ if (!ss->inCheck && lmrDepth < 15
+ && ss->staticEval + (bestValue < ss->staticEval - 63 ? 137 : 64)
+ + 111 * lmrDepth
<= alpha)
continue;

@@ -1064,11 +1064,11 @@ Value Search::Worker::search(
// so changing them requires tests at these types of time controls.
// Recursive singular search is avoided.
if (!rootNode && move == ttMove && !excludedMove
- && depth >= 4 - (thisThread->completedDepth > 27) + 2 * (PvNode && tte->is_pv())
+ && depth >= 4 - (thisThread->completedDepth > 31) + 2 * (PvNode && tte->is_pv())
&& std::abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 3)
{
- Value singularBeta = ttValue - (66 + 58 * (ss->ttPv && !PvNode)) * depth / 64;
+ Value singularBeta = ttValue - (58 + 52 * (ss->ttPv && !PvNode)) * depth / 64;
Depth singularDepth = newDepth / 2;

ss->excludedMove = move;
@@ -1082,7 +1082,7 @@ Value Search::Worker::search(
singularQuietLMR = !ttCapture;

// Avoid search explosion by limiting the number of double extensions
- if (!PvNode && value < singularBeta - 17 && ss->doubleExtensions <= 11)
+ if (!PvNode && value < singularBeta - 16 && ss->doubleExtensions <= 12)
{
extension = 2;
depth += depth < 15;
@@ -1109,7 +1109,7 @@ Value Search::Worker::search(

// If we are on a cutNode but the ttMove is not assumed to fail high over current beta (~1 Elo)
else if (cutNode)
- extension = depth < 19 ? -2 : -1;
+ extension = depth < 20 ? -2 : -1;

// If the ttMove is assumed to fail low over the value of the reduced search (~1 Elo)
else if (ttValue <= value)
@@ -1122,14 +1122,14 @@ Value Search::Worker::search(

// Quiet ttMove extensions (~1 Elo)
else if (PvNode && move == ttMove && move == ss->killers[0]
- && (*contHist[0])[movedPiece][move.to_sq()] >= 4325)
+ && (*contHist[0])[movedPiece][move.to_sq()] >= 4111)
extension = 1;

// Recapture extensions (~1 Elo)
else if (PvNode && move == ttMove && move.to_sq() == prevSq
&& thisThread->captureHistory[movedPiece][move.to_sq()]
[type_of(pos.piece_on(move.to_sq()))]
- > 4146)
+ > 4484)
extension = 1;
}

@@ -1189,10 +1189,10 @@ Value Search::Worker::search(
ss->statScore = 2 * thisThread->mainHistory[us][move.from_to()]
+ (*contHist[0])[movedPiece][move.to_sq()]
+ (*contHist[1])[movedPiece][move.to_sq()]
- + (*contHist[3])[movedPiece][move.to_sq()] - 3817;
+ + (*contHist[3])[movedPiece][move.to_sq()] - 4119;

// Decrease/increase reduction for moves with a good/bad history (~25 Elo)
- r -= ss->statScore / 14767;
+ r -= ss->statScore / 15373;

// Step 17. Late moves reduction / extension (LMR, ~117 Elo)
// We use various heuristics for the sons of a node after the first son has
@@ -1215,7 +1215,7 @@ Value Search::Worker::search(
{
// Adjust full-depth search based on LMR results - if the result
// was good enough search deeper, if it was bad enough search shallower.
- const bool doDeeperSearch = value > (bestValue + 53 + 2 * newDepth); // (~1 Elo)
+ const bool doDeeperSearch = value > (bestValue + 51 + 2 * newDepth); // (~1 Elo)
const bool doShallowerSearch = value < bestValue + newDepth; // (~2 Elo)

newDepth += doDeeperSearch - doShallowerSearch;
@@ -1331,7 +1331,7 @@ Value Search::Worker::search(
else
{
// Reduce other moves if we have found at least one score improvement (~2 Elo)
- if (depth > 2 && depth < 12 && beta < 13782 && value > -11541)
+ if (depth > 2 && depth < 12 && beta < 13195 && value > -12346)
depth -= 2;

assert(depth > 0);
@@ -1370,7 +1370,7 @@ Value Search::Worker::search(
// Bonus for prior countermove that caused the fail low
else if (!priorCapture && prevSq != SQ_NONE)
{
- int bonus = (depth > 6) + (PvNode || cutNode) + ((ss - 1)->statScore < -18782)
+ int bonus = (depth > 5) + (PvNode || cutNode) + ((ss - 1)->statScore < -16797)
+ ((ss - 1)->moveCount > 10);
update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
stat_bonus(depth) * bonus);
@@ -1529,7 +1529,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
if (bestValue > alpha)
alpha = bestValue;

- futilityBase = ss->staticEval + 182;
+ futilityBase = ss->staticEval + 186;
}

const PieceToHistory* contHist[] = {(ss - 1)->continuationHistory,
@@ -1609,7 +1609,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
continue;

// Do not search moves with bad enough SEE values (~5 Elo)
- if (!pos.see_ge(move, -77))
+ if (!pos.see_ge(move, -76))
continue;
}

@@ -1764,7 +1764,7 @@ void update_all_stats(const Position& pos,

if (!pos.capture_stage(bestMove))
{
- int bestMoveBonus = bestValue > beta + 173 ? quietMoveBonus // larger bonus
+ int bestMoveBonus = bestValue > beta + 177 ? quietMoveBonus // larger bonus
: stat_bonus(depth); // smaller bonus

// Increase stats for the best move in case it was a quiet move
4 changes: 2 additions & 2 deletions src/search.h
@@ -205,8 +205,8 @@ class Worker {

Depth reduction(bool i, Depth d, int mn, int delta) {
int reductionScale = reductions[d] * reductions[mn];
- return (reductionScale + 1346 - int(delta) * 896 / int(rootDelta)) / 1024
- + (!i && reductionScale > 880);
+ return (reductionScale + 1177 - int(delta) * 776 / int(rootDelta)) / 1024
+ + (!i && reductionScale > 842);
}

// Get a pointer to the search manager, only allowed to be called by the
