Skip to content

Commit

Permalink
Pull request: 5248-imp-cache
Browse files Browse the repository at this point in the history
Updates AdguardTeam/AdGuardHome#5248.

Squashed commit of the following:

commit 46255f5
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Fri Dec 9 18:12:48 2022 +0300

    proxy: imp logs

commit 6bc8867
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Fri Dec 9 17:35:20 2022 +0300

    all: upd go; imp logs; typos

commit b3c8625
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Fri Dec 9 16:10:36 2022 +0300

    proxy: imp cache locking
  • Loading branch information
ainar-g committed Dec 9, 2022
1 parent b14cd4b commit f096b3d
Show file tree
Hide file tree
Showing 6 changed files with 140 additions and 147 deletions.
2 changes: 1 addition & 1 deletion bamboo-specs/bamboo.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ plan:
key: DNSPROXYSPECS
name: dnsproxy - Build and run tests
variables:
dockerGo: adguard/golang-ubuntu:5.0
dockerGo: adguard/golang-ubuntu:5.4
dockerLint: golangci/golangci-lint:v1.50.0

stages:
Expand Down
93 changes: 39 additions & 54 deletions proxy/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,6 @@ type cache struct {
// itemsWithSubnetLock protects requests cache.
itemsWithSubnetLock sync.RWMutex

// cacheSize is the size of a key-value pair of cache.
cacheSize int
// optimistic defines if the cache should return expired items and resolve
// those again.
optimistic bool
Expand Down Expand Up @@ -162,22 +160,30 @@ func (c *cache) unpackItem(data []byte, req *dns.Msg) (ci *cacheItem, expired bo
// initCache initializes the cache if it's enabled, along with the optimistic
// resolver that re-resolves expired items in the background.
func (p *Proxy) initCache() {
	if !p.CacheEnabled {
		log.Info("dnsproxy: cache: disabled")

		return
	}

	size := p.CacheSizeBytes
	log.Info("dnsproxy: cache: enabled, size %d b", size)

	p.cache = newCache(size, p.EnableEDNSClientSubnet, p.CacheOptimistic)
	p.shortFlighter = newOptimisticResolver(p)
}

c := &cache{
optimistic: p.CacheOptimistic,
cacheSize: p.CacheSizeBytes,
// newCache returns a properly initialized cache.
func newCache(size int, withECS, optimistic bool) (c *cache) {
c = &cache{
items: createCache(size),
optimistic: optimistic,
}
p.cache = c
c.initLazy()
if p.EnableEDNSClientSubnet {
c.initLazyWithSubnet()

if withECS {
c.itemsWithSubnet = createCache(size)
}

p.shortFlighter = newOptimisticResolver(p)
return c
}

// get returns cached item for the req if it's found. expired is true if the
Expand Down Expand Up @@ -241,35 +247,15 @@ func canLookUpInCache(cache glcache.Cache, req *dns.Msg) (ok bool) {
return cache != nil && req != nil && len(req.Question) == 1
}

// initLazy initializes the cache for general requests.
func (c *cache) initLazy() {
c.itemsLock.Lock()
defer c.itemsLock.Unlock()

if c.items == nil {
c.items = c.createCache()
}
}

// initLazyWithSubnet initializes the cache for requests with subnets.
func (c *cache) initLazyWithSubnet() {
c.itemsWithSubnetLock.Lock()
defer c.itemsWithSubnetLock.Unlock()

if c.itemsWithSubnet == nil {
c.itemsWithSubnet = c.createCache()
}
}

// createCache returns new Cache with predefined settings.
func (c *cache) createCache() (glc glcache.Cache) {
// createCache returns new Cache with the given cacheSize.
func createCache(cacheSize int) (glc glcache.Cache) {
conf := glcache.Config{
MaxSize: defaultCacheSize,
EnableLRU: true,
}

if c.cacheSize > 0 {
conf.MaxSize = uint(c.cacheSize)
if cacheSize > 0 {
conf.MaxSize = uint(cacheSize)
}

return glcache.New(conf)
Expand All @@ -282,8 +268,6 @@ func (c *cache) set(m *dns.Msg, u upstream.Upstream) {
return
}

c.initLazy()

key := msgToKey(m)
packed := item.pack()

Expand All @@ -301,8 +285,6 @@ func (c *cache) setWithSubnet(m *dns.Msg, u upstream.Upstream, subnet *net.IPNet
return
}

c.initLazyWithSubnet()

pref, _ := subnet.Mask.Size()
key := msgToKeyWithSubnet(m, subnet.IP, pref)
packed := item.pack()
Expand All @@ -318,19 +300,20 @@ func (c *cache) clearItems() {
c.itemsLock.Lock()
defer c.itemsLock.Unlock()

if c.items != nil {
c.items.Clear()
}
c.items.Clear()
}

// clearItemsWithSubnet empties the subnet cache, if any.
func (c *cache) clearItemsWithSubnet() {
	if c.itemsWithSubnet == nil {
		// ECS disabled; there is nothing to clear, so return immediately
		// without touching the lock.
		return
	}

	c.itemsWithSubnetLock.Lock()
	defer c.itemsWithSubnetLock.Unlock()

	c.itemsWithSubnet.Clear()
}

// cacheTTL returns the number of seconds for which m is valid to be cached.
Expand All @@ -344,16 +327,18 @@ func cacheTTL(m *dns.Msg) (ttl uint32) {
case m == nil:
return 0
case m.Truncated:
log.Tracef("refusing to cache truncated message")
log.Debug("dnsproxy: cache: truncated message; not caching")

return 0
case len(m.Question) != 1:
log.Tracef("refusing to cache message with wrong number of questions")
log.Debug("dnsproxy: cache: message with wrong number of questions; not caching")

return 0
default:
ttl = calculateTTL(m)
if ttl == 0 {
log.Debug("dnsproxy: cache: ttl calculated to be 0; not caching")

return 0
}
}
Expand All @@ -363,18 +348,18 @@ func cacheTTL(m *dns.Msg) (ttl uint32) {
if isCacheableSucceded(m) {
return ttl
}

log.Debug("dnsproxy: cache: not a cacheable noerror response; not caching")
case dns.RcodeNameError:
if isCacheableNegative(m) {
return ttl
}

log.Debug("dnsproxy: cache: not a cacheable nxdomain response; not caching")
case dns.RcodeServerFailure:
return ttl
default:
log.Tracef(
"%s: refusing to cache message with response code %s",
m.Question[0].Name,
dns.RcodeToString[rcode],
)
log.Debug("dnsproxy: cache: response code %s; not caching", dns.RcodeToString[rcode])
}

return 0
Expand All @@ -393,7 +378,7 @@ func hasIPAns(m *dns.Msg) (ok bool) {
}

// isCacheableSucceded returns true if m contains useful data to be cached
// treating it as a successful response.
func isCacheableSucceded(m *dns.Msg) (ok bool) {
qType := m.Question[0].Qtype

Expand Down
Loading

0 comments on commit f096b3d

Please sign in to comment.