scrape: remove unsafe code

The `yolostring` routine was intended to avoid an allocation when
converting from a `[]byte` to a `string` for map lookup.
However, since 2014 the Go compiler has recognized this pattern: when a
`[]byte`-to-`string` conversion is used directly as a map key, as in
`m[string(b)]`, no copy of the data is made. So the unsafe code is not
necessary.

In line with this, values like `scrapeHealthMetricName` also become
`[]byte`; since Go does not allow constants of slice type, the `const`
block becomes a `var` block.

Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
pull/11748/head
Bryan Boreham 2 years ago
parent ccea61c7bf
commit bec5abc4dc

@ -27,7 +27,6 @@ import (
"strconv" "strconv"
"sync" "sync"
"time" "time"
"unsafe"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
@ -1006,8 +1005,8 @@ func (c *scrapeCache) iterDone(flushCache bool) {
} }
} }
func (c *scrapeCache) get(met string) (*cacheEntry, bool) { func (c *scrapeCache) get(met []byte) (*cacheEntry, bool) {
e, ok := c.series[met] e, ok := c.series[string(met)]
if !ok { if !ok {
return nil, false return nil, false
} }
@ -1015,11 +1014,11 @@ func (c *scrapeCache) get(met string) (*cacheEntry, bool) {
return e, true return e, true
} }
func (c *scrapeCache) addRef(met string, ref storage.SeriesRef, lset labels.Labels, hash uint64) { func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) {
if ref == 0 { if ref == 0 {
return return
} }
c.series[met] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash} c.series[string(met)] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
} }
func (c *scrapeCache) addDropped(met string) { func (c *scrapeCache) addDropped(met string) {
@ -1027,8 +1026,8 @@ func (c *scrapeCache) addDropped(met string) {
c.droppedSeries[met] = &iter c.droppedSeries[met] = &iter
} }
func (c *scrapeCache) getDropped(met string) bool { func (c *scrapeCache) getDropped(met []byte) bool {
iterp, ok := c.droppedSeries[met] iterp, ok := c.droppedSeries[string(met)]
if ok { if ok {
*iterp = c.iter *iterp = c.iter
} }
@ -1052,7 +1051,7 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) { func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) {
c.metaMtx.Lock() c.metaMtx.Lock()
e, ok := c.metadata[yoloString(metric)] e, ok := c.metadata[string(metric)]
if !ok { if !ok {
e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
c.metadata[string(metric)] = e c.metadata[string(metric)] = e
@ -1069,12 +1068,12 @@ func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) {
func (c *scrapeCache) setHelp(metric, help []byte) { func (c *scrapeCache) setHelp(metric, help []byte) {
c.metaMtx.Lock() c.metaMtx.Lock()
e, ok := c.metadata[yoloString(metric)] e, ok := c.metadata[string(metric)]
if !ok { if !ok {
e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
c.metadata[string(metric)] = e c.metadata[string(metric)] = e
} }
if e.Help != yoloString(help) { if e.Help != string(help) {
e.Help = string(help) e.Help = string(help)
e.lastIterChange = c.iter e.lastIterChange = c.iter
} }
@ -1086,12 +1085,12 @@ func (c *scrapeCache) setHelp(metric, help []byte) {
func (c *scrapeCache) setUnit(metric, unit []byte) { func (c *scrapeCache) setUnit(metric, unit []byte) {
c.metaMtx.Lock() c.metaMtx.Lock()
e, ok := c.metadata[yoloString(metric)] e, ok := c.metadata[string(metric)]
if !ok { if !ok {
e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
c.metadata[string(metric)] = e c.metadata[string(metric)] = e
} }
if e.Unit != yoloString(unit) { if e.Unit != string(unit) {
e.Unit = string(unit) e.Unit = string(unit)
e.lastIterChange = c.iter e.lastIterChange = c.iter
} }
@ -1509,7 +1508,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
sl.cache.metaMtx.Lock() sl.cache.metaMtx.Lock()
defer sl.cache.metaMtx.Unlock() defer sl.cache.metaMtx.Unlock()
metaEntry, metaOk := sl.cache.metadata[yoloString([]byte(lset.Get(labels.MetricName)))] metaEntry, metaOk := sl.cache.metadata[lset.Get(labels.MetricName)]
if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) { if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) {
metadataChanged = true metadataChanged = true
meta.Type = metaEntry.Type meta.Type = metaEntry.Type
@ -1584,10 +1583,10 @@ loop:
meta = metadata.Metadata{} meta = metadata.Metadata{}
metadataChanged = false metadataChanged = false
if sl.cache.getDropped(yoloString(met)) { if sl.cache.getDropped(met) {
continue continue
} }
ce, ok := sl.cache.get(yoloString(met)) ce, ok := sl.cache.get(met)
var ( var (
ref storage.SeriesRef ref storage.SeriesRef
lset labels.Labels lset labels.Labels
@ -1654,7 +1653,7 @@ loop:
// Bypass staleness logic if there is an explicit timestamp. // Bypass staleness logic if there is an explicit timestamp.
sl.cache.trackStaleness(hash, lset) sl.cache.trackStaleness(hash, lset)
} }
sl.cache.addRef(mets, ref, lset, hash) sl.cache.addRef(met, ref, lset, hash)
if sampleAdded && sampleLimitErr == nil { if sampleAdded && sampleLimitErr == nil {
seriesAdded++ seriesAdded++
} }
@ -1719,10 +1718,6 @@ loop:
return return
} }
func yoloString(b []byte) string {
return *((*string)(unsafe.Pointer(&b)))
}
// Adds samples to the appender, checking the error, and then returns the # of samples added, // Adds samples to the appender, checking the error, and then returns the # of samples added,
// whether the caller should continue to process more samples, and any sample limit errors. // whether the caller should continue to process more samples, and any sample limit errors.
func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) { func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) {
@ -1775,15 +1770,15 @@ func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appE
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions // The constants are suffixed with the invalid \xff unicode rune to avoid collisions
// with scraped metrics in the cache. // with scraped metrics in the cache.
const ( var (
scrapeHealthMetricName = "up" + "\xff" scrapeHealthMetricName = []byte("up" + "\xff")
scrapeDurationMetricName = "scrape_duration_seconds" + "\xff" scrapeDurationMetricName = []byte("scrape_duration_seconds" + "\xff")
scrapeSamplesMetricName = "scrape_samples_scraped" + "\xff" scrapeSamplesMetricName = []byte("scrape_samples_scraped" + "\xff")
samplesPostRelabelMetricName = "scrape_samples_post_metric_relabeling" + "\xff" samplesPostRelabelMetricName = []byte("scrape_samples_post_metric_relabeling" + "\xff")
scrapeSeriesAddedMetricName = "scrape_series_added" + "\xff" scrapeSeriesAddedMetricName = []byte("scrape_series_added" + "\xff")
scrapeTimeoutMetricName = "scrape_timeout_seconds" + "\xff" scrapeTimeoutMetricName = []byte("scrape_timeout_seconds" + "\xff")
scrapeSampleLimitMetricName = "scrape_sample_limit" + "\xff" scrapeSampleLimitMetricName = []byte("scrape_sample_limit" + "\xff")
scrapeBodySizeBytesMetricName = "scrape_body_size_bytes" + "\xff" scrapeBodySizeBytesMetricName = []byte("scrape_body_size_bytes" + "\xff")
) )
func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) { func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
@ -1859,7 +1854,7 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er
return return
} }
func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v float64) error { func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64) error {
ce, ok := sl.cache.get(s) ce, ok := sl.cache.get(s)
var ref storage.SeriesRef var ref storage.SeriesRef
var lset labels.Labels var lset labels.Labels
@ -1870,7 +1865,7 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions // The constants are suffixed with the invalid \xff unicode rune to avoid collisions
// with scraped metrics in the cache. // with scraped metrics in the cache.
// We have to drop it when building the actual metric. // We have to drop it when building the actual metric.
lset = labels.FromStrings(labels.MetricName, s[:len(s)-1]) lset = labels.FromStrings(labels.MetricName, string(s[:len(s)-1]))
lset = sl.reportSampleMutator(lset) lset = sl.reportSampleMutator(lset)
} }

@ -1586,21 +1586,21 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
fakeRef := storage.SeriesRef(1) fakeRef := storage.SeriesRef(1)
expValue := float64(1) expValue := float64(1)
metric := `metric{n="1"} 1` metric := []byte(`metric{n="1"} 1`)
p, warning := textparse.New([]byte(metric), "") p, warning := textparse.New(metric, "")
require.NoError(t, warning) require.NoError(t, warning)
var lset labels.Labels var lset labels.Labels
p.Next() p.Next()
mets := p.Metric(&lset) p.Metric(&lset)
hash := lset.Hash() hash := lset.Hash()
// Create a fake entry in the cache // Create a fake entry in the cache
sl.cache.addRef(mets, fakeRef, lset, hash) sl.cache.addRef(metric, fakeRef, lset, hash)
now := time.Now() now := time.Now()
slApp := sl.appender(context.Background()) slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte(metric), "", now) _, _, _, err := sl.append(slApp, metric, "", now)
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, slApp.Commit()) require.NoError(t, slApp.Commit())

Loading…
Cancel
Save