Mirror of https://github.com/opencloud-eu/opencloud.git (synced 2025-12-30 17:00:57 -06:00)
build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.22.0 to 1.23.0.

- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.22.0...v1.23.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-version: 1.23.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
6  go.mod
@@ -68,7 +68,7 @@ require (
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/pkg/errors v0.9.1
 	github.com/pkg/xattr v0.4.12
-	github.com/prometheus/client_golang v1.22.0
+	github.com/prometheus/client_golang v1.23.0
 	github.com/r3labs/sse/v2 v2.10.0
 	github.com/riandyrn/otelchi v0.12.1
 	github.com/rogpeppe/go-internal v1.14.1
@@ -281,8 +281,8 @@ require (
 	github.com/pquerna/cachecontrol v0.2.0 // indirect
 	github.com/prometheus/alertmanager v0.28.1 // indirect
 	github.com/prometheus/client_model v0.6.2 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/prometheus/common v0.65.0 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/prometheus/statsd_exporter v0.22.8 // indirect
 	github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
12  go.sum
@@ -930,8 +930,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
+github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
 github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -951,8 +951,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
+github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
 github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -963,8 +963,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
 github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
 github.com/prometheus/statsd_exporter v0.22.8 h1:Qo2D9ZzaQG+id9i5NYNGmbf1aa/KxKbB9aKfMS+Yib0=
 github.com/prometheus/statsd_exporter v0.22.8/go.mod h1:/DzwbTEaFTE0Ojz5PqcSk6+PFHOPWGxdXVr6yC8eFOM=
4  vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go (generated, vendored)
@@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
 		}
 		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
 	}
-	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+	if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') {
 		groups = append(groups, group)
 	}
 	return groups
@@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
 	buf := bufio.NewWriter(writer)
 	defer buf.Flush()
 	wf := func(format string, args ...interface{}) error {
-		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		_, err := fmt.Fprintf(buf, format, args...)
 		return err
 	}
 	ws := func(s string) error {
25  vendor/github.com/prometheus/client_golang/prometheus/metric.go (generated, vendored)
@@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
 	case pb.Counter != nil:
 		pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
 	case pb.Histogram != nil:
+		h := pb.Histogram
 		for _, e := range m.exemplars {
-			// pb.Histogram.Bucket are sorted by UpperBound.
-			i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
-				return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
+			if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 ||
+				len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) &&
+				e.GetTimestamp() != nil {
+				h.Exemplars = append(h.Exemplars, e)
+				if len(h.Bucket) == 0 {
+					// Don't proceed to classic buckets if there are none.
+					continue
+				}
+			}
+			// h.Bucket are sorted by UpperBound.
+			i := sort.Search(len(h.Bucket), func(i int) bool {
+				return h.Bucket[i].GetUpperBound() >= e.GetValue()
 			})
-			if i < len(pb.Histogram.Bucket) {
-				pb.Histogram.Bucket[i].Exemplar = e
+			if i < len(h.Bucket) {
+				h.Bucket[i].Exemplar = e
 			} else {
 				// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
 				b := &dto.Bucket{
-					CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
+					CumulativeCount: proto.Uint64(h.GetSampleCount()),
 					UpperBound:      proto.Float64(math.Inf(1)),
 					Exemplar:        e,
 				}
-				pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
+				h.Bucket = append(h.Bucket, b)
 			}
 		}
 	default:
@@ -227,6 +237,7 @@ type Exemplar struct {
 // Only last applicable exemplar is injected from the list.
 // For example for Counter it means last exemplar is injected.
 // For Histogram, it means last applicable exemplar for each bucket is injected.
+// For a Native Histogram, all valid exemplars are injected.
 //
 // NewMetricWithExemplars works best with MustNewConstMetric and
 // MustNewConstHistogram, see example.
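Editor's note: the hunk above extends exemplar injection to native histograms. A minimal sketch of the existing public API it feeds into (NewMetricWithExemplars on a const histogram); the metric name, bucket boundaries, and trace_id label are illustrative assumptions, not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("http_request_duration_seconds", "Example histogram.", nil, nil)

	// A const (classic) histogram: total count, sum, and cumulative bucket counts.
	base := prometheus.MustNewConstHistogram(desc, 42, 13.37,
		map[float64]uint64{0.1: 10, 0.5: 30, 1: 42})

	// Attach an exemplar; Write() places it in the matching bucket
	// (or appends a +Inf bucket if none matches).
	m, err := prometheus.NewMetricWithExemplars(base, prometheus.Exemplar{
		Value:  0.27,
		Labels: prometheus.Labels{"trace_id": "abc123"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Desc())
}
```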
6  vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go (generated, vendored)
@@ -25,9 +25,9 @@ import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// notImplementedErr is returned by stub functions that replace cgo functions, when cgo
|
||||
// errNotImplemented is returned by stub functions that replace cgo functions, when cgo
|
||||
// isn't available.
|
||||
var notImplementedErr = errors.New("not implemented")
|
||||
var errNotImplemented = errors.New("not implemented")
|
||||
|
||||
type memoryInfo struct {
|
||||
vsize uint64 // Virtual memory size in bytes
|
||||
@@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
if memInfo, err := getMemory(); err == nil {
|
||||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
|
||||
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
|
||||
} else if !errors.Is(err, notImplementedErr) {
|
||||
} else if !errors.Is(err, errNotImplemented) {
|
||||
// Don't report an error when support is not compiled in.
|
||||
c.reportError(ch, c.rss, err)
|
||||
c.reportError(ch, c.vsize, err)
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
package prometheus
|
||||
|
||||
func getMemory() (*memoryInfo, error) {
|
||||
return nil, notImplementedErr
|
||||
return nil, errNotImplemented
|
||||
}
|
||||
|
||||
// describe returns all descriptions of the collector for Darwin.
|
||||
|
||||
@@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
|
||||
if netstat, err := p.Netstat(); err == nil {
|
||||
var inOctets, outOctets float64
|
||||
if netstat.IpExt.InOctets != nil {
|
||||
inOctets = *netstat.IpExt.InOctets
|
||||
if netstat.InOctets != nil {
|
||||
inOctets = *netstat.InOctets
|
||||
}
|
||||
if netstat.IpExt.OutOctets != nil {
|
||||
outOctets = *netstat.IpExt.OutOctets
|
||||
if netstat.OutOctets != nil {
|
||||
outOctets = *netstat.OutOctets
|
||||
}
|
||||
ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
|
||||
ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
|
||||
|
||||
2  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go (generated, vendored)
@@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
 func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
 	labels := prometheus.Labels{}
 
-	if !(code || method) {
+	if !code && !method {
 		return labels
 	}
 
10  vendor/github.com/prometheus/client_golang/prometheus/vec.go (generated, vendored)
@@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
|
||||
return m.deleteByHashWithLabelValues(h, lvs, m.curry)
|
||||
}
|
||||
|
||||
// Delete deletes the metric where the variable labels are the same as those
|
||||
@@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
|
||||
return m.deleteByHashWithLabels(h, labels, m.curry)
|
||||
}
|
||||
|
||||
// DeletePartialMatch deletes all metrics where the variable labels contain all of those
|
||||
@@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int {
|
||||
labels, closer := constrainLabels(m.desc, labels)
|
||||
defer closer()
|
||||
|
||||
return m.metricMap.deleteByLabels(labels, m.curry)
|
||||
return m.deleteByLabels(labels, m.curry)
|
||||
}
|
||||
|
||||
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
|
||||
@@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
|
||||
return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
|
||||
}
|
||||
|
||||
// GetMetricWith returns the Metric for the given Labels map (the label names
|
||||
@@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
|
||||
return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil
|
||||
}
|
||||
|
||||
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
|
||||
|
||||
36  vendor/github.com/prometheus/client_golang/prometheus/wrap.go (generated, vendored)
@@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
|
||||
// metric names that are standardized across applications, as that would break
|
||||
// horizontal monitoring, for example the metrics provided by the Go collector
|
||||
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
|
||||
// fact, those metrics are already prefixed with “go_” or “process_”,
|
||||
// fact, those metrics are already prefixed with "go_" or "process_",
|
||||
// respectively.)
|
||||
//
|
||||
// Conflicts between Collectors registered through the original Registerer with
|
||||
@@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
|
||||
}
|
||||
}
|
||||
|
||||
// WrapCollectorWith returns a Collector wrapping the provided Collector. The
|
||||
// wrapped Collector will add the provided Labels to all Metrics it collects (as
|
||||
// ConstLabels). The Metrics collected by the unmodified Collector must not
|
||||
// duplicate any of those labels.
|
||||
//
|
||||
// WrapCollectorWith can be useful to work with multiple instances of a third
|
||||
// party library that does not expose enough flexibility on the lifecycle of its
|
||||
// registered metrics.
|
||||
// For example, let's say you have a foo.New(reg Registerer) constructor that
|
||||
// registers metrics but never unregisters them, and you want to create multiple
|
||||
// instances of foo.Foo with different labels.
|
||||
// The way to achieve that, is to create a new Registry, pass it to foo.New,
|
||||
// then use WrapCollectorWith to wrap that Registry with the desired labels and
|
||||
// register that as a collector in your main Registry.
|
||||
// Then you can un-register the wrapped collector effectively un-registering the
|
||||
// metrics registered by foo.New.
|
||||
func WrapCollectorWith(labels Labels, c Collector) Collector {
|
||||
return &wrappingCollector{
|
||||
wrappedCollector: c,
|
||||
labels: labels,
|
||||
}
|
||||
}
|
||||
|
||||
// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
|
||||
// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
|
||||
//
|
||||
// See the documentation of WrapCollectorWith for more details on the use case.
|
||||
func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
|
||||
return &wrappingCollector{
|
||||
wrappedCollector: c,
|
||||
prefix: prefix,
|
||||
}
|
||||
}
|
||||
|
||||
type wrappingRegisterer struct {
|
||||
wrappedRegisterer Registerer
|
||||
prefix string
|
||||
|
||||
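Editor's note: the wrap.go hunk above adds WrapCollectorWith and WrapCollectorWithPrefix. A small sketch of the wrapping pattern those docs describe; instead of the hypothetical foo.New/inner-registry scenario from the comment, it wraps a plain counter for self-containment, and the metric name and instance_name label are assumptions:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	counter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "foo_operations_total",
		Help: "Example counter.",
	})
	counter.Inc()

	// Wrap the collector so every metric it produces gets an extra const label,
	// then register the wrapper instead of the original collector.
	wrapped := prometheus.WrapCollectorWith(prometheus.Labels{"instance_name": "a"}, counter)
	reg.MustRegister(wrapped)

	mfs, _ := reg.Gather()
	fmt.Println(len(mfs)) // 1

	// Unregistering the wrapper removes everything it contributed in one call.
	fmt.Println(reg.Unregister(wrapped)) // true
}
```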
4  vendor/github.com/prometheus/common/expfmt/text_parse.go (generated, vendored)
@@ -345,8 +345,8 @@ func (p *TextParser) startLabelName() stateFn {
|
||||
}
|
||||
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
|
||||
// labels to 'real' labels.
|
||||
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
|
||||
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
|
||||
if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
|
||||
(p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
|
||||
p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
|
||||
}
|
||||
// Check for duplicate label names.
|
||||
|
||||
2  vendor/github.com/prometheus/common/model/alert.go (generated, vendored)
@@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool {
 	return a.ResolvedAt(time.Now())
 }
 
-// ResolvedAt returns true off the activity interval ended before
+// ResolvedAt returns true iff the activity interval ended before
 // the given timestamp.
 func (a *Alert) ResolvedAt(ts time.Time) bool {
 	if a.EndsAt.IsZero() {
5  vendor/github.com/prometheus/common/model/labels.go (generated, vendored)
@@ -22,7 +22,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// AlertNameLabel is the name of the label containing the an alert's name.
|
||||
// AlertNameLabel is the name of the label containing the alert's name.
|
||||
AlertNameLabel = "alertname"
|
||||
|
||||
// ExportedLabelPrefix is the prefix to prepend to the label names present in
|
||||
@@ -122,7 +122,8 @@ func (ln LabelName) IsValidLegacy() bool {
|
||||
return false
|
||||
}
|
||||
for i, b := range ln {
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
|
||||
// TODO: Apply De Morgan's law. Make sure there are tests for this.
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
28  vendor/github.com/prometheus/common/model/metric.go (generated, vendored)
@@ -27,13 +27,25 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// NameValidationScheme determines the method of name validation to be used by
|
||||
// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
|
||||
// mode in isolation from other components that don't support UTF-8 may result
|
||||
// in bugs or other undefined behavior. This value can be set to
|
||||
// LegacyValidation during startup if a binary is not UTF-8-aware binaries. To
|
||||
// avoid need for locking, this value should be set once, ideally in an
|
||||
// init(), before multiple goroutines are started.
|
||||
// NameValidationScheme determines the global default method of the name
|
||||
// validation to be used by all calls to IsValidMetricName() and LabelName
|
||||
// IsValid().
|
||||
//
|
||||
// Deprecated: This variable should not be used and might be removed in the
|
||||
// far future. If you wish to stick to the legacy name validation use
|
||||
// `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
|
||||
// instead. This variable is here as an escape hatch for emergency cases,
|
||||
// given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
|
||||
// to delay UTF-8 migrations in time or aid in debugging unforeseen results of
|
||||
// the change. In such a case, a temporary assignment to `LegacyValidation`
|
||||
// value in the `init()` function in your main.go or so, could be considered.
|
||||
//
|
||||
// Historically we opted for a global variable for feature gating different
|
||||
// validation schemes in operations that were not otherwise easily adjustable
|
||||
// (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
|
||||
// Labels structure or package might have been a better choice. Given the
|
||||
// change was made and many upgraded the common already, we live this as-is
|
||||
// with this warning and learning for the future.
|
||||
NameValidationScheme = UTF8Validation
|
||||
|
||||
// NameEscapingScheme defines the default way that names will be escaped when
|
||||
@@ -50,7 +62,7 @@ var (
|
||||
type ValidationScheme int
|
||||
|
||||
const (
|
||||
// LegacyValidation is a setting that requirets that metric and label names
|
||||
// LegacyValidation is a setting that requires that all metric and label names
|
||||
// conform to the original Prometheus character requirements described by
|
||||
// MetricNameRE and LabelNameRE.
|
||||
LegacyValidation ValidationScheme = iota
|
||||
|
||||
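Editor's note: the model/metric.go hunk above deprecates the NameValidationScheme global and documents a temporary init()-time assignment as the escape hatch. A hedged sketch of that escape hatch, assuming a binary that still needs legacy (non-UTF-8) name validation; the sample metric names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// Deprecated escape hatch described in the upstream comment: pin the global
// back to legacy validation once, during startup, before goroutines start.
func init() {
	model.NameValidationScheme = model.LegacyValidation //nolint:staticcheck // deliberate use of deprecated global
}

func main() {
	fmt.Println(model.IsValidMetricName("http.requests.total")) // false under legacy validation
	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
}
```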
25  vendor/github.com/prometheus/common/model/time.go (generated, vendored)
@@ -201,6 +201,7 @@ var unitMap = map[string]struct {
|
||||
|
||||
// ParseDuration parses a string into a time.Duration, assuming that a year
|
||||
// always has 365d, a week always has 7d, and a day always has 24h.
|
||||
// Negative durations are not supported.
|
||||
func ParseDuration(s string) (Duration, error) {
|
||||
switch s {
|
||||
case "0":
|
||||
@@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) {
|
||||
return 0, errors.New("duration out of range")
|
||||
}
|
||||
}
|
||||
|
||||
return Duration(dur), nil
|
||||
}
|
||||
|
||||
// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations.
|
||||
func ParseDurationAllowNegative(s string) (Duration, error) {
|
||||
if s == "" || s[0] != '-' {
|
||||
return ParseDuration(s)
|
||||
}
|
||||
|
||||
d, err := ParseDuration(s[1:])
|
||||
|
||||
return -d, err
|
||||
}
|
||||
|
||||
func (d Duration) String() string {
|
||||
var (
|
||||
ms = int64(time.Duration(d) / time.Millisecond)
|
||||
r = ""
|
||||
ms = int64(time.Duration(d) / time.Millisecond)
|
||||
r = ""
|
||||
sign = ""
|
||||
)
|
||||
|
||||
if ms == 0 {
|
||||
return "0s"
|
||||
}
|
||||
|
||||
if ms < 0 {
|
||||
sign, ms = "-", -ms
|
||||
}
|
||||
|
||||
f := func(unit string, mult int64, exact bool) {
|
||||
if exact && ms%mult != 0 {
|
||||
return
|
||||
@@ -286,7 +305,7 @@ func (d Duration) String() string {
|
||||
f("s", 1000, false)
|
||||
f("ms", 1, false)
|
||||
|
||||
return r
|
||||
return sign + r
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
|
||||
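Editor's note: the time.go hunk above adds ParseDurationAllowNegative and a sign-aware Duration.String(). A short usage sketch of those two additions as documented in the diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Negative durations are accepted by the dedicated helper,
	// and String() now carries the sign through.
	d, err := model.ParseDurationAllowNegative("-1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // -1h30m

	// The stricter ParseDuration still rejects a leading minus sign.
	_, err = model.ParseDuration("-1h30m")
	fmt.Println(err != nil) // true
}
```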
224  vendor/github.com/prometheus/common/promslog/slog.go (generated, vendored)
@@ -25,73 +25,43 @@ import (
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LogStyle represents the common logging formats in the Prometheus ecosystem.
|
||||
type LogStyle string
|
||||
|
||||
const (
|
||||
SlogStyle LogStyle = "slog"
|
||||
GoKitStyle LogStyle = "go-kit"
|
||||
|
||||
reservedKeyPrefix = "logged_"
|
||||
)
|
||||
|
||||
var (
|
||||
LevelFlagOptions = []string{"debug", "info", "warn", "error"}
|
||||
// LevelFlagOptions represents allowed logging levels.
|
||||
LevelFlagOptions = []string{"debug", "info", "warn", "error"}
|
||||
// FormatFlagOptions represents allowed formats.
|
||||
FormatFlagOptions = []string{"logfmt", "json"}
|
||||
|
||||
callerAddFunc = false
|
||||
defaultWriter = os.Stderr
|
||||
goKitStyleReplaceAttrFunc = func(groups []string, a slog.Attr) slog.Attr {
|
||||
key := a.Key
|
||||
switch key {
|
||||
case slog.TimeKey:
|
||||
a.Key = "ts"
|
||||
|
||||
// This timestamp format differs from RFC3339Nano by using .000 instead
|
||||
// of .999999999 which changes the timestamp from 9 variable to 3 fixed
|
||||
// decimals (.130 instead of .130987456).
|
||||
t := a.Value.Time()
|
||||
a.Value = slog.StringValue(t.UTC().Format("2006-01-02T15:04:05.000Z07:00"))
|
||||
case slog.SourceKey:
|
||||
a.Key = "caller"
|
||||
src, _ := a.Value.Any().(*slog.Source)
|
||||
|
||||
switch callerAddFunc {
|
||||
case true:
|
||||
a.Value = slog.StringValue(filepath.Base(src.File) + "(" + filepath.Base(src.Function) + "):" + strconv.Itoa(src.Line))
|
||||
default:
|
||||
a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line))
|
||||
}
|
||||
case slog.LevelKey:
|
||||
a.Value = slog.StringValue(strings.ToLower(a.Value.String()))
|
||||
default:
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
defaultReplaceAttrFunc = func(groups []string, a slog.Attr) slog.Attr {
|
||||
key := a.Key
|
||||
switch key {
|
||||
case slog.TimeKey:
|
||||
t := a.Value.Time()
|
||||
a.Value = slog.TimeValue(t.UTC())
|
||||
case slog.SourceKey:
|
||||
src, _ := a.Value.Any().(*slog.Source)
|
||||
a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line))
|
||||
default:
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
defaultWriter = os.Stderr
|
||||
)
|
||||
|
||||
// AllowedLevel is a settable identifier for the minimum level a log entry
|
||||
// must be have.
|
||||
type AllowedLevel struct {
|
||||
s string
|
||||
// Level controls a logging level, with an info default.
|
||||
// It wraps slog.LevelVar with string-based level control.
|
||||
// Level is safe to be used concurrently.
|
||||
type Level struct {
|
||||
lvl *slog.LevelVar
|
||||
}
|
||||
|
||||
func (l *AllowedLevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
// NewLevel returns a new Level.
|
||||
func NewLevel() *Level {
|
||||
return &Level{
|
||||
lvl: &slog.LevelVar{},
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Level) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var s string
|
||||
type plain string
|
||||
if err := unmarshal((*plain)(&s)); err != nil {
|
||||
@@ -100,55 +70,65 @@ func (l *AllowedLevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
lo := &AllowedLevel{}
|
||||
if err := lo.Set(s); err != nil {
|
||||
if err := l.Set(s); err != nil {
|
||||
return err
|
||||
}
|
||||
*l = *lo
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *AllowedLevel) String() string {
|
||||
return l.s
|
||||
// Level returns the value of the logging level as an slog.Level.
|
||||
func (l *Level) Level() slog.Level {
|
||||
return l.lvl.Level()
|
||||
}
|
||||
|
||||
// Set updates the value of the allowed level.
|
||||
func (l *AllowedLevel) Set(s string) error {
|
||||
if l.lvl == nil {
|
||||
l.lvl = &slog.LevelVar{}
|
||||
// String returns the current level.
|
||||
func (l *Level) String() string {
|
||||
switch l.lvl.Level() {
|
||||
case slog.LevelDebug:
|
||||
return "debug"
|
||||
case slog.LevelInfo:
|
||||
return "info"
|
||||
case slog.LevelWarn:
|
||||
return "warn"
|
||||
case slog.LevelError:
|
||||
return "error"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// Set updates the logging level with the validation.
|
||||
func (l *Level) Set(s string) error {
|
||||
switch strings.ToLower(s) {
|
||||
case "debug":
|
||||
l.lvl.Set(slog.LevelDebug)
|
||||
callerAddFunc = true
|
||||
case "info":
|
||||
l.lvl.Set(slog.LevelInfo)
|
||||
callerAddFunc = false
|
||||
case "warn":
|
||||
l.lvl.Set(slog.LevelWarn)
|
||||
callerAddFunc = false
|
||||
case "error":
|
||||
l.lvl.Set(slog.LevelError)
|
||||
callerAddFunc = false
|
||||
default:
|
||||
return fmt.Errorf("unrecognized log level %s", s)
|
||||
}
|
||||
l.s = s
|
||||
return nil
|
||||
}
|
||||
|
||||
// AllowedFormat is a settable identifier for the output format that the logger can have.
|
||||
type AllowedFormat struct {
|
||||
// Format controls a logging output format.
|
||||
// Not concurrency-safe.
|
||||
type Format struct {
|
||||
s string
|
||||
}
|
||||
|
||||
func (f *AllowedFormat) String() string {
|
||||
// NewFormat creates a new Format.
|
||||
func NewFormat() *Format { return &Format{} }
|
||||
|
||||
func (f *Format) String() string {
|
||||
return f.s
|
||||
}
|
||||
|
||||
// Set updates the value of the allowed format.
|
||||
func (f *AllowedFormat) Set(s string) error {
|
||||
func (f *Format) Set(s string) error {
|
||||
switch s {
|
||||
case "logfmt", "json":
|
||||
f.s = s
|
||||
@@ -160,18 +140,112 @@ func (f *AllowedFormat) Set(s string) error {
|
||||
|
||||
// Config is a struct containing configurable settings for the logger
|
||||
type Config struct {
|
||||
Level *AllowedLevel
|
||||
Format *AllowedFormat
|
||||
Level *Level
|
||||
Format *Format
|
||||
Style LogStyle
|
||||
Writer io.Writer
|
||||
}
|
||||
|
||||
func newGoKitStyleReplaceAttrFunc(lvl *Level) func(groups []string, a slog.Attr) slog.Attr {
|
||||
return func(groups []string, a slog.Attr) slog.Attr {
|
||||
key := a.Key
|
||||
switch key {
|
||||
case slog.TimeKey, "ts":
|
||||
if t, ok := a.Value.Any().(time.Time); ok {
|
||||
a.Key = "ts"
|
||||
|
||||
// This timestamp format differs from RFC3339Nano by using .000 instead
|
||||
// of .999999999 which changes the timestamp from 9 variable to 3 fixed
|
||||
// decimals (.130 instead of .130987456).
|
||||
a.Value = slog.StringValue(t.UTC().Format("2006-01-02T15:04:05.000Z07:00"))
|
||||
} else {
|
||||
// If we can't cast the any from the value to a
|
||||
// time.Time, it means the caller logged
|
||||
// another attribute with a key of `ts`.
|
||||
// Prevent duplicate keys (necessary for proper
|
||||
// JSON) by renaming the key to `logged_ts`.
|
||||
a.Key = reservedKeyPrefix + key
|
||||
}
|
||||
case slog.SourceKey, "caller":
|
||||
if src, ok := a.Value.Any().(*slog.Source); ok {
|
||||
a.Key = "caller"
|
||||
switch lvl.String() {
|
||||
case "debug":
|
||||
a.Value = slog.StringValue(filepath.Base(src.File) + "(" + filepath.Base(src.Function) + "):" + strconv.Itoa(src.Line))
|
||||
default:
|
||||
a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line))
|
||||
}
|
||||
} else {
|
||||
// If we can't cast the any from the value to
|
||||
// an *slog.Source, it means the caller logged
|
||||
// another attribute with a key of `caller`.
|
||||
// Prevent duplicate keys (necessary for proper
|
||||
// JSON) by renaming the key to
|
||||
// `logged_caller`.
|
||||
a.Key = reservedKeyPrefix + key
|
||||
}
|
||||
case slog.LevelKey:
|
||||
if lvl, ok := a.Value.Any().(slog.Level); ok {
|
||||
a.Value = slog.StringValue(strings.ToLower(lvl.String()))
|
||||
} else {
|
||||
// If we can't cast the any from the value to
|
||||
// an slog.Level, it means the caller logged
|
||||
// another attribute with a key of `level`.
|
||||
// Prevent duplicate keys (necessary for proper
|
||||
// JSON) by renaming the key to `logged_level`.
|
||||
a.Key = reservedKeyPrefix + key
|
||||
}
|
||||
default:
|
||||
}
|
||||
return a
|
||||
}
|
||||
}
|
||||
|
||||
func defaultReplaceAttr(_ []string, a slog.Attr) slog.Attr {
|
||||
key := a.Key
|
||||
switch key {
|
||||
case slog.TimeKey:
|
||||
// Note that we do not change the timezone to UTC anymore.
|
||||
if _, ok := a.Value.Any().(time.Time); !ok {
|
||||
// If we can't cast the any from the value to a
|
||||
// time.Time, it means the caller logged
|
||||
// another attribute with a key of `time`.
|
||||
// Prevent duplicate keys (necessary for proper
|
||||
// JSON) by renaming the key to `logged_time`.
|
||||
a.Key = reservedKeyPrefix + key
|
||||
}
|
||||
case slog.SourceKey:
|
||||
if src, ok := a.Value.Any().(*slog.Source); ok {
|
||||
a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line))
|
||||
} else {
|
||||
// If we can't cast the any from the value to
|
||||
// an *slog.Source, it means the caller logged
|
||||
// another attribute with a key of `source`.
|
||||
// Prevent duplicate keys (necessary for proper
|
||||
// JSON) by renaming the key to
|
||||
// `logged_source`.
|
||||
a.Key = reservedKeyPrefix + key
|
||||
}
|
||||
case slog.LevelKey:
|
||||
if _, ok := a.Value.Any().(slog.Level); !ok {
|
||||
// If we can't cast the any from the value to
|
||||
// an slog.Level, it means the caller logged
|
||||
// another attribute with a key of `level`.
|
||||
// Prevent duplicate keys (necessary for proper
|
||||
// JSON) by renaming the key to
|
||||
// `logged_level`.
|
||||
a.Key = reservedKeyPrefix + key
|
||||
}
|
||||
default:
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// New returns a new slog.Logger. Each logged line will be annotated
|
||||
// with a timestamp. The output always goes to stderr.
|
||||
func New(config *Config) *slog.Logger {
|
||||
if config.Level == nil {
|
||||
config.Level = &AllowedLevel{}
|
||||
_ = config.Level.Set("info")
|
||||
config.Level = NewLevel()
|
||||
}
|
||||
|
||||
if config.Writer == nil {
|
||||
@@ -181,11 +255,11 @@ func New(config *Config) *slog.Logger {
|
||||
logHandlerOpts := &slog.HandlerOptions{
|
||||
Level: config.Level.lvl,
|
||||
AddSource: true,
|
||||
ReplaceAttr: defaultReplaceAttrFunc,
|
||||
ReplaceAttr: defaultReplaceAttr,
|
||||
}
|
||||
|
||||
if config.Style == GoKitStyle {
|
||||
logHandlerOpts.ReplaceAttr = goKitStyleReplaceAttrFunc
|
||||
logHandlerOpts.ReplaceAttr = newGoKitStyleReplaceAttrFunc(config.Level)
|
||||
}
|
||||
|
||||
if config.Format != nil && config.Format.s == "json" {
|
||||
@@ -197,5 +271,5 @@ func New(config *Config) *slog.Logger {
|
||||
// NewNopLogger is a convenience function to return an slog.Logger that writes
|
||||
// to io.Discard.
|
||||
func NewNopLogger() *slog.Logger {
|
||||
return slog.New(slog.NewTextHandler(io.Discard, nil))
|
||||
return New(&Config{Writer: io.Discard})
|
||||
}
|
||||
|
||||
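Editor's note: the promslog hunk above renames AllowedLevel/AllowedFormat to Level/Format and adds NewLevel/NewFormat constructors. A minimal sketch of building a logger through the new types; the component attribute and stdout writer are assumptions for illustration:

```go
package main

import (
	"os"

	"github.com/prometheus/common/promslog"
)

func main() {
	// Concurrency-safe level, settable from flags or YAML.
	level := promslog.NewLevel()
	_ = level.Set("debug")

	format := promslog.NewFormat()
	_ = format.Set("json")

	logger := promslog.New(&promslog.Config{
		Level:  level,
		Format: format,
		Writer: os.Stdout,
	})
	logger.Info("example message", "component", "demo")
}
```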
63  vendor/github.com/prometheus/procfs/.golangci.yml (generated, vendored)
@@ -1,22 +1,45 @@
|
||||
---
|
||||
version: "2"
|
||||
linters:
|
||||
enable:
|
||||
- errcheck
|
||||
- godot
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- revive
|
||||
- staticcheck
|
||||
- testifylint
|
||||
- unused
|
||||
|
||||
linter-settings:
|
||||
godot:
|
||||
capital: true
|
||||
exclude:
|
||||
# Ignore "See: URL"
|
||||
- 'See:'
|
||||
misspell:
|
||||
locale: US
|
||||
- forbidigo
|
||||
- godot
|
||||
- misspell
|
||||
- revive
|
||||
- testifylint
|
||||
settings:
|
||||
forbidigo:
|
||||
forbid:
|
||||
- pattern: ^fmt\.Print.*$
|
||||
msg: Do not commit print statements.
|
||||
godot:
|
||||
exclude:
|
||||
# Ignore "See: URL".
|
||||
- 'See:'
|
||||
capital: true
|
||||
misspell:
|
||||
locale: US
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
settings:
|
||||
goimports:
|
||||
local-prefixes:
|
||||
- github.com/prometheus/procfs
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
|
||||
10  vendor/github.com/prometheus/procfs/Makefile.common (generated, vendored)
@@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
|
||||
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
|
||||
|
||||
GO_VERSION ?= $(shell $(GO) version)
|
||||
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
||||
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
||||
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
|
||||
|
||||
PROMU := $(FIRST_GOPATH)/bin/promu
|
||||
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
|
||||
SKIP_GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v1.59.0
|
||||
GOLANGCI_LINT_VERSION ?= v2.0.2
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||
@@ -275,3 +275,9 @@ $(1)_precheck:
|
||||
exit 1; \
|
||||
fi
|
||||
endef
|
||||
|
||||
govulncheck: install-govulncheck
|
||||
govulncheck ./...
|
||||
|
||||
install-govulncheck:
|
||||
command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||
|
||||
6  vendor/github.com/prometheus/procfs/README.md (generated, vendored)
@@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
|
||||
The procfs library includes a set of test fixtures which include many example files from
|
||||
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
|
||||
which is extracted automatically during testing. To add/update the test fixtures, first
|
||||
ensure the `fixtures` directory is up to date by removing the existing directory and then
|
||||
extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
|
||||
ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
|
||||
extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
|
||||
|
||||
```bash
|
||||
rm -rf testdata/fixtures
|
||||
make test
|
||||
```
|
||||
|
||||
Next, make the required changes to the extracted files in the `fixtures` directory. When
|
||||
Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When
|
||||
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
|
||||
based on the updated `fixtures` directory. And finally, verify the changes using
|
||||
`git diff testdata/fixtures.ttar`.
|
||||
|
||||
4  vendor/github.com/prometheus/procfs/arp.go (generated, vendored)
@@ -23,9 +23,9 @@ import (
|
||||
|
||||
// Learned from include/uapi/linux/if_arp.h.
|
||||
const (
|
||||
// completed entry (ha valid).
|
||||
// Completed entry (ha valid).
|
||||
ATFComplete = 0x02
|
||||
// permanent entry.
|
||||
// Permanent entry.
|
||||
ATFPermanent = 0x04
|
||||
// Publish entry.
|
||||
ATFPublish = 0x08
|
||||
|
||||
10  vendor/github.com/prometheus/procfs/fs.go (generated, vendored)
@@ -24,8 +24,14 @@ type FS struct {
|
||||
isReal bool
|
||||
}
|
||||
|
||||
// DefaultMountPoint is the common mount point of the proc filesystem.
|
||||
const DefaultMountPoint = fs.DefaultProcMountPoint
|
||||
const (
|
||||
// DefaultMountPoint is the common mount point of the proc filesystem.
|
||||
DefaultMountPoint = fs.DefaultProcMountPoint
|
||||
|
||||
// SectorSize represents the size of a sector in bytes.
|
||||
// It is specific to Linux block I/O operations.
|
||||
SectorSize = 512
|
||||
)
|
||||
|
||||
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
|
||||
// It will error if the mount point directory can't be read or is a file.
|
||||
|
||||
4  vendor/github.com/prometheus/procfs/fs_statfs_notype.go (generated, vendored)
@@ -17,7 +17,7 @@
|
||||
package procfs
|
||||
|
||||
// isRealProc returns true on architectures that don't have a Type argument
|
||||
// in their Statfs_t struct
|
||||
func isRealProc(mountPoint string) (bool, error) {
|
||||
// in their Statfs_t struct.
|
||||
func isRealProc(_ string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
6  vendor/github.com/prometheus/procfs/fscache.go (generated, vendored)
@@ -162,7 +162,7 @@ type Fscacheinfo struct {
|
||||
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
|
||||
// Number of release reqs ignored due to in-progress store
|
||||
ReleaseRequestsIgnoredDueToInProgressStore uint64
|
||||
// Number of page stores cancelled due to release req
|
||||
// Number of page stores canceled due to release req
|
||||
PageStoresCancelledByReleaseRequests uint64
|
||||
VmscanWaiting uint64
|
||||
// Number of times async ops added to pending queues
|
||||
@@ -171,11 +171,11 @@ type Fscacheinfo struct {
|
||||
OpsRunning uint64
|
||||
// Number of times async ops queued for processing
|
||||
OpsEnqueued uint64
|
||||
// Number of async ops cancelled
|
||||
// Number of async ops canceled
|
||||
OpsCancelled uint64
|
||||
// Number of async ops rejected due to object lookup/create failure
|
||||
OpsRejected uint64
|
||||
// Number of async ops initialised
|
||||
// Number of async ops initialized
|
||||
OpsInitialised uint64
|
||||
// Number of async ops queued for deferred release
|
||||
OpsDeferred uint64
|
||||
|
||||
3  vendor/github.com/prometheus/procfs/internal/fs/fs.go (generated, vendored)
@@ -28,6 +28,9 @@ const (
|
||||
|
||||
// DefaultConfigfsMountPoint is the common mount point of the configfs.
|
||||
DefaultConfigfsMountPoint = "/sys/kernel/config"
|
||||
|
||||
// DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
|
||||
DefaultSelinuxMountPoint = "/sys/fs/selinux"
|
||||
)
|
||||
|
||||
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
|
||||
|
||||
14  vendor/github.com/prometheus/procfs/internal/util/parse.go (generated, vendored)
@@ -14,6 +14,7 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -110,3 +111,16 @@ func ParseBool(b string) *bool {
|
||||
}
|
||||
return &truth
|
||||
}
|
||||
|
||||
// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
|
||||
func ReadHexFromFile(path string) (uint64, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
hexString := strings.TrimSpace(string(data))
|
||||
if !strings.HasPrefix(hexString, "0x") {
|
||||
return 0, errors.New("invalid format: hex string does not start with '0x'")
|
||||
}
|
||||
return strconv.ParseUint(hexString[2:], 16, 64)
|
||||
}
|
||||
|
||||
20  vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go (generated, vendored)
@@ -20,6 +20,8 @@ package util
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
@@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) {
|
||||
|
||||
return string(bytes.TrimSpace(b[:n])), nil
|
||||
}
|
||||
|
||||
// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
|
||||
func SysReadUintFromFile(path string) (uint64, error) {
|
||||
data, err := SysReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
||||
}
|
||||
|
||||
// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it.
|
||||
func SysReadIntFromFile(path string) (int64, error) {
|
||||
data, err := SysReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
|
||||
}
|
||||
|
||||
27  vendor/github.com/prometheus/procfs/mountstats.go (generated, vendored)
@@ -45,11 +45,11 @@ const (
|
||||
fieldTransport11TCPLen = 13
|
||||
fieldTransport11UDPLen = 10
|
||||
|
||||
// kernel version >= 4.14 MaxLen
|
||||
// Kernel version >= 4.14 MaxLen
|
||||
// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
|
||||
fieldTransport11RDMAMaxLen = 28
|
||||
|
||||
// kernel version <= 4.2 MinLen
|
||||
// Kernel version <= 4.2 MinLen
|
||||
// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
|
||||
fieldTransport11RDMAMinLen = 20
|
||||
)
|
||||
@@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
||||
switch statVersion {
|
||||
case statVersion10:
|
||||
var expectedLength int
|
||||
if protocol == "tcp" {
|
||||
switch protocol {
|
||||
case "tcp":
|
||||
expectedLength = fieldTransport10TCPLen
|
||||
} else if protocol == "udp" {
|
||||
case "udp":
|
||||
expectedLength = fieldTransport10UDPLen
|
||||
} else {
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
|
||||
}
|
||||
if len(ss) != expectedLength {
|
||||
@@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
||||
}
|
||||
case statVersion11:
|
||||
var expectedLength int
|
||||
if protocol == "tcp" {
|
||||
switch protocol {
|
||||
case "tcp":
|
||||
expectedLength = fieldTransport11TCPLen
|
||||
} else if protocol == "udp" {
|
||||
case "udp":
|
||||
expectedLength = fieldTransport11UDPLen
|
||||
} else if protocol == "rdma" {
|
||||
case "rdma":
|
||||
expectedLength = fieldTransport11RDMAMinLen
|
||||
} else {
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
|
||||
}
|
||||
if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
|
||||
@@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
||||
// For the udp RPC transport there is no connection count, connect idle time,
|
||||
// or idle time (fields #3, #4, and #5); all other fields are the same. So
|
||||
// we set them to 0 here.
|
||||
if protocol == "udp" {
|
||||
switch protocol {
|
||||
case "udp":
|
||||
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
|
||||
} else if protocol == "tcp" {
|
||||
case "tcp":
|
||||
ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
|
||||
} else if protocol == "rdma" {
|
||||
case "rdma":
|
||||
ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
|
||||
}
|
||||
|
||||
|
||||
96  vendor/github.com/prometheus/procfs/net_dev_snmp6.go (generated, vendored, new file)
@@ -0,0 +1,96 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<PID>/net/dev_snmp6/.
|
||||
// The outer map's keys are interface names and the inner map's keys are stat names.
|
||||
//
|
||||
// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
|
||||
type NetDevSNMP6 map[string]map[string]uint64
|
||||
|
||||
// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
|
||||
// directory.
|
||||
func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
|
||||
return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
|
||||
}
|
||||
|
||||
// Returns kernel/system statistics read from interface files within the /proc/<PID>/net/dev_snmp6/
|
||||
// directory.
|
||||
func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
|
||||
return newNetDevSNMP6(p.path("net/dev_snmp6"))
|
||||
}
|
||||
|
||||
// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
|
||||
func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
|
||||
netDevSNMP6 := make(NetDevSNMP6)
|
||||
|
||||
// The net/dev_snmp6 folders contain one file per interface
|
||||
ifaceFiles, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
// On systems with IPv6 disabled, this directory won't exist.
|
||||
// Do nothing.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
|
||||
for _, iFaceFile := range ifaceFiles {
|
||||
f, err := os.Open(dir + "/" + iFaceFile.Name())
|
||||
if err != nil {
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
|
||||
if err != nil {
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
}
|
||||
|
||||
return netDevSNMP6, nil
|
||||
}
|
||||
|
||||
func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
|
||||
m := make(map[string]uint64)
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
stat := strings.Fields(scanner.Text())
|
||||
if len(stat) < 2 {
|
||||
continue
|
||||
}
|
||||
key, val := stat[0], stat[1]
|
||||
|
||||
// Expect stat name to contain "6" or be "ifIndex"
|
||||
if strings.Contains(key, "6") || key == "ifIndex" {
|
||||
v, err := strconv.ParseUint(val, 10, 64)
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
|
||||
m[key] = v
|
||||
}
|
||||
}
|
||||
return m, scanner.Err()
|
||||
}
|
||||
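Editor's note: the new net_dev_snmp6.go file above exposes per-interface IPv6 SNMP counters. A hedged usage sketch of the new FS.NetDevSNMP6 accessor; the Ip6InOctets counter name is one typical key from /proc/net/dev_snmp6/ and is used here only as an example:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}

	// Outer map keys are interface names, inner map keys are stat names.
	stats, err := fs.NetDevSNMP6()
	if err != nil {
		panic(err)
	}
	for iface, counters := range stats {
		fmt.Println(iface, counters["Ip6InOctets"])
	}
}
```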
8  vendor/github.com/prometheus/procfs/net_ip_socket.go (generated, vendored)
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// readLimit is used by io.LimitReader while reading the content of the
|
||||
// Maximum size limit used by io.LimitReader while reading the content of the
|
||||
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
|
||||
// as each line represents a single used socket.
|
||||
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
|
||||
@@ -50,12 +50,12 @@ type (
|
||||
// UsedSockets shows the total number of parsed lines representing the
|
||||
// number of used sockets.
|
||||
UsedSockets uint64
|
||||
// Drops shows the total number of dropped packets of all UPD sockets.
|
||||
// Drops shows the total number of dropped packets of all UDP sockets.
|
||||
Drops *uint64
|
||||
}
|
||||
|
||||
// netIPSocketLine represents the fields parsed from a single line
|
||||
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
|
||||
// A single line parser for fields from /proc/net/{t,u}dp{,6}.
|
||||
// Fields which are not used by IPSocket are skipped.
|
||||
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
|
||||
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
||||
netIPSocketLine struct {
|
||||
|
||||
21  vendor/github.com/prometheus/procfs/net_protocols.go (generated, vendored)
@@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fields[4] == enabled {
|
||||
switch fields[4] {
|
||||
case enabled:
|
||||
line.Pressure = 1
|
||||
} else if fields[4] == disabled {
|
||||
case disabled:
|
||||
line.Pressure = 0
|
||||
} else {
|
||||
default:
|
||||
line.Pressure = -1
|
||||
}
|
||||
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fields[6] == enabled {
|
||||
switch fields[6] {
|
||||
case enabled:
|
||||
line.Slab = true
|
||||
} else if fields[6] == disabled {
|
||||
case disabled:
|
||||
line.Slab = false
|
||||
} else {
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
|
||||
}
|
||||
line.ModuleName = fields[7]
|
||||
@@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
|
||||
}
|
||||
|
||||
for i := 0; i < len(capabilities); i++ {
|
||||
if capabilities[i] == "y" {
|
||||
switch capabilities[i] {
|
||||
case "y":
|
||||
*capabilityFields[i] = true
|
||||
} else if capabilities[i] == "n" {
|
||||
case "n":
|
||||
*capabilityFields[i] = false
|
||||
} else {
|
||||
default:
|
||||
return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
|
||||
}
|
||||
}
|
||||
|
||||
4  vendor/github.com/prometheus/procfs/net_tcp.go (generated, vendored)
@@ -25,24 +25,28 @@ type (
|
||||
|
||||
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
|
||||
// read from /proc/net/tcp.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
|
||||
func (fs FS) NetTCP() (NetTCP, error) {
|
||||
return newNetTCP(fs.proc.Path("net/tcp"))
|
||||
}
|
||||
|
||||
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
|
||||
// read from /proc/net/tcp6.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
|
||||
func (fs FS) NetTCP6() (NetTCP, error) {
|
||||
return newNetTCP(fs.proc.Path("net/tcp6"))
|
||||
}
|
||||
|
||||
// NetTCPSummary returns already computed statistics like the total queue lengths
|
||||
// for TCP datagrams read from /proc/net/tcp.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
|
||||
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
|
||||
return newNetTCPSummary(fs.proc.Path("net/tcp"))
|
||||
}
|
||||
|
||||
// NetTCP6Summary returns already computed statistics like the total queue lengths
|
||||
// for TCP datagrams read from /proc/net/tcp6.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
|
||||
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
|
||||
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
|
||||
}
|
||||
|
||||
8  vendor/github.com/prometheus/procfs/net_unix.go (generated, vendored)
@@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
|
||||
return &nu, nil
|
||||
}
|
||||
|
||||
func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
|
||||
func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
|
||||
fields := strings.Fields(line)
|
||||
|
||||
l := len(fields)
|
||||
if l < min {
|
||||
return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
|
||||
if l < minFields {
|
||||
return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
|
||||
}
|
||||
|
||||
// Field offsets are as follows:
|
||||
@@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
|
||||
}
|
||||
|
||||
// Path field is optional.
|
||||
if l > min {
|
||||
if l > minFields {
|
||||
// Path occurs at either index 6 or 7 depending on whether inode is
|
||||
// already present.
|
||||
pathIdx := 7
|
||||
|
||||
8
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
8
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
@@ -37,9 +37,9 @@ type Proc struct {
|
||||
type Procs []Proc
|
||||
|
||||
var (
|
||||
ErrFileParse = errors.New("Error Parsing File")
|
||||
ErrFileRead = errors.New("Error Reading File")
|
||||
ErrMountPoint = errors.New("Error Accessing Mount point")
|
||||
ErrFileParse = errors.New("error parsing file")
|
||||
ErrFileRead = errors.New("error reading file")
|
||||
ErrMountPoint = errors.New("error accessing mount point")
|
||||
)
|
||||
|
||||
func (p Procs) Len() int { return len(p) }
|
||||
@@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) {
|
||||
if err != nil {
|
||||
return Proc{}, err
|
||||
}
|
||||
pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
|
||||
pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
|
||||
if err != nil {
|
||||
return Proc{}, err
|
||||
}
|
||||
|
||||
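The sentinel error variables keep their exported names; only the message text is lowercased. A short caller-side sketch (an illustration, not code from this repository) of why consumers that match with errors.Is are unaffected, while anything matching on the old message strings would break:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		// Matching on identity still works after the wording change.
		fmt.Println("mount point error:", errors.Is(err, procfs.ErrMountPoint), err)
		return
	}
	if _, err := fs.Self(); err != nil {
		// "Error Parsing File" became "error parsing file"; errors.Is is unaffected.
		fmt.Println("parse error:", errors.Is(err, procfs.ErrFileParse), err)
	}
}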
2 vendor/github.com/prometheus/procfs/proc_cgroup.go generated vendored
@@ -24,7 +24,7 @@ import (
)

// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
2 vendor/github.com/prometheus/procfs/proc_io.go generated vendored
@@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) {

ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
"read_bytes: %d\nwrite_bytes: %d\n" +
"cancelled_write_bytes: %d\n"
"cancelled_write_bytes: %d\n" //nolint:misspell

_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
224 vendor/github.com/prometheus/procfs/proc_netstat.go generated vendored
@@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
case "TcpExt":
switch key {
case "SyncookiesSent":
procNetstat.TcpExt.SyncookiesSent = &value
procNetstat.SyncookiesSent = &value
case "SyncookiesRecv":
procNetstat.TcpExt.SyncookiesRecv = &value
procNetstat.SyncookiesRecv = &value
case "SyncookiesFailed":
procNetstat.TcpExt.SyncookiesFailed = &value
procNetstat.SyncookiesFailed = &value
case "EmbryonicRsts":
procNetstat.TcpExt.EmbryonicRsts = &value
procNetstat.EmbryonicRsts = &value
case "PruneCalled":
procNetstat.TcpExt.PruneCalled = &value
procNetstat.PruneCalled = &value
case "RcvPruned":
procNetstat.TcpExt.RcvPruned = &value
procNetstat.RcvPruned = &value
case "OfoPruned":
procNetstat.TcpExt.OfoPruned = &value
procNetstat.OfoPruned = &value
case "OutOfWindowIcmps":
procNetstat.TcpExt.OutOfWindowIcmps = &value
procNetstat.OutOfWindowIcmps = &value
case "LockDroppedIcmps":
procNetstat.TcpExt.LockDroppedIcmps = &value
procNetstat.LockDroppedIcmps = &value
case "ArpFilter":
procNetstat.TcpExt.ArpFilter = &value
procNetstat.ArpFilter = &value
case "TW":
procNetstat.TcpExt.TW = &value
procNetstat.TW = &value
case "TWRecycled":
procNetstat.TcpExt.TWRecycled = &value
procNetstat.TWRecycled = &value
case "TWKilled":
procNetstat.TcpExt.TWKilled = &value
procNetstat.TWKilled = &value
case "PAWSActive":
procNetstat.TcpExt.PAWSActive = &value
procNetstat.PAWSActive = &value
case "PAWSEstab":
procNetstat.TcpExt.PAWSEstab = &value
procNetstat.PAWSEstab = &value
case "DelayedACKs":
procNetstat.TcpExt.DelayedACKs = &value
procNetstat.DelayedACKs = &value
case "DelayedACKLocked":
procNetstat.TcpExt.DelayedACKLocked = &value
procNetstat.DelayedACKLocked = &value
case "DelayedACKLost":
procNetstat.TcpExt.DelayedACKLost = &value
procNetstat.DelayedACKLost = &value
case "ListenOverflows":
procNetstat.TcpExt.ListenOverflows = &value
procNetstat.ListenOverflows = &value
case "ListenDrops":
procNetstat.TcpExt.ListenDrops = &value
procNetstat.ListenDrops = &value
case "TCPHPHits":
procNetstat.TcpExt.TCPHPHits = &value
procNetstat.TCPHPHits = &value
case "TCPPureAcks":
procNetstat.TcpExt.TCPPureAcks = &value
procNetstat.TCPPureAcks = &value
case "TCPHPAcks":
procNetstat.TcpExt.TCPHPAcks = &value
procNetstat.TCPHPAcks = &value
case "TCPRenoRecovery":
procNetstat.TcpExt.TCPRenoRecovery = &value
procNetstat.TCPRenoRecovery = &value
case "TCPSackRecovery":
procNetstat.TcpExt.TCPSackRecovery = &value
procNetstat.TCPSackRecovery = &value
case "TCPSACKReneging":
procNetstat.TcpExt.TCPSACKReneging = &value
procNetstat.TCPSACKReneging = &value
case "TCPSACKReorder":
procNetstat.TcpExt.TCPSACKReorder = &value
procNetstat.TCPSACKReorder = &value
case "TCPRenoReorder":
procNetstat.TcpExt.TCPRenoReorder = &value
procNetstat.TCPRenoReorder = &value
case "TCPTSReorder":
procNetstat.TcpExt.TCPTSReorder = &value
procNetstat.TCPTSReorder = &value
case "TCPFullUndo":
procNetstat.TcpExt.TCPFullUndo = &value
procNetstat.TCPFullUndo = &value
case "TCPPartialUndo":
procNetstat.TcpExt.TCPPartialUndo = &value
procNetstat.TCPPartialUndo = &value
case "TCPDSACKUndo":
procNetstat.TcpExt.TCPDSACKUndo = &value
procNetstat.TCPDSACKUndo = &value
case "TCPLossUndo":
procNetstat.TcpExt.TCPLossUndo = &value
procNetstat.TCPLossUndo = &value
case "TCPLostRetransmit":
procNetstat.TcpExt.TCPLostRetransmit = &value
procNetstat.TCPLostRetransmit = &value
case "TCPRenoFailures":
procNetstat.TcpExt.TCPRenoFailures = &value
procNetstat.TCPRenoFailures = &value
case "TCPSackFailures":
procNetstat.TcpExt.TCPSackFailures = &value
procNetstat.TCPSackFailures = &value
case "TCPLossFailures":
procNetstat.TcpExt.TCPLossFailures = &value
procNetstat.TCPLossFailures = &value
case "TCPFastRetrans":
procNetstat.TcpExt.TCPFastRetrans = &value
procNetstat.TCPFastRetrans = &value
case "TCPSlowStartRetrans":
procNetstat.TcpExt.TCPSlowStartRetrans = &value
procNetstat.TCPSlowStartRetrans = &value
case "TCPTimeouts":
procNetstat.TcpExt.TCPTimeouts = &value
procNetstat.TCPTimeouts = &value
case "TCPLossProbes":
procNetstat.TcpExt.TCPLossProbes = &value
procNetstat.TCPLossProbes = &value
case "TCPLossProbeRecovery":
procNetstat.TcpExt.TCPLossProbeRecovery = &value
procNetstat.TCPLossProbeRecovery = &value
case "TCPRenoRecoveryFail":
procNetstat.TcpExt.TCPRenoRecoveryFail = &value
procNetstat.TCPRenoRecoveryFail = &value
case "TCPSackRecoveryFail":
procNetstat.TcpExt.TCPSackRecoveryFail = &value
procNetstat.TCPSackRecoveryFail = &value
case "TCPRcvCollapsed":
procNetstat.TcpExt.TCPRcvCollapsed = &value
procNetstat.TCPRcvCollapsed = &value
case "TCPDSACKOldSent":
procNetstat.TcpExt.TCPDSACKOldSent = &value
procNetstat.TCPDSACKOldSent = &value
case "TCPDSACKOfoSent":
procNetstat.TcpExt.TCPDSACKOfoSent = &value
procNetstat.TCPDSACKOfoSent = &value
case "TCPDSACKRecv":
procNetstat.TcpExt.TCPDSACKRecv = &value
procNetstat.TCPDSACKRecv = &value
case "TCPDSACKOfoRecv":
procNetstat.TcpExt.TCPDSACKOfoRecv = &value
procNetstat.TCPDSACKOfoRecv = &value
case "TCPAbortOnData":
procNetstat.TcpExt.TCPAbortOnData = &value
procNetstat.TCPAbortOnData = &value
case "TCPAbortOnClose":
procNetstat.TcpExt.TCPAbortOnClose = &value
procNetstat.TCPAbortOnClose = &value
case "TCPDeferAcceptDrop":
procNetstat.TcpExt.TCPDeferAcceptDrop = &value
procNetstat.TCPDeferAcceptDrop = &value
case "IPReversePathFilter":
procNetstat.TcpExt.IPReversePathFilter = &value
procNetstat.IPReversePathFilter = &value
case "TCPTimeWaitOverflow":
procNetstat.TcpExt.TCPTimeWaitOverflow = &value
procNetstat.TCPTimeWaitOverflow = &value
case "TCPReqQFullDoCookies":
procNetstat.TcpExt.TCPReqQFullDoCookies = &value
procNetstat.TCPReqQFullDoCookies = &value
case "TCPReqQFullDrop":
procNetstat.TcpExt.TCPReqQFullDrop = &value
procNetstat.TCPReqQFullDrop = &value
case "TCPRetransFail":
procNetstat.TcpExt.TCPRetransFail = &value
procNetstat.TCPRetransFail = &value
case "TCPRcvCoalesce":
procNetstat.TcpExt.TCPRcvCoalesce = &value
procNetstat.TCPRcvCoalesce = &value
case "TCPRcvQDrop":
procNetstat.TcpExt.TCPRcvQDrop = &value
procNetstat.TCPRcvQDrop = &value
case "TCPOFOQueue":
procNetstat.TcpExt.TCPOFOQueue = &value
procNetstat.TCPOFOQueue = &value
case "TCPOFODrop":
procNetstat.TcpExt.TCPOFODrop = &value
procNetstat.TCPOFODrop = &value
case "TCPOFOMerge":
procNetstat.TcpExt.TCPOFOMerge = &value
procNetstat.TCPOFOMerge = &value
case "TCPChallengeACK":
procNetstat.TcpExt.TCPChallengeACK = &value
procNetstat.TCPChallengeACK = &value
case "TCPSYNChallenge":
procNetstat.TcpExt.TCPSYNChallenge = &value
procNetstat.TCPSYNChallenge = &value
case "TCPFastOpenActive":
procNetstat.TcpExt.TCPFastOpenActive = &value
procNetstat.TCPFastOpenActive = &value
case "TCPFastOpenActiveFail":
procNetstat.TcpExt.TCPFastOpenActiveFail = &value
procNetstat.TCPFastOpenActiveFail = &value
case "TCPFastOpenPassive":
procNetstat.TcpExt.TCPFastOpenPassive = &value
procNetstat.TCPFastOpenPassive = &value
case "TCPFastOpenPassiveFail":
procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
procNetstat.TCPFastOpenPassiveFail = &value
case "TCPFastOpenListenOverflow":
procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
procNetstat.TCPFastOpenListenOverflow = &value
case "TCPFastOpenCookieReqd":
procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
procNetstat.TCPFastOpenCookieReqd = &value
case "TCPFastOpenBlackhole":
procNetstat.TcpExt.TCPFastOpenBlackhole = &value
procNetstat.TCPFastOpenBlackhole = &value
case "TCPSpuriousRtxHostQueues":
procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
procNetstat.TCPSpuriousRtxHostQueues = &value
case "BusyPollRxPackets":
procNetstat.TcpExt.BusyPollRxPackets = &value
procNetstat.BusyPollRxPackets = &value
case "TCPAutoCorking":
procNetstat.TcpExt.TCPAutoCorking = &value
procNetstat.TCPAutoCorking = &value
case "TCPFromZeroWindowAdv":
procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
procNetstat.TCPFromZeroWindowAdv = &value
case "TCPToZeroWindowAdv":
procNetstat.TcpExt.TCPToZeroWindowAdv = &value
procNetstat.TCPToZeroWindowAdv = &value
case "TCPWantZeroWindowAdv":
procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
procNetstat.TCPWantZeroWindowAdv = &value
case "TCPSynRetrans":
procNetstat.TcpExt.TCPSynRetrans = &value
procNetstat.TCPSynRetrans = &value
case "TCPOrigDataSent":
procNetstat.TcpExt.TCPOrigDataSent = &value
procNetstat.TCPOrigDataSent = &value
case "TCPHystartTrainDetect":
procNetstat.TcpExt.TCPHystartTrainDetect = &value
procNetstat.TCPHystartTrainDetect = &value
case "TCPHystartTrainCwnd":
procNetstat.TcpExt.TCPHystartTrainCwnd = &value
procNetstat.TCPHystartTrainCwnd = &value
case "TCPHystartDelayDetect":
procNetstat.TcpExt.TCPHystartDelayDetect = &value
procNetstat.TCPHystartDelayDetect = &value
case "TCPHystartDelayCwnd":
procNetstat.TcpExt.TCPHystartDelayCwnd = &value
procNetstat.TCPHystartDelayCwnd = &value
case "TCPACKSkippedSynRecv":
procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
procNetstat.TCPACKSkippedSynRecv = &value
case "TCPACKSkippedPAWS":
procNetstat.TcpExt.TCPACKSkippedPAWS = &value
procNetstat.TCPACKSkippedPAWS = &value
case "TCPACKSkippedSeq":
procNetstat.TcpExt.TCPACKSkippedSeq = &value
procNetstat.TCPACKSkippedSeq = &value
case "TCPACKSkippedFinWait2":
procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
procNetstat.TCPACKSkippedFinWait2 = &value
case "TCPACKSkippedTimeWait":
procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
procNetstat.TCPACKSkippedTimeWait = &value
case "TCPACKSkippedChallenge":
procNetstat.TcpExt.TCPACKSkippedChallenge = &value
procNetstat.TCPACKSkippedChallenge = &value
case "TCPWinProbe":
procNetstat.TcpExt.TCPWinProbe = &value
procNetstat.TCPWinProbe = &value
case "TCPKeepAlive":
procNetstat.TcpExt.TCPKeepAlive = &value
procNetstat.TCPKeepAlive = &value
case "TCPMTUPFail":
procNetstat.TcpExt.TCPMTUPFail = &value
procNetstat.TCPMTUPFail = &value
case "TCPMTUPSuccess":
procNetstat.TcpExt.TCPMTUPSuccess = &value
procNetstat.TCPMTUPSuccess = &value
case "TCPWqueueTooBig":
procNetstat.TcpExt.TCPWqueueTooBig = &value
procNetstat.TCPWqueueTooBig = &value
}
case "IpExt":
switch key {
case "InNoRoutes":
procNetstat.IpExt.InNoRoutes = &value
procNetstat.InNoRoutes = &value
case "InTruncatedPkts":
procNetstat.IpExt.InTruncatedPkts = &value
procNetstat.InTruncatedPkts = &value
case "InMcastPkts":
procNetstat.IpExt.InMcastPkts = &value
procNetstat.InMcastPkts = &value
case "OutMcastPkts":
procNetstat.IpExt.OutMcastPkts = &value
procNetstat.OutMcastPkts = &value
case "InBcastPkts":
procNetstat.IpExt.InBcastPkts = &value
procNetstat.InBcastPkts = &value
case "OutBcastPkts":
procNetstat.IpExt.OutBcastPkts = &value
procNetstat.OutBcastPkts = &value
case "InOctets":
procNetstat.IpExt.InOctets = &value
procNetstat.InOctets = &value
case "OutOctets":
procNetstat.IpExt.OutOctets = &value
procNetstat.OutOctets = &value
case "InMcastOctets":
procNetstat.IpExt.InMcastOctets = &value
procNetstat.InMcastOctets = &value
case "OutMcastOctets":
procNetstat.IpExt.OutMcastOctets = &value
procNetstat.OutMcastOctets = &value
case "InBcastOctets":
procNetstat.IpExt.InBcastOctets = &value
procNetstat.InBcastOctets = &value
case "OutBcastOctets":
procNetstat.IpExt.OutBcastOctets = &value
procNetstat.OutBcastOctets = &value
case "InCsumErrors":
procNetstat.IpExt.InCsumErrors = &value
procNetstat.InCsumErrors = &value
case "InNoECTPkts":
procNetstat.IpExt.InNoECTPkts = &value
procNetstat.InNoECTPkts = &value
case "InECT1Pkts":
procNetstat.IpExt.InECT1Pkts = &value
procNetstat.InECT1Pkts = &value
case "InECT0Pkts":
procNetstat.IpExt.InECT0Pkts = &value
procNetstat.InECT0Pkts = &value
case "InCEPkts":
procNetstat.IpExt.InCEPkts = &value
procNetstat.InCEPkts = &value
case "ReasmOverlaps":
procNetstat.IpExt.ReasmOverlaps = &value
procNetstat.ReasmOverlaps = &value
}
}
}
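The long run of changes above only drops the redundant embedded-struct selector (procNetstat.TcpExt.X becomes procNetstat.X); the assigned field is unchanged. A tiny standalone sketch with illustrative types (not the real procfs structs) showing that a promoted field and the explicit selector refer to the same storage:

package main

import "fmt"

type TcpExt struct {
	SyncookiesSent *float64
}

type ProcNetstat struct {
	TcpExt // embedded, so its fields are promoted
}

func main() {
	v := 1.0
	var p ProcNetstat
	p.SyncookiesSent = &v                 // promoted form, as in the new lines above
	fmt.Println(*p.TcpExt.SyncookiesSent) // explicit selector, as in the old lines: prints 1
}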
4 vendor/github.com/prometheus/procfs/proc_smaps.go generated vendored
@@ -19,7 +19,6 @@ package procfs
import (
"bufio"
"errors"
"fmt"
"os"
"regexp"
"strconv"
@@ -29,7 +28,7 @@ import (
)

var (
// match the header line before each mapped zone in `/proc/pid/smaps`.
// Match the header line before each mapped zone in `/proc/pid/smaps`.
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)

@@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
func (s *ProcSMapsRollup) parseLine(line string) error {
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
fmt.Println(line)
return errors.New("invalid net/dev line, missing colon")
}
120 vendor/github.com/prometheus/procfs/proc_snmp.go generated vendored
@@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip":
switch key {
case "Forwarding":
procSnmp.Ip.Forwarding = &value
procSnmp.Forwarding = &value
case "DefaultTTL":
procSnmp.Ip.DefaultTTL = &value
procSnmp.DefaultTTL = &value
case "InReceives":
procSnmp.Ip.InReceives = &value
procSnmp.InReceives = &value
case "InHdrErrors":
procSnmp.Ip.InHdrErrors = &value
procSnmp.InHdrErrors = &value
case "InAddrErrors":
procSnmp.Ip.InAddrErrors = &value
procSnmp.InAddrErrors = &value
case "ForwDatagrams":
procSnmp.Ip.ForwDatagrams = &value
procSnmp.ForwDatagrams = &value
case "InUnknownProtos":
procSnmp.Ip.InUnknownProtos = &value
procSnmp.InUnknownProtos = &value
case "InDiscards":
procSnmp.Ip.InDiscards = &value
procSnmp.InDiscards = &value
case "InDelivers":
procSnmp.Ip.InDelivers = &value
procSnmp.InDelivers = &value
case "OutRequests":
procSnmp.Ip.OutRequests = &value
procSnmp.OutRequests = &value
case "OutDiscards":
procSnmp.Ip.OutDiscards = &value
procSnmp.OutDiscards = &value
case "OutNoRoutes":
procSnmp.Ip.OutNoRoutes = &value
procSnmp.OutNoRoutes = &value
case "ReasmTimeout":
procSnmp.Ip.ReasmTimeout = &value
procSnmp.ReasmTimeout = &value
case "ReasmReqds":
procSnmp.Ip.ReasmReqds = &value
procSnmp.ReasmReqds = &value
case "ReasmOKs":
procSnmp.Ip.ReasmOKs = &value
procSnmp.ReasmOKs = &value
case "ReasmFails":
procSnmp.Ip.ReasmFails = &value
procSnmp.ReasmFails = &value
case "FragOKs":
procSnmp.Ip.FragOKs = &value
procSnmp.FragOKs = &value
case "FragFails":
procSnmp.Ip.FragFails = &value
procSnmp.FragFails = &value
case "FragCreates":
procSnmp.Ip.FragCreates = &value
procSnmp.FragCreates = &value
}
case "Icmp":
switch key {
case "InMsgs":
procSnmp.Icmp.InMsgs = &value
procSnmp.InMsgs = &value
case "InErrors":
procSnmp.Icmp.InErrors = &value
case "InCsumErrors":
procSnmp.Icmp.InCsumErrors = &value
case "InDestUnreachs":
procSnmp.Icmp.InDestUnreachs = &value
procSnmp.InDestUnreachs = &value
case "InTimeExcds":
procSnmp.Icmp.InTimeExcds = &value
procSnmp.InTimeExcds = &value
case "InParmProbs":
procSnmp.Icmp.InParmProbs = &value
procSnmp.InParmProbs = &value
case "InSrcQuenchs":
procSnmp.Icmp.InSrcQuenchs = &value
procSnmp.InSrcQuenchs = &value
case "InRedirects":
procSnmp.Icmp.InRedirects = &value
procSnmp.InRedirects = &value
case "InEchos":
procSnmp.Icmp.InEchos = &value
procSnmp.InEchos = &value
case "InEchoReps":
procSnmp.Icmp.InEchoReps = &value
procSnmp.InEchoReps = &value
case "InTimestamps":
procSnmp.Icmp.InTimestamps = &value
procSnmp.InTimestamps = &value
case "InTimestampReps":
procSnmp.Icmp.InTimestampReps = &value
procSnmp.InTimestampReps = &value
case "InAddrMasks":
procSnmp.Icmp.InAddrMasks = &value
procSnmp.InAddrMasks = &value
case "InAddrMaskReps":
procSnmp.Icmp.InAddrMaskReps = &value
procSnmp.InAddrMaskReps = &value
case "OutMsgs":
procSnmp.Icmp.OutMsgs = &value
procSnmp.OutMsgs = &value
case "OutErrors":
procSnmp.Icmp.OutErrors = &value
procSnmp.OutErrors = &value
case "OutDestUnreachs":
procSnmp.Icmp.OutDestUnreachs = &value
procSnmp.OutDestUnreachs = &value
case "OutTimeExcds":
procSnmp.Icmp.OutTimeExcds = &value
procSnmp.OutTimeExcds = &value
case "OutParmProbs":
procSnmp.Icmp.OutParmProbs = &value
procSnmp.OutParmProbs = &value
case "OutSrcQuenchs":
procSnmp.Icmp.OutSrcQuenchs = &value
procSnmp.OutSrcQuenchs = &value
case "OutRedirects":
procSnmp.Icmp.OutRedirects = &value
procSnmp.OutRedirects = &value
case "OutEchos":
procSnmp.Icmp.OutEchos = &value
procSnmp.OutEchos = &value
case "OutEchoReps":
procSnmp.Icmp.OutEchoReps = &value
procSnmp.OutEchoReps = &value
case "OutTimestamps":
procSnmp.Icmp.OutTimestamps = &value
procSnmp.OutTimestamps = &value
case "OutTimestampReps":
procSnmp.Icmp.OutTimestampReps = &value
procSnmp.OutTimestampReps = &value
case "OutAddrMasks":
procSnmp.Icmp.OutAddrMasks = &value
procSnmp.OutAddrMasks = &value
case "OutAddrMaskReps":
procSnmp.Icmp.OutAddrMaskReps = &value
procSnmp.OutAddrMaskReps = &value
}
case "IcmpMsg":
switch key {
case "InType3":
procSnmp.IcmpMsg.InType3 = &value
procSnmp.InType3 = &value
case "OutType3":
procSnmp.IcmpMsg.OutType3 = &value
procSnmp.OutType3 = &value
}
case "Tcp":
switch key {
case "RtoAlgorithm":
procSnmp.Tcp.RtoAlgorithm = &value
procSnmp.RtoAlgorithm = &value
case "RtoMin":
procSnmp.Tcp.RtoMin = &value
procSnmp.RtoMin = &value
case "RtoMax":
procSnmp.Tcp.RtoMax = &value
procSnmp.RtoMax = &value
case "MaxConn":
procSnmp.Tcp.MaxConn = &value
procSnmp.MaxConn = &value
case "ActiveOpens":
procSnmp.Tcp.ActiveOpens = &value
procSnmp.ActiveOpens = &value
case "PassiveOpens":
procSnmp.Tcp.PassiveOpens = &value
procSnmp.PassiveOpens = &value
case "AttemptFails":
procSnmp.Tcp.AttemptFails = &value
procSnmp.AttemptFails = &value
case "EstabResets":
procSnmp.Tcp.EstabResets = &value
procSnmp.EstabResets = &value
case "CurrEstab":
procSnmp.Tcp.CurrEstab = &value
procSnmp.CurrEstab = &value
case "InSegs":
procSnmp.Tcp.InSegs = &value
procSnmp.InSegs = &value
case "OutSegs":
procSnmp.Tcp.OutSegs = &value
procSnmp.OutSegs = &value
case "RetransSegs":
procSnmp.Tcp.RetransSegs = &value
procSnmp.RetransSegs = &value
case "InErrs":
procSnmp.Tcp.InErrs = &value
procSnmp.InErrs = &value
case "OutRsts":
procSnmp.Tcp.OutRsts = &value
procSnmp.OutRsts = &value
case "InCsumErrors":
procSnmp.Tcp.InCsumErrors = &value
}
150 vendor/github.com/prometheus/procfs/proc_snmp6.go generated vendored
@@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
switch key {
case "InReceives":
procSnmp6.Ip6.InReceives = &value
procSnmp6.InReceives = &value
case "InHdrErrors":
procSnmp6.Ip6.InHdrErrors = &value
procSnmp6.InHdrErrors = &value
case "InTooBigErrors":
procSnmp6.Ip6.InTooBigErrors = &value
procSnmp6.InTooBigErrors = &value
case "InNoRoutes":
procSnmp6.Ip6.InNoRoutes = &value
procSnmp6.InNoRoutes = &value
case "InAddrErrors":
procSnmp6.Ip6.InAddrErrors = &value
procSnmp6.InAddrErrors = &value
case "InUnknownProtos":
procSnmp6.Ip6.InUnknownProtos = &value
procSnmp6.InUnknownProtos = &value
case "InTruncatedPkts":
procSnmp6.Ip6.InTruncatedPkts = &value
procSnmp6.InTruncatedPkts = &value
case "InDiscards":
procSnmp6.Ip6.InDiscards = &value
procSnmp6.InDiscards = &value
case "InDelivers":
procSnmp6.Ip6.InDelivers = &value
procSnmp6.InDelivers = &value
case "OutForwDatagrams":
procSnmp6.Ip6.OutForwDatagrams = &value
procSnmp6.OutForwDatagrams = &value
case "OutRequests":
procSnmp6.Ip6.OutRequests = &value
procSnmp6.OutRequests = &value
case "OutDiscards":
procSnmp6.Ip6.OutDiscards = &value
procSnmp6.OutDiscards = &value
case "OutNoRoutes":
procSnmp6.Ip6.OutNoRoutes = &value
procSnmp6.OutNoRoutes = &value
case "ReasmTimeout":
procSnmp6.Ip6.ReasmTimeout = &value
procSnmp6.ReasmTimeout = &value
case "ReasmReqds":
procSnmp6.Ip6.ReasmReqds = &value
procSnmp6.ReasmReqds = &value
case "ReasmOKs":
procSnmp6.Ip6.ReasmOKs = &value
procSnmp6.ReasmOKs = &value
case "ReasmFails":
procSnmp6.Ip6.ReasmFails = &value
procSnmp6.ReasmFails = &value
case "FragOKs":
procSnmp6.Ip6.FragOKs = &value
procSnmp6.FragOKs = &value
case "FragFails":
procSnmp6.Ip6.FragFails = &value
procSnmp6.FragFails = &value
case "FragCreates":
procSnmp6.Ip6.FragCreates = &value
procSnmp6.FragCreates = &value
case "InMcastPkts":
procSnmp6.Ip6.InMcastPkts = &value
procSnmp6.InMcastPkts = &value
case "OutMcastPkts":
procSnmp6.Ip6.OutMcastPkts = &value
procSnmp6.OutMcastPkts = &value
case "InOctets":
procSnmp6.Ip6.InOctets = &value
procSnmp6.InOctets = &value
case "OutOctets":
procSnmp6.Ip6.OutOctets = &value
procSnmp6.OutOctets = &value
case "InMcastOctets":
procSnmp6.Ip6.InMcastOctets = &value
procSnmp6.InMcastOctets = &value
case "OutMcastOctets":
procSnmp6.Ip6.OutMcastOctets = &value
procSnmp6.OutMcastOctets = &value
case "InBcastOctets":
procSnmp6.Ip6.InBcastOctets = &value
procSnmp6.InBcastOctets = &value
case "OutBcastOctets":
procSnmp6.Ip6.OutBcastOctets = &value
procSnmp6.OutBcastOctets = &value
case "InNoECTPkts":
procSnmp6.Ip6.InNoECTPkts = &value
procSnmp6.InNoECTPkts = &value
case "InECT1Pkts":
procSnmp6.Ip6.InECT1Pkts = &value
procSnmp6.InECT1Pkts = &value
case "InECT0Pkts":
procSnmp6.Ip6.InECT0Pkts = &value
procSnmp6.InECT0Pkts = &value
case "InCEPkts":
procSnmp6.Ip6.InCEPkts = &value
procSnmp6.InCEPkts = &value

}
case "Icmp6":
switch key {
case "InMsgs":
procSnmp6.Icmp6.InMsgs = &value
procSnmp6.InMsgs = &value
case "InErrors":
procSnmp6.Icmp6.InErrors = &value
case "OutMsgs":
procSnmp6.Icmp6.OutMsgs = &value
procSnmp6.OutMsgs = &value
case "OutErrors":
procSnmp6.Icmp6.OutErrors = &value
procSnmp6.OutErrors = &value
case "InCsumErrors":
procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs":
procSnmp6.Icmp6.InDestUnreachs = &value
procSnmp6.InDestUnreachs = &value
case "InPktTooBigs":
procSnmp6.Icmp6.InPktTooBigs = &value
procSnmp6.InPktTooBigs = &value
case "InTimeExcds":
procSnmp6.Icmp6.InTimeExcds = &value
procSnmp6.InTimeExcds = &value
case "InParmProblems":
procSnmp6.Icmp6.InParmProblems = &value
procSnmp6.InParmProblems = &value
case "InEchos":
procSnmp6.Icmp6.InEchos = &value
procSnmp6.InEchos = &value
case "InEchoReplies":
procSnmp6.Icmp6.InEchoReplies = &value
procSnmp6.InEchoReplies = &value
case "InGroupMembQueries":
procSnmp6.Icmp6.InGroupMembQueries = &value
procSnmp6.InGroupMembQueries = &value
case "InGroupMembResponses":
procSnmp6.Icmp6.InGroupMembResponses = &value
procSnmp6.InGroupMembResponses = &value
case "InGroupMembReductions":
procSnmp6.Icmp6.InGroupMembReductions = &value
procSnmp6.InGroupMembReductions = &value
case "InRouterSolicits":
procSnmp6.Icmp6.InRouterSolicits = &value
procSnmp6.InRouterSolicits = &value
case "InRouterAdvertisements":
procSnmp6.Icmp6.InRouterAdvertisements = &value
procSnmp6.InRouterAdvertisements = &value
case "InNeighborSolicits":
procSnmp6.Icmp6.InNeighborSolicits = &value
procSnmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements":
procSnmp6.Icmp6.InNeighborAdvertisements = &value
procSnmp6.InNeighborAdvertisements = &value
case "InRedirects":
procSnmp6.Icmp6.InRedirects = &value
procSnmp6.InRedirects = &value
case "InMLDv2Reports":
procSnmp6.Icmp6.InMLDv2Reports = &value
procSnmp6.InMLDv2Reports = &value
case "OutDestUnreachs":
procSnmp6.Icmp6.OutDestUnreachs = &value
procSnmp6.OutDestUnreachs = &value
case "OutPktTooBigs":
procSnmp6.Icmp6.OutPktTooBigs = &value
procSnmp6.OutPktTooBigs = &value
case "OutTimeExcds":
procSnmp6.Icmp6.OutTimeExcds = &value
procSnmp6.OutTimeExcds = &value
case "OutParmProblems":
procSnmp6.Icmp6.OutParmProblems = &value
procSnmp6.OutParmProblems = &value
case "OutEchos":
procSnmp6.Icmp6.OutEchos = &value
procSnmp6.OutEchos = &value
case "OutEchoReplies":
procSnmp6.Icmp6.OutEchoReplies = &value
procSnmp6.OutEchoReplies = &value
case "OutGroupMembQueries":
procSnmp6.Icmp6.OutGroupMembQueries = &value
procSnmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses":
procSnmp6.Icmp6.OutGroupMembResponses = &value
procSnmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions":
procSnmp6.Icmp6.OutGroupMembReductions = &value
procSnmp6.OutGroupMembReductions = &value
case "OutRouterSolicits":
procSnmp6.Icmp6.OutRouterSolicits = &value
procSnmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements":
procSnmp6.Icmp6.OutRouterAdvertisements = &value
procSnmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits":
procSnmp6.Icmp6.OutNeighborSolicits = &value
procSnmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements":
procSnmp6.Icmp6.OutNeighborAdvertisements = &value
procSnmp6.OutNeighborAdvertisements = &value
case "OutRedirects":
procSnmp6.Icmp6.OutRedirects = &value
procSnmp6.OutRedirects = &value
case "OutMLDv2Reports":
procSnmp6.Icmp6.OutMLDv2Reports = &value
procSnmp6.OutMLDv2Reports = &value
case "InType1":
procSnmp6.Icmp6.InType1 = &value
procSnmp6.InType1 = &value
case "InType134":
procSnmp6.Icmp6.InType134 = &value
procSnmp6.InType134 = &value
case "InType135":
procSnmp6.Icmp6.InType135 = &value
procSnmp6.InType135 = &value
case "InType136":
procSnmp6.Icmp6.InType136 = &value
procSnmp6.InType136 = &value
case "InType143":
procSnmp6.Icmp6.InType143 = &value
procSnmp6.InType143 = &value
case "OutType133":
procSnmp6.Icmp6.OutType133 = &value
procSnmp6.OutType133 = &value
case "OutType135":
procSnmp6.Icmp6.OutType135 = &value
procSnmp6.OutType135 = &value
case "OutType136":
procSnmp6.Icmp6.OutType136 = &value
procSnmp6.OutType136 = &value
case "OutType143":
procSnmp6.Icmp6.OutType143 = &value
procSnmp6.OutType143 = &value
}
case "Udp6":
switch key {
@@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "InCsumErrors":
procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti":
procSnmp6.Udp6.IgnoredMulti = &value
procSnmp6.IgnoredMulti = &value
}
case "UdpLite6":
switch key {
18 vendor/github.com/prometheus/procfs/proc_status.go generated vendored
@@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
}
}
case "NSpid":
s.NSpids = calcNSPidsList(vString)
nspids, err := calcNSPidsList(vString)
if err != nil {
return err
}
s.NSpids = nspids
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 {
return g
}

func calcNSPidsList(nspidsString string) []uint64 {
s := strings.Split(nspidsString, " ")
func calcNSPidsList(nspidsString string) ([]uint64, error) {
s := strings.Split(nspidsString, "\t")
var nspids []uint64

for _, nspid := range s {
nspid, _ := strconv.ParseUint(nspid, 10, 64)
if nspid == 0 {
continue
nspid, err := strconv.ParseUint(nspid, 10, 64)
if err != nil {
return nil, err
}
nspids = append(nspids, nspid)
}

return nspids
return nspids, nil
}
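Two behavioural details change above: the NSpid value is now split on tabs (its separator in /proc/<pid>/status) rather than spaces, and parse failures are returned instead of zero entries being silently skipped. A standalone copy of the updated helper for illustration (calcNSPidsList is unexported in procfs, so this is a local restatement of the diff, not an import):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// calcNSPidsList mirrors the new version shown in the hunk above.
func calcNSPidsList(nspidsString string) ([]uint64, error) {
	s := strings.Split(nspidsString, "\t")
	var nspids []uint64
	for _, nspid := range s {
		nspid, err := strconv.ParseUint(nspid, 10, 64)
		if err != nil {
			return nil, err
		}
		nspids = append(nspids, nspid)
	}
	return nspids, nil
}

func main() {
	fmt.Println(calcNSPidsList("2571\t1")) // [2571 1] <nil>
	fmt.Println(calcNSPidsList("2571 1"))  // empty slice plus a strconv error: space-separated input no longer parses
}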
2 vendor/github.com/prometheus/procfs/proc_sys.go generated vendored
@@ -21,7 +21,7 @@ import (
)

func sysctlToPath(sysctl string) string {
return strings.Replace(sysctl, ".", "/", -1)
return strings.ReplaceAll(sysctl, ".", "/")
}

func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
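The change above is purely mechanical: strings.ReplaceAll(s, old, new) is defined as strings.Replace(s, old, new, -1), so sysctlToPath behaves exactly as before. A short standalone check (illustrative input, not from the repository):

package main

import (
	"fmt"
	"strings"
)

func main() {
	sysctl := "net.ipv4.ip_forward"
	fmt.Println(strings.Replace(sysctl, ".", "/", -1)) // net/ipv4/ip_forward
	fmt.Println(strings.ReplaceAll(sysctl, ".", "/"))  // net/ipv4/ip_forward (identical)
}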
22 vendor/github.com/prometheus/procfs/softirqs.go generated vendored
@@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
if len(parts) < 2 {
continue
}
switch {
case parts[0] == "HI:":
switch parts[0] {
case "HI:":
perCPU := parts[1:]
softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TIMER:":
case "TIMER:":
perCPU := parts[1:]
softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_TX:":
case "NET_TX:":
perCPU := parts[1:]
softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_RX:":
case "NET_RX:":
perCPU := parts[1:]
softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "BLOCK:":
case "BLOCK:":
perCPU := parts[1:]
softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "IRQ_POLL:":
case "IRQ_POLL:":
perCPU := parts[1:]
softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TASKLET:":
case "TASKLET:":
perCPU := parts[1:]
softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "SCHED:":
case "SCHED:":
perCPU := parts[1:]
softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "HRTIMER:":
case "HRTIMER:":
perCPU := parts[1:]
softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "RCU:":
case "RCU:":
perCPU := parts[1:]
softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU {
12 vendor/modules.txt vendored
@@ -1668,8 +1668,8 @@ github.com/prometheus/alertmanager/matcher/parse
github.com/prometheus/alertmanager/pkg/labels
github.com/prometheus/alertmanager/template
github.com/prometheus/alertmanager/types
# github.com/prometheus/client_golang v1.22.0
## explicit; go 1.22
# github.com/prometheus/client_golang v1.23.0
## explicit; go 1.23.0
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus
@@ -1680,14 +1680,14 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal
# github.com/prometheus/client_model v0.6.2
## explicit; go 1.22.0
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.62.0
## explicit; go 1.21
# github.com/prometheus/common v0.65.0
## explicit; go 1.23.0
github.com/prometheus/common/expfmt
github.com/prometheus/common/helpers/templates
github.com/prometheus/common/model
github.com/prometheus/common/promslog
# github.com/prometheus/procfs v0.15.1
## explicit; go 1.20
# github.com/prometheus/procfs v0.16.1
## explicit; go 1.23.0
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util