* Add prometheus collector and route * dep ensure -add github.com/prometheus/client_golang/prometheus * dep ensure -update github.com/golang/protobuf * add metrics to reserved usernames * add comment head in metrics package * fix style imports * add metrics settings * add bearer token check * mapping metrics configs * fix lint * update config cheat sheet * update conf sample, typo fix
@@ -82,6 +82,14 @@ | |||
revision = "349dd0209470eabd9514242c688c403c0926d266" | |||
[[projects]] | |||
branch = "master" | |||
digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd" | |||
name = "github.com/beorn7/perks" | |||
packages = ["quantile"] | |||
pruneopts = "NUT" | |||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb" | |||
[[projects]] | |||
digest = "1:67351095005f164e748a5a21899d1403b03878cb2d40a7b0f742376e6eeda974" | |||
name = "github.com/blevesearch/bleve" | |||
packages = [ | |||
@@ -405,11 +413,12 @@ | |||
revision = "7f3990acf1833faa5ebd0e86f0a4c72a4b5eba3c" | |||
[[projects]] | |||
digest = "1:b64f9be717fdab5f75122dc3868e8ca9d003779b6bc55f64f39a0cddc698bf88" | |||
digest = "1:97df918963298c287643883209a2c3f642e6593379f97ab400c2a2e219ab647d" | |||
name = "github.com/golang/protobuf" | |||
packages = ["proto"] | |||
pruneopts = "NUT" | |||
revision = "99511271042a09d1e01baea8781caa5210fec66e" | |||
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" | |||
version = "v1.2.0" | |||
[[projects]] | |||
digest = "1:60e25fc5f5cfd7783f985ca99b4383e848981dddf0be584db7d809be20848e25" | |||
@@ -574,6 +583,14 @@ | |||
revision = "c7c4067b79cc51e6dfdcef5c702e74b1e0fa7c75" | |||
[[projects]] | |||
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" | |||
name = "github.com/matttproud/golang_protobuf_extensions" | |||
packages = ["pbutil"] | |||
pruneopts = "NUT" | |||
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" | |||
version = "v1.0.1" | |||
[[projects]] | |||
branch = "master" | |||
digest = "1:1ed0f3c066eb9d1c2ff7a864a6fa595c70b9b49049cc46af6a6f7ff0e4655321" | |||
name = "github.com/mcuadros/go-version" | |||
@@ -645,6 +662,51 @@ | |||
revision = "54653902c20e47f3417541d35435cb6d6162e28a" | |||
[[projects]] | |||
digest = "1:aa2da1df3327c3a338bb42f978407c07de74cd0a5bef35e9411881dffd444214" | |||
name = "github.com/prometheus/client_golang" | |||
packages = [ | |||
"prometheus", | |||
"prometheus/internal", | |||
"prometheus/promhttp", | |||
] | |||
pruneopts = "NUT" | |||
revision = "1cafe34db7fdec6022e17e00e1c1ea501022f3e4" | |||
version = "v0.9.0" | |||
[[projects]] | |||
branch = "master" | |||
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" | |||
name = "github.com/prometheus/client_model" | |||
packages = ["go"] | |||
pruneopts = "NUT" | |||
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" | |||
[[projects]] | |||
branch = "master" | |||
digest = "1:06375f3b602de9c99fa99b8484f0e949fd5273e6e9c6592b5a0dd4cd9085f3ea" | |||
name = "github.com/prometheus/common" | |||
packages = [ | |||
"expfmt", | |||
"internal/bitbucket.org/ww/goautoneg", | |||
"model", | |||
] | |||
pruneopts = "NUT" | |||
revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6" | |||
[[projects]] | |||
branch = "master" | |||
digest = "1:102dea0c03a915acfc634b7c67f2662012b5483b56d9025e33f5188e112759b6" | |||
name = "github.com/prometheus/procfs" | |||
packages = [ | |||
".", | |||
"internal/util", | |||
"nfs", | |||
"xfs", | |||
] | |||
pruneopts = "NUT" | |||
revision = "185b4288413d2a0dd0806f78c90dde719829e5ae" | |||
[[projects]] | |||
branch = "master" | |||
digest = "1:5be01c22bc1040e2f6ce4755d51a0ac9cef823a9f2004fb1f9896a414ef519e6" | |||
name = "github.com/russross/blackfriday" | |||
@@ -981,6 +1043,8 @@ | |||
"github.com/nfnt/resize", | |||
"github.com/pquerna/otp", | |||
"github.com/pquerna/otp/totp", | |||
"github.com/prometheus/client_golang/prometheus", | |||
"github.com/prometheus/client_golang/prometheus/promhttp", | |||
"github.com/russross/blackfriday", | |||
"github.com/satori/go.uuid", | |||
"github.com/sergi/go-diff/diffmatchpatch", | |||
@@ -107,3 +107,7 @@ ignored = ["google.golang.org/appengine*"] | |||
[[override]] | |||
revision = "c10ba270aa0bf8b8c1c986e103859c67a9103061" | |||
name = "golang.org/x/oauth2" | |||
[[constraint]] | |||
name = "github.com/prometheus/client_golang" | |||
version = "0.9.0" |
@@ -659,3 +659,9 @@ FILE_EXTENSIONS = .adoc,.asciidoc | |||
RENDER_COMMAND = "asciidoc --out-file=- -" | |||
; Don't pass the file on STDIN, pass the filename as argument instead. | |||
IS_INPUT_FILE = false | |||
[metrics] | |||
; Enables metrics endpoint. True or false; default is false. | |||
ENABLED = false | |||
; If you want to add authorization, specify a token here | |||
TOKEN = |
@@ -302,6 +302,11 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`. | |||
- `PULL`: **300**: Git pull from internal repositories timeout seconds. | |||
- `GC`: **60**: Git repository GC timeout seconds. | |||
## Metrics (`metrics`) | |||
- `ENABLED`: **false**: Enables /metrics endpoint for prometheus. | |||
- `TOKEN`: **\<empty\>**: Bearer token required to access the metrics endpoint. If set, the same token must be configured in Prometheus via the `bearer_token` or `bearer_token_file` parameter.
## API (`api`) | |||
- `ENABLE_SWAGGER_ENDPOINT`: **true**: Enables /api/swagger, /api/v1/swagger etc. endpoints. True or false; default is true. | |||
@@ -698,6 +698,7 @@ var ( | |||
"issues", | |||
"js", | |||
"less", | |||
"metrics", | |||
"new", | |||
"org", | |||
"plugins", | |||
@@ -0,0 +1,299 @@ | |||
// Copyright 2018 The Gitea Authors. All rights reserved. | |||
// Use of this source code is governed by a MIT-style | |||
// license that can be found in the LICENSE file. | |||
package metrics | |||
import ( | |||
"code.gitea.io/gitea/models" | |||
"github.com/prometheus/client_golang/prometheus" | |||
) | |||
// namespace is the prefix applied to every metric name this package
// exports (e.g. "gitea_" + "users" -> "gitea_users").
const namespace = "gitea_"

// Collector implements the prometheus.Collector interface and
// exposes gitea metrics for prometheus
//
// Each field holds the descriptor for one gauge; the gauge values are
// taken from the database statistics counters in Collect.
type Collector struct {
	Accesses      *prometheus.Desc
	Actions       *prometheus.Desc
	Attachments   *prometheus.Desc
	Comments      *prometheus.Desc
	Follows       *prometheus.Desc
	HookTasks     *prometheus.Desc
	Issues        *prometheus.Desc
	Labels        *prometheus.Desc
	LoginSources  *prometheus.Desc
	Milestones    *prometheus.Desc
	Mirrors       *prometheus.Desc
	Oauths        *prometheus.Desc
	Organizations *prometheus.Desc
	PublicKeys    *prometheus.Desc
	Releases      *prometheus.Desc
	Repositories  *prometheus.Desc
	Stars         *prometheus.Desc
	Teams         *prometheus.Desc
	UpdateTasks   *prometheus.Desc
	Users         *prometheus.Desc
	Watches       *prometheus.Desc
	Webhooks      *prometheus.Desc
}
// NewCollector returns a new Collector with all prometheus.Desc initialized | |||
func NewCollector() Collector { | |||
return Collector{ | |||
Accesses: prometheus.NewDesc( | |||
namespace+"accesses", | |||
"Number of Accesses", | |||
nil, nil, | |||
), | |||
Actions: prometheus.NewDesc( | |||
namespace+"actions", | |||
"Number of Actions", | |||
nil, nil, | |||
), | |||
Attachments: prometheus.NewDesc( | |||
namespace+"attachments", | |||
"Number of Attachments", | |||
nil, nil, | |||
), | |||
Comments: prometheus.NewDesc( | |||
namespace+"comments", | |||
"Number of Comments", | |||
nil, nil, | |||
), | |||
Follows: prometheus.NewDesc( | |||
namespace+"follows", | |||
"Number of Follows", | |||
nil, nil, | |||
), | |||
HookTasks: prometheus.NewDesc( | |||
namespace+"hooktasks", | |||
"Number of HookTasks", | |||
nil, nil, | |||
), | |||
Issues: prometheus.NewDesc( | |||
namespace+"issues", | |||
"Number of Issues", | |||
nil, nil, | |||
), | |||
Labels: prometheus.NewDesc( | |||
namespace+"labels", | |||
"Number of Labels", | |||
nil, nil, | |||
), | |||
LoginSources: prometheus.NewDesc( | |||
namespace+"loginsources", | |||
"Number of LoginSources", | |||
nil, nil, | |||
), | |||
Milestones: prometheus.NewDesc( | |||
namespace+"milestones", | |||
"Number of Milestones", | |||
nil, nil, | |||
), | |||
Mirrors: prometheus.NewDesc( | |||
namespace+"mirrors", | |||
"Number of Mirrors", | |||
nil, nil, | |||
), | |||
Oauths: prometheus.NewDesc( | |||
namespace+"oauths", | |||
"Number of Oauths", | |||
nil, nil, | |||
), | |||
Organizations: prometheus.NewDesc( | |||
namespace+"organizations", | |||
"Number of Organizations", | |||
nil, nil, | |||
), | |||
PublicKeys: prometheus.NewDesc( | |||
namespace+"publickeys", | |||
"Number of PublicKeys", | |||
nil, nil, | |||
), | |||
Releases: prometheus.NewDesc( | |||
namespace+"releases", | |||
"Number of Releases", | |||
nil, nil, | |||
), | |||
Repositories: prometheus.NewDesc( | |||
namespace+"repositories", | |||
"Number of Repositories", | |||
nil, nil, | |||
), | |||
Stars: prometheus.NewDesc( | |||
namespace+"stars", | |||
"Number of Stars", | |||
nil, nil, | |||
), | |||
Teams: prometheus.NewDesc( | |||
namespace+"teams", | |||
"Number of Teams", | |||
nil, nil, | |||
), | |||
UpdateTasks: prometheus.NewDesc( | |||
namespace+"updatetasks", | |||
"Number of UpdateTasks", | |||
nil, nil, | |||
), | |||
Users: prometheus.NewDesc( | |||
namespace+"users", | |||
"Number of Users", | |||
nil, nil, | |||
), | |||
Watches: prometheus.NewDesc( | |||
namespace+"watches", | |||
"Number of Watches", | |||
nil, nil, | |||
), | |||
Webhooks: prometheus.NewDesc( | |||
namespace+"webhooks", | |||
"Number of Webhooks", | |||
nil, nil, | |||
), | |||
} | |||
} | |||
// Describe returns all possible prometheus.Desc | |||
func (c Collector) Describe(ch chan<- *prometheus.Desc) { | |||
ch <- c.Accesses | |||
ch <- c.Actions | |||
ch <- c.Attachments | |||
ch <- c.Comments | |||
ch <- c.Follows | |||
ch <- c.HookTasks | |||
ch <- c.Issues | |||
ch <- c.Labels | |||
ch <- c.LoginSources | |||
ch <- c.Milestones | |||
ch <- c.Mirrors | |||
ch <- c.Oauths | |||
ch <- c.Organizations | |||
ch <- c.PublicKeys | |||
ch <- c.Releases | |||
ch <- c.Repositories | |||
ch <- c.Stars | |||
ch <- c.Teams | |||
ch <- c.UpdateTasks | |||
ch <- c.Users | |||
ch <- c.Watches | |||
ch <- c.Webhooks | |||
} | |||
// Collect returns the metrics with values | |||
func (c Collector) Collect(ch chan<- prometheus.Metric) { | |||
stats := models.GetStatistic() | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Accesses, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Access), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Actions, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Action), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Attachments, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Attachment), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Comments, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Comment), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Follows, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Follow), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.HookTasks, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.HookTask), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Issues, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Issue), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Labels, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Label), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.LoginSources, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.LoginSource), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Milestones, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Milestone), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Mirrors, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Mirror), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Oauths, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Oauth), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Organizations, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Org), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.PublicKeys, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.PublicKey), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Releases, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Release), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Repositories, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Repo), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Stars, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Star), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Teams, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Team), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.UpdateTasks, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.UpdateTask), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Users, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.User), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Watches, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Watch), | |||
) | |||
ch <- prometheus.MustNewConstMetric( | |||
c.Webhooks, | |||
prometheus.GaugeValue, | |||
float64(stats.Counter.Webhook), | |||
) | |||
} |
@@ -561,6 +561,15 @@ var ( | |||
TrustedFacets []string | |||
}{} | |||
// Metrics settings | |||
Metrics = struct { | |||
Enabled bool | |||
Token string | |||
}{ | |||
Enabled: false, | |||
Token: "", | |||
} | |||
// I18n settings | |||
Langs []string | |||
Names []string | |||
@@ -1125,6 +1134,8 @@ func NewContext() { | |||
log.Fatal(4, "Failed to map Git settings: %v", err) | |||
} else if err = Cfg.Section("api").MapTo(&API); err != nil { | |||
log.Fatal(4, "Failed to map API settings: %v", err) | |||
} else if err = Cfg.Section("metrics").MapTo(&Metrics); err != nil { | |||
log.Fatal(4, "Failed to map Metrics settings: %v", err) | |||
} | |||
sec = Cfg.Section("mirror") | |||
@@ -0,0 +1,30 @@ | |||
// Copyright 2018 The Gitea Authors. All rights reserved. | |||
// Use of this source code is governed by a MIT-style | |||
// license that can be found in the LICENSE file. | |||
package routers | |||
import ( | |||
"github.com/prometheus/client_golang/prometheus/promhttp" | |||
"code.gitea.io/gitea/modules/context" | |||
"code.gitea.io/gitea/modules/setting" | |||
) | |||
// Metrics validate auth token and render prometheus metrics | |||
func Metrics(ctx *context.Context) { | |||
if setting.Metrics.Token == "" { | |||
promhttp.Handler().ServeHTTP(ctx.Resp, ctx.Req.Request) | |||
return | |||
} | |||
header := ctx.Header().Get("Authorization") | |||
if header == "" { | |||
ctx.Error(401) | |||
return | |||
} | |||
if header != "Bearer "+setting.Metrics.Token { | |||
ctx.Error(401) | |||
return | |||
} | |||
promhttp.Handler().ServeHTTP(ctx.Resp, ctx.Req.Request) | |||
} |
@@ -16,6 +16,7 @@ import ( | |||
"code.gitea.io/gitea/modules/context" | |||
"code.gitea.io/gitea/modules/lfs" | |||
"code.gitea.io/gitea/modules/log" | |||
"code.gitea.io/gitea/modules/metrics" | |||
"code.gitea.io/gitea/modules/options" | |||
"code.gitea.io/gitea/modules/public" | |||
"code.gitea.io/gitea/modules/setting" | |||
@@ -39,6 +40,7 @@ import ( | |||
"github.com/go-macaron/i18n" | |||
"github.com/go-macaron/session" | |||
"github.com/go-macaron/toolbox" | |||
"github.com/prometheus/client_golang/prometheus" | |||
"github.com/tstranex/u2f" | |||
"gopkg.in/macaron.v1" | |||
) | |||
@@ -788,6 +790,14 @@ func RegisterRoutes(m *macaron.Macaron) { | |||
} | |||
}) | |||
// prometheus metrics endpoint | |||
if setting.Metrics.Enabled { | |||
c := metrics.NewCollector() | |||
prometheus.MustRegister(c) | |||
m.Get("/metrics", routers.Metrics) | |||
} | |||
// Not found handler. | |||
m.NotFound(routers.NotFound) | |||
} |
@@ -0,0 +1,20 @@ | |||
Copyright (C) 2013 Blake Mizerany | |||
Permission is hereby granted, free of charge, to any person obtaining | |||
a copy of this software and associated documentation files (the | |||
"Software"), to deal in the Software without restriction, including | |||
without limitation the rights to use, copy, modify, merge, publish, | |||
distribute, sublicense, and/or sell copies of the Software, and to | |||
permit persons to whom the Software is furnished to do so, subject to | |||
the following conditions: | |||
The above copyright notice and this permission notice shall be | |||
included in all copies or substantial portions of the Software. | |||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE | |||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | |||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | |||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
@@ -0,0 +1,316 @@ | |||
// Package quantile computes approximate quantiles over an unbounded data | |||
// stream within low memory and CPU bounds. | |||
// | |||
// A small amount of accuracy is traded to achieve the above properties. | |||
// | |||
// Multiple streams can be merged before calling Query to generate a single set | |||
// of results. This is meaningful when the streams represent the same type of | |||
// data. See Merge and Samples. | |||
// | |||
// For more detailed information about the algorithm used, see: | |||
// | |||
// Effective Computation of Biased Quantiles over Data Streams | |||
// | |||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf | |||
package quantile | |||
import ( | |||
"math" | |||
"sort" | |||
) | |||
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
//
// Width is the sample's weight (number of merged observations); Delta is
// the allowed rank error carried by the compression algorithm.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

// sort.Interface: samples order ascending by Value.
func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
type invariant func(s *stream, r float64) float64 | |||
// NewLowBiased returns an initialized Stream for low-biased quantiles | |||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but | |||
// error guarantees can still be given even for the lower ranks of the data | |||
// distribution. | |||
// | |||
// The provided epsilon is a relative error, i.e. the true quantile of a value | |||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. | |||
// | |||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error | |||
// properties. | |||
func NewLowBiased(epsilon float64) *Stream { | |||
ƒ := func(s *stream, r float64) float64 { | |||
return 2 * epsilon * r | |||
} | |||
return newStream(ƒ) | |||
} | |||
// NewHighBiased returns an initialized Stream for high-biased quantiles | |||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but | |||
// error guarantees can still be given even for the higher ranks of the data | |||
// distribution. | |||
// | |||
// The provided epsilon is a relative error, i.e. the true quantile of a value | |||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). | |||
// | |||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error | |||
// properties. | |||
func NewHighBiased(epsilon float64) *Stream { | |||
ƒ := func(s *stream, r float64) float64 { | |||
return 2 * epsilon * (s.n - r) | |||
} | |||
return newStream(ƒ) | |||
} | |||
// NewTargeted returns an initialized Stream concerned with a particular set of | |||
// quantile values that are supplied a priori. Knowing these a priori reduces | |||
// space and computation time. The targets map maps the desired quantiles to | |||
// their absolute errors, i.e. the true quantile of a value returned by a query | |||
// is guaranteed to be within (Quantile±Epsilon). | |||
// | |||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. | |||
func NewTargeted(targetMap map[float64]float64) *Stream { | |||
// Convert map to slice to avoid slow iterations on a map. | |||
// ƒ is called on the hot path, so converting the map to a slice | |||
// beforehand results in significant CPU savings. | |||
targets := targetMapToSlice(targetMap) | |||
ƒ := func(s *stream, r float64) float64 { | |||
var m = math.MaxFloat64 | |||
var f float64 | |||
for _, t := range targets { | |||
if t.quantile*s.n <= r { | |||
f = (2 * t.epsilon * r) / t.quantile | |||
} else { | |||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) | |||
} | |||
if f < m { | |||
m = f | |||
} | |||
} | |||
return m | |||
} | |||
return newStream(ƒ) | |||
} | |||
type target struct { | |||
quantile float64 | |||
epsilon float64 | |||
} | |||
func targetMapToSlice(targetMap map[float64]float64) []target { | |||
targets := make([]target, 0, len(targetMap)) | |||
for quantile, epsilon := range targetMap { | |||
t := target{ | |||
quantile: quantile, | |||
epsilon: epsilon, | |||
} | |||
targets = append(targets, t) | |||
} | |||
return targets | |||
} | |||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
//
// New observations are buffered in b and only merged into the underlying
// compressed summary (*stream) when the buffer fills or a query needs them.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

// newStream wraps a fresh compressed summary using the given error
// invariant, with a 500-sample insertion buffer.
func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	// A fresh observation always has weight 1 and no rank error.
	s.insert(Sample{Value: v, Width: 1})
}

// insert buffers the sample; when the buffer reaches capacity it is
// flushed into the compressed summary.
func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}
// Query returns the computed qth percentiles value. If s was created with | |||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query | |||
// will return an unspecified result. | |||
func (s *Stream) Query(q float64) float64 { | |||
if !s.flushed() { | |||
// Fast path when there hasn't been enough data for a flush; | |||
// this also yields better accuracy for small sets of data. | |||
l := len(s.b) | |||
if l == 0 { | |||
return 0 | |||
} | |||
i := int(math.Ceil(float64(l) * q)) | |||
if i > 0 { | |||
i -= 1 | |||
} | |||
s.maybeSort() | |||
return s.b[i].Value | |||
} | |||
s.flush() | |||
return s.stream.query(q) | |||
} | |||
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	// stream.merge expects its input sorted by value.
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		// Nothing has reached the summary yet; the buffer is everything.
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	// Buffered samples plus the weight already merged into the summary.
	return len(s.b) + s.stream.count()
}

// flush sorts the buffer, merges it into the summary, and empties it
// while keeping its backing storage.
func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

// maybeSort sorts the buffer only if new samples arrived since the last sort.
func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

// flushed reports whether any samples have been merged into the summary.
func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}
// stream is the compressed summary behind a Stream: l is the ordered
// sample list, n the total observed weight, and ƒ the error invariant
// chosen by the constructor.
type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

// reset clears the summary, reusing l's backing array.
func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

// insert adds a single observation of weight 1 directly to the summary.
func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}
// merge folds the (value-sorted) samples into the ordered summary list,
// maintaining the running rank r and total weight n, then compresses.
func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		// Scan forward from the previous insertion point; both lists
		// are sorted, so i never moves backwards.
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		// Larger than everything seen so far: append at the end with
		// zero delta.
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}
// count returns the total observed weight, truncated to int.
func (s *stream) count() int {
	return int(s.n)
}

// query walks the summary and returns the value of the last sample whose
// rank interval still fits within the allowed target rank t.
func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	// Widen the target by half the allowed error at that rank.
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}
// compress walks the summary right-to-left and merges each sample into
// its right neighbor whenever the combined weight still satisfies the
// error invariant ƒ at that rank, shrinking the list in place.
func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	// x is the current merge candidate (right neighbor), xi its index,
	// r the rank just below x.
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width
	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			// Merging c into x keeps the invariant: absorb and drop c.
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			// c survives and becomes the next merge candidate.
			x = c
			xi = i
		}
		r -= c.Width
	}
}
func (s *stream) samples() Samples { | |||
samples := make(Samples, len(s.l)) | |||
copy(samples, s.l) | |||
return samples | |||
} |
@@ -1,7 +1,4 @@ | |||
Go support for Protocol Buffers - Google's data interchange format | |||
Copyright 2010 The Go Authors. All rights reserved. | |||
https://github.com/golang/protobuf | |||
Redistribution and use in source and binary forms, with or without | |||
modification, are permitted provided that the following conditions are | |||
@@ -35,22 +35,39 @@ | |||
package proto | |||
import ( | |||
"fmt" | |||
"log" | |||
"reflect" | |||
"strings" | |||
) | |||
// Clone returns a deep copy of a protocol buffer. | |||
func Clone(pb Message) Message { | |||
in := reflect.ValueOf(pb) | |||
func Clone(src Message) Message { | |||
in := reflect.ValueOf(src) | |||
if in.IsNil() { | |||
return pb | |||
return src | |||
} | |||
out := reflect.New(in.Type().Elem()) | |||
// out is empty so a merge is a deep copy. | |||
mergeStruct(out.Elem(), in.Elem()) | |||
return out.Interface().(Message) | |||
dst := out.Interface().(Message) | |||
Merge(dst, src) | |||
return dst | |||
} | |||
// Merger is the interface representing objects that can merge messages of the same type. | |||
type Merger interface { | |||
// Merge merges src into this message. | |||
// Required and optional fields that are set in src will be set to that value in dst. | |||
// Elements of repeated fields will be appended. | |||
// | |||
// Merge may panic if called with a different argument type than the receiver. | |||
Merge(src Message) | |||
} | |||
// generatedMerger is the custom merge method that generated protos will have. | |||
// We must add this method since a generate Merge method will conflict with | |||
// many existing protos that have a Merge data field already defined. | |||
type generatedMerger interface { | |||
XXX_Merge(src Message) | |||
} | |||
// Merge merges src into dst. | |||
@@ -58,17 +75,24 @@ func Clone(pb Message) Message { | |||
// Elements of repeated fields will be appended. | |||
// Merge panics if src and dst are not the same type, or if dst is nil. | |||
func Merge(dst, src Message) { | |||
if m, ok := dst.(Merger); ok { | |||
m.Merge(src) | |||
return | |||
} | |||
in := reflect.ValueOf(src) | |||
out := reflect.ValueOf(dst) | |||
if out.IsNil() { | |||
panic("proto: nil destination") | |||
} | |||
if in.Type() != out.Type() { | |||
// Explicit test prior to mergeStruct so that mistyped nils will fail | |||
panic("proto: type mismatch") | |||
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) | |||
} | |||
if in.IsNil() { | |||
// Merging nil into non-nil is a quiet no-op | |||
return // Merge from nil src is a noop | |||
} | |||
if m, ok := dst.(generatedMerger); ok { | |||
m.XXX_Merge(src) | |||
return | |||
} | |||
mergeStruct(out.Elem(), in.Elem()) | |||
@@ -84,9 +108,15 @@ func mergeStruct(out, in reflect.Value) { | |||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) | |||
} | |||
if emIn, ok := in.Addr().Interface().(extendableProto); ok { | |||
emOut := out.Addr().Interface().(extendableProto) | |||
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) | |||
if emIn, err := extendable(in.Addr().Interface()); err == nil { | |||
emOut, _ := extendable(out.Addr().Interface()) | |||
mIn, muIn := emIn.extensionsRead() | |||
if mIn != nil { | |||
mOut := emOut.extensionsWrite() | |||
muIn.Lock() | |||
mergeExtension(mOut, mIn) | |||
muIn.Unlock() | |||
} | |||
} | |||
uf := in.FieldByName("XXX_unrecognized") | |||
@@ -39,8 +39,6 @@ import ( | |||
"errors" | |||
"fmt" | |||
"io" | |||
"os" | |||
"reflect" | |||
) | |||
// errOverflow is returned when an integer is too large to be represented. | |||
@@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") | |||
// wire type is encountered. It does not get returned to user code. | |||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") | |||
// The fundamental decoders that interpret bytes on the wire. | |||
// Those that take integer types all return uint64 and are | |||
// therefore of type valueDecoder. | |||
// DecodeVarint reads a varint-encoded integer from the slice. | |||
// It returns the integer and the number of bytes consumed, or | |||
// zero if there is not enough. | |||
@@ -61,7 +55,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for | |||
// int32, int64, uint32, uint64, bool, and enum | |||
// protocol buffer types. | |||
func DecodeVarint(buf []byte) (x uint64, n int) { | |||
// x, n already 0 | |||
for shift := uint(0); shift < 64; shift += 7 { | |||
if n >= len(buf) { | |||
return 0, 0 | |||
@@ -78,13 +71,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) { | |||
return 0, 0 | |||
} | |||
// DecodeVarint reads a varint-encoded integer from the Buffer. | |||
// This is the format for the | |||
// int32, int64, uint32, uint64, bool, and enum | |||
// protocol buffer types. | |||
func (p *Buffer) DecodeVarint() (x uint64, err error) { | |||
// x, err already 0 | |||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) { | |||
i := p.index | |||
l := len(p.buf) | |||
@@ -107,6 +94,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { | |||
return | |||
} | |||
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
	i := p.index
	buf := p.buf

	// Fast exits: end of buffer, a one-byte varint, or fewer than 10
	// bytes left (a varint is at most 10 bytes, so the unrolled path
	// below may not read past the slice — fall back to the slow loop).
	if i >= len(buf) {
		return 0, io.ErrUnexpectedEOF
	} else if buf[i] < 0x80 {
		p.index++
		return uint64(buf[i]), nil
	} else if len(buf)-i < 10 {
		return p.decodeVarintSlow()
	}

	var b uint64
	// we already checked the first byte
	// Each step adds the next byte shifted into place; when the
	// continuation bit (0x80) was set, the following step subtracts its
	// contribution (x -= 0x80 << k) before adding the next byte.
	x = uint64(buf[i]) - 0x80
	i++

	b = uint64(buf[i])
	i++
	x += b << 7
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 7

	b = uint64(buf[i])
	i++
	x += b << 14
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 14

	b = uint64(buf[i])
	i++
	x += b << 21
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 21

	b = uint64(buf[i])
	i++
	x += b << 28
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 28

	b = uint64(buf[i])
	i++
	x += b << 35
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 35

	b = uint64(buf[i])
	i++
	x += b << 42
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 42

	b = uint64(buf[i])
	i++
	x += b << 49
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 49

	b = uint64(buf[i])
	i++
	x += b << 56
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 56

	b = uint64(buf[i])
	i++
	x += b << 63
	if b&0x80 == 0 {
		goto done
	}
	// x -= 0x80 << 63 // Always zero.

	// Ten bytes consumed and the continuation bit is still set: the
	// value does not fit in 64 bits.
	return 0, errOverflow

done:
	p.index = i
	return x, nil
}
// DecodeFixed64 reads a 64-bit integer from the Buffer. | |||
// This is the format for the | |||
// fixed64, sfixed64, and double protocol buffer types. | |||
@@ -173,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { | |||
return | |||
} | |||
// These are not ValueDecoders: they produce an array of bytes or a string. | |||
// bytes, embedded messages | |||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. | |||
// This is the format used for the bytes protocol buffer | |||
// type and for embedded messages. | |||
@@ -217,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { | |||
return string(buf), nil | |||
} | |||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. | |||
// If the protocol buffer has extensions, and the field matches, add it as an extension. | |||
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. | |||
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { | |||
oi := o.index | |||
err := o.skip(t, tag, wire) | |||
if err != nil { | |||
return err | |||
} | |||
if !unrecField.IsValid() { | |||
return nil | |||
} | |||
ptr := structPointer_Bytes(base, unrecField) | |||
// Add the skipped field to struct field | |||
obuf := o.buf | |||
o.buf = *ptr | |||
o.EncodeVarint(uint64(tag<<3 | wire)) | |||
*ptr = append(o.buf, obuf[oi:o.index]...) | |||
o.buf = obuf | |||
return nil | |||
} | |||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. | |||
func (o *Buffer) skip(t reflect.Type, tag, wire int) error { | |||
var u uint64 | |||
var err error | |||
switch wire { | |||
case WireVarint: | |||
_, err = o.DecodeVarint() | |||
case WireFixed64: | |||
_, err = o.DecodeFixed64() | |||
case WireBytes: | |||
_, err = o.DecodeRawBytes(false) | |||
case WireFixed32: | |||
_, err = o.DecodeFixed32() | |||
case WireStartGroup: | |||
for { | |||
u, err = o.DecodeVarint() | |||
if err != nil { | |||
break | |||
} | |||
fwire := int(u & 0x7) | |||
if fwire == WireEndGroup { | |||
break | |||
} | |||
ftag := int(u >> 3) | |||
err = o.skip(t, ftag, fwire) | |||
if err != nil { | |||
break | |||
} | |||
} | |||
default: | |||
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) | |||
} | |||
return err | |||
} | |||
// Unmarshaler is the interface representing objects that can | |||
// unmarshal themselves. The method should reset the receiver before | |||
// decoding starts. The argument points to data that may be | |||
// unmarshal themselves. The argument points to data that may be | |||
// overwritten, so implementations should not keep references to the | |||
// buffer. | |||
// Unmarshal implementations should not clear the receiver. | |||
// Any unmarshaled data should be merged into the receiver. | |||
// Callers of Unmarshal that do not want to retain existing data | |||
// should Reset the receiver before calling Unmarshal. | |||
type Unmarshaler interface { | |||
Unmarshal([]byte) error | |||
} | |||
// newUnmarshaler is the interface representing objects that can | |||
// unmarshal themselves. The semantics are identical to Unmarshaler. | |||
// | |||
// This exists to support protoc-gen-go generated messages. | |||
// The proto package will stop type-asserting to this interface in the future. | |||
// | |||
// DO NOT DEPEND ON THIS. | |||
type newUnmarshaler interface { | |||
XXX_Unmarshal([]byte) error | |||
} | |||
// Unmarshal parses the protocol buffer representation in buf and places the | |||
// decoded result in pb. If the struct underlying pb does not match | |||
// the data in buf, the results can be unpredictable. | |||
@@ -301,7 +334,13 @@ type Unmarshaler interface { | |||
// to preserve and append to existing data. | |||
func Unmarshal(buf []byte, pb Message) error { | |||
pb.Reset() | |||
return UnmarshalMerge(buf, pb) | |||
if u, ok := pb.(newUnmarshaler); ok { | |||
return u.XXX_Unmarshal(buf) | |||
} | |||
if u, ok := pb.(Unmarshaler); ok { | |||
return u.Unmarshal(buf) | |||
} | |||
return NewBuffer(buf).Unmarshal(pb) | |||
} | |||
// UnmarshalMerge parses the protocol buffer representation in buf and | |||
@@ -311,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { | |||
// UnmarshalMerge merges into existing data in pb. | |||
// Most code should use Unmarshal instead. | |||
func UnmarshalMerge(buf []byte, pb Message) error { | |||
// If the object can unmarshal itself, let it. | |||
if u, ok := pb.(newUnmarshaler); ok { | |||
return u.XXX_Unmarshal(buf) | |||
} | |||
if u, ok := pb.(Unmarshaler); ok { | |||
// NOTE: The history of proto have unfortunately been inconsistent | |||
// whether Unmarshaler should or should not implicitly clear itself. | |||
// Some implementations do, most do not. | |||
// Thus, calling this here may or may not do what people want. | |||
// | |||
// See https://github.com/golang/protobuf/issues/424 | |||
return u.Unmarshal(buf) | |||
} | |||
return NewBuffer(buf).Unmarshal(pb) | |||
@@ -328,541 +375,54 @@ func (p *Buffer) DecodeMessage(pb Message) error { | |||
} | |||
// DecodeGroup reads a tag-delimited group from the Buffer. | |||
// StartGroup tag is already consumed. This function consumes | |||
// EndGroup tag. | |||
func (p *Buffer) DecodeGroup(pb Message) error { | |||
typ, base, err := getbase(pb) | |||
if err != nil { | |||
return err | |||
b := p.buf[p.index:] | |||
x, y := findEndGroup(b) | |||
if x < 0 { | |||
return io.ErrUnexpectedEOF | |||
} | |||
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) | |||
err := Unmarshal(b[:x], pb) | |||
p.index += y | |||
return err | |||
} | |||
// Unmarshal parses the protocol buffer representation in the | |||
// Buffer and places the decoded result in pb. If the struct | |||
// underlying pb does not match the data in the buffer, the results can be | |||
// unpredictable. | |||
// | |||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. | |||
func (p *Buffer) Unmarshal(pb Message) error { | |||
// If the object can unmarshal itself, let it. | |||
if u, ok := pb.(Unmarshaler); ok { | |||
err := u.Unmarshal(p.buf[p.index:]) | |||
if u, ok := pb.(newUnmarshaler); ok { | |||
err := u.XXX_Unmarshal(p.buf[p.index:]) | |||
p.index = len(p.buf) | |||
return err | |||
} | |||
typ, base, err := getbase(pb) | |||
if err != nil { | |||
return err | |||
} | |||
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) | |||
if collectStats { | |||
stats.Decode++ | |||
} | |||
return err | |||
} | |||
// unmarshalType does the work of unmarshaling a structure. | |||
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { | |||
var state errorState | |||
required, reqFields := prop.reqCount, uint64(0) | |||
var err error | |||
for err == nil && o.index < len(o.buf) { | |||
oi := o.index | |||
var u uint64 | |||
u, err = o.DecodeVarint() | |||
if err != nil { | |||
break | |||
} | |||
wire := int(u & 0x7) | |||
if wire == WireEndGroup { | |||
if is_group { | |||
return nil // input is satisfied | |||
} | |||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st) | |||
} | |||
tag := int(u >> 3) | |||
if tag <= 0 { | |||
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) | |||
} | |||
fieldnum, ok := prop.decoderTags.get(tag) | |||
if !ok { | |||
// Maybe it's an extension? | |||
if prop.extendable { | |||
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { | |||
if err = o.skip(st, tag, wire); err == nil { | |||
ext := e.ExtensionMap()[int32(tag)] // may be missing | |||
ext.enc = append(ext.enc, o.buf[oi:o.index]...) | |||
e.ExtensionMap()[int32(tag)] = ext | |||
} | |||
continue | |||
} | |||
} | |||
// Maybe it's a oneof? | |||
if prop.oneofUnmarshaler != nil { | |||
m := structPointer_Interface(base, st).(Message) | |||
// First return value indicates whether tag is a oneof field. | |||
ok, err = prop.oneofUnmarshaler(m, tag, wire, o) | |||
if err == ErrInternalBadWireType { | |||
// Map the error to something more descriptive. | |||
// Do the formatting here to save generated code space. | |||
err = fmt.Errorf("bad wiretype for oneof field in %T", m) | |||
} | |||
if ok { | |||
continue | |||
} | |||
} | |||
err = o.skipAndSave(st, tag, wire, base, prop.unrecField) | |||
continue | |||
} | |||
p := prop.Prop[fieldnum] | |||
if p.dec == nil { | |||
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) | |||
continue | |||
} | |||
dec := p.dec | |||
if wire != WireStartGroup && wire != p.WireType { | |||
if wire == WireBytes && p.packedDec != nil { | |||
// a packable field | |||
dec = p.packedDec | |||
} else { | |||
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) | |||
continue | |||
} | |||
} | |||
decErr := dec(o, p, base) | |||
if decErr != nil && !state.shouldContinue(decErr, p) { | |||
err = decErr | |||
} | |||
if err == nil && p.Required { | |||
// Successfully decoded a required field. | |||
if tag <= 64 { | |||
// use bitmap for fields 1-64 to catch field reuse. | |||
var mask uint64 = 1 << uint64(tag-1) | |||
if reqFields&mask == 0 { | |||
// new required field | |||
reqFields |= mask | |||
required-- | |||
} | |||
} else { | |||
// This is imprecise. It can be fooled by a required field | |||
// with a tag > 64 that is encoded twice; that's very rare. | |||
// A fully correct implementation would require allocating | |||
// a data structure, which we would like to avoid. | |||
required-- | |||
} | |||
} | |||
} | |||
if err == nil { | |||
if is_group { | |||
return io.ErrUnexpectedEOF | |||
} | |||
if state.err != nil { | |||
return state.err | |||
} | |||
if required > 0 { | |||
// Not enough information to determine the exact field. If we use extra | |||
// CPU, we could determine the field only if the missing required field | |||
// has a tag <= 64 and we check reqFields. | |||
return &RequiredNotSetError{"{Unknown}"} | |||
} | |||
} | |||
return err | |||
} | |||
// Individual type decoders | |||
// For each, | |||
// u is the decoded value, | |||
// v is a pointer to the field (pointer) in the struct | |||
// Sizes of the pools to allocate inside the Buffer. | |||
// The goal is modest amortization and allocation | |||
// on at least 16-byte boundaries. | |||
const ( | |||
boolPoolSize = 16 | |||
uint32PoolSize = 8 | |||
uint64PoolSize = 4 | |||
) | |||
// Decode a bool. | |||
func (o *Buffer) dec_bool(p *Properties, base structPointer) error { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
if len(o.bools) == 0 { | |||
o.bools = make([]bool, boolPoolSize) | |||
} | |||
o.bools[0] = u != 0 | |||
*structPointer_Bool(base, p.field) = &o.bools[0] | |||
o.bools = o.bools[1:] | |||
return nil | |||
} | |||
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
*structPointer_BoolVal(base, p.field) = u != 0 | |||
return nil | |||
} | |||
// Decode an int32. | |||
func (o *Buffer) dec_int32(p *Properties, base structPointer) error { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) | |||
return nil | |||
} | |||
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) | |||
return nil | |||
} | |||
// Decode an int64. | |||
func (o *Buffer) dec_int64(p *Properties, base structPointer) error { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
word64_Set(structPointer_Word64(base, p.field), o, u) | |||
return nil | |||
} | |||
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
word64Val_Set(structPointer_Word64Val(base, p.field), o, u) | |||
return nil | |||
} | |||
// Decode a string. | |||
func (o *Buffer) dec_string(p *Properties, base structPointer) error { | |||
s, err := o.DecodeStringBytes() | |||
if err != nil { | |||
return err | |||
} | |||
*structPointer_String(base, p.field) = &s | |||
return nil | |||
} | |||
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { | |||
s, err := o.DecodeStringBytes() | |||
if err != nil { | |||
return err | |||
} | |||
*structPointer_StringVal(base, p.field) = s | |||
return nil | |||
} | |||
// Decode a slice of bytes ([]byte). | |||
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { | |||
b, err := o.DecodeRawBytes(true) | |||
if err != nil { | |||
return err | |||
} | |||
*structPointer_Bytes(base, p.field) = b | |||
return nil | |||
} | |||
// Decode a slice of bools ([]bool). | |||
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
v := structPointer_BoolSlice(base, p.field) | |||
*v = append(*v, u != 0) | |||
return nil | |||
} | |||
// Decode a slice of bools ([]bool) in packed format. | |||
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { | |||
v := structPointer_BoolSlice(base, p.field) | |||
nn, err := o.DecodeVarint() | |||
if err != nil { | |||
return err | |||
} | |||
nb := int(nn) // number of bytes of encoded bools | |||
fin := o.index + nb | |||
if fin < o.index { | |||
return errOverflow | |||
} | |||
y := *v | |||
for o.index < fin { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
y = append(y, u != 0) | |||
} | |||
*v = y | |||
return nil | |||
} | |||
// Decode a slice of int32s ([]int32). | |||
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
structPointer_Word32Slice(base, p.field).Append(uint32(u)) | |||
return nil | |||
} | |||
// Decode a slice of int32s ([]int32) in packed format. | |||
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { | |||
v := structPointer_Word32Slice(base, p.field) | |||
nn, err := o.DecodeVarint() | |||
if err != nil { | |||
return err | |||
} | |||
nb := int(nn) // number of bytes of encoded int32s | |||
fin := o.index + nb | |||
if fin < o.index { | |||
return errOverflow | |||
} | |||
for o.index < fin { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
v.Append(uint32(u)) | |||
} | |||
return nil | |||
} | |||
// Decode a slice of int64s ([]int64). | |||
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
structPointer_Word64Slice(base, p.field).Append(u) | |||
return nil | |||
} | |||
// Decode a slice of int64s ([]int64) in packed format. | |||
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { | |||
v := structPointer_Word64Slice(base, p.field) | |||
nn, err := o.DecodeVarint() | |||
if err != nil { | |||
return err | |||
} | |||
nb := int(nn) // number of bytes of encoded int64s | |||
fin := o.index + nb | |||
if fin < o.index { | |||
return errOverflow | |||
} | |||
for o.index < fin { | |||
u, err := p.valDec(o) | |||
if err != nil { | |||
return err | |||
} | |||
v.Append(u) | |||
} | |||
return nil | |||
} | |||
// Decode a slice of strings ([]string). | |||
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { | |||
s, err := o.DecodeStringBytes() | |||
if err != nil { | |||
return err | |||
} | |||
v := structPointer_StringSlice(base, p.field) | |||
*v = append(*v, s) | |||
return nil | |||
} | |||
// Decode a slice of slice of bytes ([][]byte). | |||
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { | |||
b, err := o.DecodeRawBytes(true) | |||
if err != nil { | |||
return err | |||
} | |||
v := structPointer_BytesSlice(base, p.field) | |||
*v = append(*v, b) | |||
return nil | |||
} | |||
// Decode a map field. | |||
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { | |||
raw, err := o.DecodeRawBytes(false) | |||
if err != nil { | |||
return err | |||
} | |||
oi := o.index // index at the end of this map entry | |||
o.index -= len(raw) // move buffer back to start of map entry | |||
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V | |||
if mptr.Elem().IsNil() { | |||
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) | |||
} | |||
v := mptr.Elem() // map[K]V | |||
// Prepare addressable doubly-indirect placeholders for the key and value types. | |||
// See enc_new_map for why. | |||
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K | |||
keybase := toStructPointer(keyptr.Addr()) // **K | |||
var valbase structPointer | |||
var valptr reflect.Value | |||
switch p.mtype.Elem().Kind() { | |||
case reflect.Slice: | |||
// []byte | |||
var dummy []byte | |||
valptr = reflect.ValueOf(&dummy) // *[]byte | |||
valbase = toStructPointer(valptr) // *[]byte | |||
case reflect.Ptr: | |||
// message; valptr is **Msg; need to allocate the intermediate pointer | |||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V | |||
valptr.Set(reflect.New(valptr.Type().Elem())) | |||
valbase = toStructPointer(valptr) | |||
default: | |||
// everything else | |||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V | |||
valbase = toStructPointer(valptr.Addr()) // **V | |||
} | |||
// Decode. | |||
// This parses a restricted wire format, namely the encoding of a message | |||
// with two fields. See enc_new_map for the format. | |||
for o.index < oi { | |||
// tagcode for key and value properties are always a single byte | |||
// because they have tags 1 and 2. | |||
tagcode := o.buf[o.index] | |||
o.index++ | |||
switch tagcode { | |||
case p.mkeyprop.tagcode[0]: | |||
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { | |||
return err | |||
} | |||
case p.mvalprop.tagcode[0]: | |||
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { | |||
return err | |||
} | |||
default: | |||
// TODO: Should we silently skip this instead? | |||
return fmt.Errorf("proto: bad map data tag %d", raw[0]) | |||
} | |||
} | |||
keyelem, valelem := keyptr.Elem(), valptr.Elem() | |||
if !keyelem.IsValid() { | |||
keyelem = reflect.Zero(p.mtype.Key()) | |||
} | |||
if !valelem.IsValid() { | |||
valelem = reflect.Zero(p.mtype.Elem()) | |||
} | |||
v.SetMapIndex(keyelem, valelem) | |||
return nil | |||
} | |||
// Decode a group. | |||
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { | |||
bas := structPointer_GetStructPointer(base, p.field) | |||
if structPointer_IsNil(bas) { | |||
// allocate new nested message | |||
bas = toStructPointer(reflect.New(p.stype)) | |||
structPointer_SetStructPointer(base, p.field, bas) | |||
} | |||
return o.unmarshalType(p.stype, p.sprop, true, bas) | |||
} | |||
// Decode an embedded message. | |||
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { | |||
raw, e := o.DecodeRawBytes(false) | |||
if e != nil { | |||
return e | |||
} | |||
bas := structPointer_GetStructPointer(base, p.field) | |||
if structPointer_IsNil(bas) { | |||
// allocate new nested message | |||
bas = toStructPointer(reflect.New(p.stype)) | |||
structPointer_SetStructPointer(base, p.field, bas) | |||
} | |||
// If the object can unmarshal itself, let it. | |||
if p.isUnmarshaler { | |||
iv := structPointer_Interface(bas, p.stype) | |||
return iv.(Unmarshaler).Unmarshal(raw) | |||
} | |||
obuf := o.buf | |||
oi := o.index | |||
o.buf = raw | |||
o.index = 0 | |||
err = o.unmarshalType(p.stype, p.sprop, false, bas) | |||
o.buf = obuf | |||
o.index = oi | |||
return err | |||
} | |||
// Decode a slice of embedded messages. | |||
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { | |||
return o.dec_slice_struct(p, false, base) | |||
} | |||
// Decode a slice of embedded groups. | |||
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { | |||
return o.dec_slice_struct(p, true, base) | |||
} | |||
// Decode a slice of structs ([]*struct). | |||
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { | |||
v := reflect.New(p.stype) | |||
bas := toStructPointer(v) | |||
structPointer_StructPointerSlice(base, p.field).Append(bas) | |||
if is_group { | |||
err := o.unmarshalType(p.stype, p.sprop, is_group, bas) | |||
return err | |||
} | |||
raw, err := o.DecodeRawBytes(false) | |||
if err != nil { | |||
if u, ok := pb.(Unmarshaler); ok { | |||
// NOTE: The history of proto have unfortunately been inconsistent | |||
// whether Unmarshaler should or should not implicitly clear itself. | |||
// Some implementations do, most do not. | |||
// Thus, calling this here may or may not do what people want. | |||
// | |||
// See https://github.com/golang/protobuf/issues/424 | |||
err := u.Unmarshal(p.buf[p.index:]) | |||
p.index = len(p.buf) | |||
return err | |||
} | |||
// If the object can unmarshal itself, let it. | |||
if p.isUnmarshaler { | |||
iv := v.Interface() | |||
return iv.(Unmarshaler).Unmarshal(raw) | |||
} | |||
obuf := o.buf | |||
oi := o.index | |||
o.buf = raw | |||
o.index = 0 | |||
err = o.unmarshalType(p.stype, p.sprop, is_group, bas) | |||
o.buf = obuf | |||
o.index = oi | |||
// Slow workaround for messages that aren't Unmarshalers. | |||
// This includes some hand-coded .pb.go files and | |||
// bootstrap protos. | |||
// TODO: fix all of those and then add Unmarshal to | |||
// the Message interface. Then: | |||
// The cast above and code below can be deleted. | |||
// The old unmarshaler can be deleted. | |||
// Clients can call Unmarshal directly (can already do that, actually). | |||
var info InternalMessageInfo | |||
err := info.Unmarshal(pb, p.buf[p.index:]) | |||
p.index = len(p.buf) | |||
return err | |||
} |
@@ -0,0 +1,350 @@ | |||
// Go support for Protocol Buffers - Google's data interchange format | |||
// | |||
// Copyright 2017 The Go Authors. All rights reserved. | |||
// https://github.com/golang/protobuf | |||
// | |||
// Redistribution and use in source and binary forms, with or without | |||
// modification, are permitted provided that the following conditions are | |||
// met: | |||
// | |||
// * Redistributions of source code must retain the above copyright | |||
// notice, this list of conditions and the following disclaimer. | |||
// * Redistributions in binary form must reproduce the above | |||
// copyright notice, this list of conditions and the following disclaimer | |||
// in the documentation and/or other materials provided with the | |||
// distribution. | |||
// * Neither the name of Google Inc. nor the names of its | |||
// contributors may be used to endorse or promote products derived from | |||
// this software without specific prior written permission. | |||
// | |||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
package proto | |||
import ( | |||
"fmt" | |||
"reflect" | |||
"strings" | |||
"sync" | |||
"sync/atomic" | |||
) | |||
type generatedDiscarder interface { | |||
XXX_DiscardUnknown() | |||
} | |||
// DiscardUnknown recursively discards all unknown fields from this message | |||
// and all embedded messages. | |||
// | |||
// When unmarshaling a message with unrecognized fields, the tags and values | |||
// of such fields are preserved in the Message. This allows a later call to | |||
// marshal to be able to produce a message that continues to have those | |||
// unrecognized fields. To avoid this, DiscardUnknown is used to | |||
// explicitly clear the unknown fields after unmarshaling. | |||
// | |||
// For proto2 messages, the unknown fields of message extensions are only | |||
// discarded from messages that have been accessed via GetExtension. | |||
func DiscardUnknown(m Message) { | |||
if m, ok := m.(generatedDiscarder); ok { | |||
m.XXX_DiscardUnknown() | |||
return | |||
} | |||
// TODO: Dynamically populate a InternalMessageInfo for legacy messages, | |||
// but the master branch has no implementation for InternalMessageInfo, | |||
// so it would be more work to replicate that approach. | |||
discardLegacy(m) | |||
} | |||
// DiscardUnknown recursively discards all unknown fields. | |||
func (a *InternalMessageInfo) DiscardUnknown(m Message) { | |||
di := atomicLoadDiscardInfo(&a.discard) | |||
if di == nil { | |||
di = getDiscardInfo(reflect.TypeOf(m).Elem()) | |||
atomicStoreDiscardInfo(&a.discard, di) | |||
} | |||
di.discard(toPointer(&m)) | |||
} | |||
type discardInfo struct { | |||
typ reflect.Type | |||
initialized int32 // 0: only typ is valid, 1: everything is valid | |||
lock sync.Mutex | |||
fields []discardFieldInfo | |||
unrecognized field | |||
} | |||
type discardFieldInfo struct { | |||
field field // Offset of field, guaranteed to be valid | |||
discard func(src pointer) | |||
} | |||
var ( | |||
discardInfoMap = map[reflect.Type]*discardInfo{} | |||
discardInfoLock sync.Mutex | |||
) | |||
func getDiscardInfo(t reflect.Type) *discardInfo { | |||
discardInfoLock.Lock() | |||
defer discardInfoLock.Unlock() | |||
di := discardInfoMap[t] | |||
if di == nil { | |||
di = &discardInfo{typ: t} | |||
discardInfoMap[t] = di | |||
} | |||
return di | |||
} | |||
func (di *discardInfo) discard(src pointer) { | |||
if src.isNil() { | |||
return // Nothing to do. | |||
} | |||
if atomic.LoadInt32(&di.initialized) == 0 { | |||
di.computeDiscardInfo() | |||
} | |||
for _, fi := range di.fields { | |||
sfp := src.offset(fi.field) | |||
fi.discard(sfp) | |||
} | |||
// For proto2 messages, only discard unknown fields in message extensions | |||
// that have been accessed via GetExtension. | |||
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { | |||
// Ignore lock since DiscardUnknown is not concurrency safe. | |||
emm, _ := em.extensionsRead() | |||
for _, mx := range emm { | |||
if m, ok := mx.value.(Message); ok { | |||
DiscardUnknown(m) | |||
} | |||
} | |||
} | |||
if di.unrecognized.IsValid() { | |||
*src.offset(di.unrecognized).toBytes() = nil | |||
} | |||
} | |||
func (di *discardInfo) computeDiscardInfo() { | |||
di.lock.Lock() | |||
defer di.lock.Unlock() | |||
if di.initialized != 0 { | |||
return | |||
} | |||
t := di.typ | |||
n := t.NumField() | |||
for i := 0; i < n; i++ { | |||
f := t.Field(i) | |||
if strings.HasPrefix(f.Name, "XXX_") { | |||
continue | |||
} | |||
dfi := discardFieldInfo{field: toField(&f)} | |||
tf := f.Type | |||
// Unwrap tf to get its most basic type. | |||
var isPointer, isSlice bool | |||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { | |||
isSlice = true | |||
tf = tf.Elem() | |||
} | |||
if tf.Kind() == reflect.Ptr { | |||
isPointer = true | |||
tf = tf.Elem() | |||
} | |||
if isPointer && isSlice && tf.Kind() != reflect.Struct { | |||
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) | |||
} | |||
switch tf.Kind() { | |||
case reflect.Struct: | |||
switch { | |||
case !isPointer: | |||
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) | |||
case isSlice: // E.g., []*pb.T | |||
di := getDiscardInfo(tf) | |||
dfi.discard = func(src pointer) { | |||
sps := src.getPointerSlice() | |||
for _, sp := range sps { | |||
if !sp.isNil() { | |||
di.discard(sp) | |||
} | |||
} | |||
} | |||
default: // E.g., *pb.T | |||
di := getDiscardInfo(tf) | |||
dfi.discard = func(src pointer) { | |||
sp := src.getPointer() | |||
if !sp.isNil() { | |||
di.discard(sp) | |||
} | |||
} | |||
} | |||
case reflect.Map: | |||
switch { | |||
case isPointer || isSlice: | |||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) | |||
default: // E.g., map[K]V | |||
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) | |||
dfi.discard = func(src pointer) { | |||
sm := src.asPointerTo(tf).Elem() | |||
if sm.Len() == 0 { | |||
return | |||
} | |||
for _, key := range sm.MapKeys() { | |||
val := sm.MapIndex(key) | |||
DiscardUnknown(val.Interface().(Message)) | |||
} | |||
} | |||
} else { | |||
dfi.discard = func(pointer) {} // Noop | |||
} | |||
} | |||
case reflect.Interface: | |||
// Must be oneof field. | |||
switch { | |||
case isPointer || isSlice: | |||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) | |||
default: // E.g., interface{} | |||
// TODO: Make this faster? | |||
dfi.discard = func(src pointer) { | |||
su := src.asPointerTo(tf).Elem() | |||
if !su.IsNil() { | |||
sv := su.Elem().Elem().Field(0) | |||
if sv.Kind() == reflect.Ptr && sv.IsNil() { | |||
return | |||
} | |||
switch sv.Type().Kind() { | |||
case reflect.Ptr: // Proto struct (e.g., *T) | |||
DiscardUnknown(sv.Interface().(Message)) | |||
} | |||
} | |||
} | |||
} | |||
default: | |||
continue | |||
} | |||
di.fields = append(di.fields, dfi) | |||
} | |||
di.unrecognized = invalidField | |||
if f, ok := t.FieldByName("XXX_unrecognized"); ok { | |||
if f.Type != reflect.TypeOf([]byte{}) { | |||
panic("expected XXX_unrecognized to be of type []byte") | |||
} | |||
di.unrecognized = toField(&f) | |||
} | |||
atomic.StoreInt32(&di.initialized, 1) | |||
} | |||
func discardLegacy(m Message) { | |||
v := reflect.ValueOf(m) | |||
if v.Kind() != reflect.Ptr || v.IsNil() { | |||
return | |||
} | |||
v = v.Elem() | |||
if v.Kind() != reflect.Struct { | |||
return | |||
} | |||
t := v.Type() | |||
for i := 0; i < v.NumField(); i++ { | |||
f := t.Field(i) | |||
if strings.HasPrefix(f.Name, "XXX_") { | |||
continue | |||
} | |||
vf := v.Field(i) | |||
tf := f.Type | |||
// Unwrap tf to get its most basic type. | |||
var isPointer, isSlice bool | |||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { | |||
isSlice = true | |||
tf = tf.Elem() | |||
} | |||
if tf.Kind() == reflect.Ptr { | |||
isPointer = true | |||
tf = tf.Elem() | |||
} | |||
if isPointer && isSlice && tf.Kind() != reflect.Struct { | |||
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) | |||
} | |||
switch tf.Kind() { | |||
case reflect.Struct: | |||
switch { | |||
case !isPointer: | |||
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) | |||
case isSlice: // E.g., []*pb.T | |||
for j := 0; j < vf.Len(); j++ { | |||
discardLegacy(vf.Index(j).Interface().(Message)) | |||
} | |||
default: // E.g., *pb.T | |||
discardLegacy(vf.Interface().(Message)) | |||
} | |||
case reflect.Map: | |||
switch { | |||
case isPointer || isSlice: | |||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) | |||
default: // E.g., map[K]V | |||
tv := vf.Type().Elem() | |||
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) | |||
for _, key := range vf.MapKeys() { | |||
val := vf.MapIndex(key) | |||
discardLegacy(val.Interface().(Message)) | |||
} | |||
} | |||
} | |||
case reflect.Interface: | |||
// Must be oneof field. | |||
switch { | |||
case isPointer || isSlice: | |||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) | |||
default: // E.g., test_proto.isCommunique_Union interface | |||
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { | |||
vf = vf.Elem() // E.g., *test_proto.Communique_Msg | |||
if !vf.IsNil() { | |||
vf = vf.Elem() // E.g., test_proto.Communique_Msg | |||
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value | |||
if vf.Kind() == reflect.Ptr { | |||
discardLegacy(vf.Interface().(Message)) | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { | |||
if vf.Type() != reflect.TypeOf([]byte{}) { | |||
panic("expected XXX_unrecognized to be of type []byte") | |||
} | |||
vf.Set(reflect.ValueOf([]byte(nil))) | |||
} | |||
// For proto2 messages, only discard unknown fields in message extensions | |||
// that have been accessed via GetExtension. | |||
if em, err := extendable(m); err == nil { | |||
// Ignore lock since discardLegacy is not concurrency safe. | |||
emm, _ := em.extensionsRead() | |||
for _, mx := range emm { | |||
if m, ok := mx.value.(Message); ok { | |||
discardLegacy(m) | |||
} | |||
} | |||
} | |||
} |
@@ -54,13 +54,17 @@ Equality is defined in this way: | |||
in a proto3 .proto file, fields are not "set"; specifically, | |||
zero length proto3 "bytes" fields are equal (nil == {}). | |||
- Two repeated fields are equal iff their lengths are the same, | |||
and their corresponding elements are equal (a "bytes" field, | |||
although represented by []byte, is not a repeated field) | |||
and their corresponding elements are equal. Note a "bytes" field, | |||
although represented by []byte, is not a repeated field and the | |||
rule for the scalar fields described above applies. | |||
- Two unset fields are equal. | |||
- Two unknown field sets are equal if their current | |||
encoded state is equal. | |||
- Two extension sets are equal iff they have corresponding | |||
elements that are pairwise equal. | |||
- Two map fields are equal iff their lengths are the same, | |||
and they contain the same set of elements. Zero-length map | |||
fields are equal. | |||
- Every other combination of things are not equal. | |||
The return value is undefined if a and b are not protocol buffers. | |||
@@ -105,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool { | |||
// set/unset mismatch | |||
return false | |||
} | |||
b1, ok := f1.Interface().(raw) | |||
if ok { | |||
b2 := f2.Interface().(raw) | |||
// RawMessage | |||
if !bytes.Equal(b1.Bytes(), b2.Bytes()) { | |||
return false | |||
} | |||
continue | |||
} | |||
f1, f2 = f1.Elem(), f2.Elem() | |||
} | |||
if !equalAny(f1, f2, sprop.Prop[i]) { | |||
@@ -121,9 +116,16 @@ func equalStruct(v1, v2 reflect.Value) bool { | |||
} | |||
} | |||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { | |||
em2 := v2.FieldByName("XXX_InternalExtensions") | |||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { | |||
return false | |||
} | |||
} | |||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { | |||
em2 := v2.FieldByName("XXX_extensions") | |||
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { | |||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { | |||
return false | |||
} | |||
} | |||
@@ -135,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool { | |||
u1 := uf.Bytes() | |||
u2 := v2.FieldByName("XXX_unrecognized").Bytes() | |||
if !bytes.Equal(u1, u2) { | |||
return false | |||
} | |||
return true | |||
return bytes.Equal(u1, u2) | |||
} | |||
// v1 and v2 are known to have the same type. | |||
@@ -184,6 +182,13 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool { | |||
} | |||
return true | |||
case reflect.Ptr: | |||
// Maps may have nil values in them, so check for nil. | |||
if v1.IsNil() && v2.IsNil() { | |||
return true | |||
} | |||
if v1.IsNil() != v2.IsNil() { | |||
return false | |||
} | |||
return equalAny(v1.Elem(), v2.Elem(), prop) | |||
case reflect.Slice: | |||
if v1.Type().Elem().Kind() == reflect.Uint8 { | |||
@@ -223,8 +228,14 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool { | |||
} | |||
// base is the struct type that the extensions are based on. | |||
// em1 and em2 are extension maps. | |||
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { | |||
// x1 and x2 are InternalExtensions. | |||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { | |||
em1, _ := x1.extensionsRead() | |||
em2, _ := x2.extensionsRead() | |||
return equalExtMap(base, em1, em2) | |||
} | |||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { | |||
if len(em1) != len(em2) { | |||
return false | |||
} | |||
@@ -237,6 +248,15 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { | |||
m1, m2 := e1.value, e2.value | |||
if m1 == nil && m2 == nil { | |||
// Both have only encoded form. | |||
if bytes.Equal(e1.enc, e2.enc) { | |||
continue | |||
} | |||
// The bytes are different, but the extensions might still be | |||
// equal. We need to decode them to compare. | |||
} | |||
if m1 != nil && m2 != nil { | |||
// Both are unencoded. | |||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { | |||
@@ -252,8 +272,12 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { | |||
desc = m[extNum] | |||
} | |||
if desc == nil { | |||
// If both have only encoded form and the bytes are the same, | |||
// it is handled above. We get here when the bytes are different. | |||
// We don't know how to decode it, so just compare them as byte | |||
// slices. | |||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) | |||
continue | |||
return false | |||
} | |||
var err error | |||
if m1 == nil { | |||
@@ -38,6 +38,7 @@ package proto | |||
import ( | |||
"errors" | |||
"fmt" | |||
"io" | |||
"reflect" | |||
"strconv" | |||
"sync" | |||
@@ -52,14 +53,111 @@ type ExtensionRange struct { | |||
Start, End int32 // both inclusive | |||
} | |||
// extendableProto is an interface implemented by any protocol buffer that may be extended. | |||
// extendableProto is an interface implemented by any protocol buffer generated by the current | |||
// proto compiler that may be extended. | |||
type extendableProto interface { | |||
Message | |||
ExtensionRangeArray() []ExtensionRange | |||
extensionsWrite() map[int32]Extension | |||
extensionsRead() (map[int32]Extension, sync.Locker) | |||
} | |||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous | |||
// version of the proto compiler that may be extended. | |||
type extendableProtoV1 interface { | |||
Message | |||
ExtensionRangeArray() []ExtensionRange | |||
ExtensionMap() map[int32]Extension | |||
} | |||
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() | |||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. | |||
type extensionAdapter struct { | |||
extendableProtoV1 | |||
} | |||
func (e extensionAdapter) extensionsWrite() map[int32]Extension { | |||
return e.ExtensionMap() | |||
} | |||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { | |||
return e.ExtensionMap(), notLocker{} | |||
} | |||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops. | |||
type notLocker struct{} | |||
func (n notLocker) Lock() {} | |||
func (n notLocker) Unlock() {} | |||
// extendable returns the extendableProto interface for the given generated proto message. | |||
// If the proto message has the old extension format, it returns a wrapper that implements | |||
// the extendableProto interface. | |||
func extendable(p interface{}) (extendableProto, error) { | |||
switch p := p.(type) { | |||
case extendableProto: | |||
if isNilPtr(p) { | |||
return nil, fmt.Errorf("proto: nil %T is not extendable", p) | |||
} | |||
return p, nil | |||
case extendableProtoV1: | |||
if isNilPtr(p) { | |||
return nil, fmt.Errorf("proto: nil %T is not extendable", p) | |||
} | |||
return extensionAdapter{p}, nil | |||
} | |||
// Don't allocate a specific error containing %T: | |||
// this is the hot path for Clone and MarshalText. | |||
return nil, errNotExtendable | |||
} | |||
var errNotExtendable = errors.New("proto: not an extendable proto.Message") | |||
func isNilPtr(x interface{}) bool { | |||
v := reflect.ValueOf(x) | |||
return v.Kind() == reflect.Ptr && v.IsNil() | |||
} | |||
// XXX_InternalExtensions is an internal representation of proto extensions. | |||
// | |||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, | |||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package. | |||
// | |||
// The methods of XXX_InternalExtensions are not concurrency safe in general, | |||
// but calls to logically read-only methods such as has and get may be executed concurrently. | |||
type XXX_InternalExtensions struct { | |||
// The struct must be indirect so that if a user inadvertently copies a | |||
// generated message and its embedded XXX_InternalExtensions, they | |||
// avoid the mayhem of a copied mutex. | |||
// | |||
// The mutex serializes all logically read-only operations to p.extensionMap. | |||
// It is up to the client to ensure that write operations to p.extensionMap are | |||
// mutually exclusive with other accesses. | |||
p *struct { | |||
mu sync.Mutex | |||
extensionMap map[int32]Extension | |||
} | |||
} | |||
// extensionsWrite returns the extension map, creating it on first use. | |||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { | |||
if e.p == nil { | |||
e.p = new(struct { | |||
mu sync.Mutex | |||
extensionMap map[int32]Extension | |||
}) | |||
e.p.extensionMap = make(map[int32]Extension) | |||
} | |||
return e.p.extensionMap | |||
} | |||
// extensionsRead returns the extensions map for read-only use. It may be nil. | |||
// The caller must hold the returned mutex's lock when accessing Elements within the map. | |||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { | |||
if e.p == nil { | |||
return nil, nil | |||
} | |||
return e.p.extensionMap, &e.p.mu | |||
} | |||
// ExtensionDesc represents an extension specification. | |||
// Used in generated code from the protocol compiler. | |||
@@ -69,6 +167,7 @@ type ExtensionDesc struct { | |||
Field int32 // field number | |||
Name string // fully-qualified name of extension, for text formatting | |||
Tag string // protobuf tag style | |||
Filename string // name of the file in which the extension is defined | |||
} | |||
func (ed *ExtensionDesc) repeated() bool { | |||
@@ -92,8 +191,13 @@ type Extension struct { | |||
} | |||
// SetRawExtension is for testing only. | |||
func SetRawExtension(base extendableProto, id int32, b []byte) { | |||
base.ExtensionMap()[id] = Extension{enc: b} | |||
func SetRawExtension(base Message, id int32, b []byte) { | |||
epb, err := extendable(base) | |||
if err != nil { | |||
return | |||
} | |||
extmap := epb.extensionsWrite() | |||
extmap[id] = Extension{enc: b} | |||
} | |||
// isExtensionField returns true iff the given field number is in an extension range. | |||
@@ -108,9 +212,13 @@ func isExtensionField(pb extendableProto, field int32) bool { | |||
// checkExtensionTypes checks that the given extension is valid for pb. | |||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { | |||
var pbi interface{} = pb | |||
// Check the extended type. | |||
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { | |||
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) | |||
if ea, ok := pbi.(extensionAdapter); ok { | |||
pbi = ea.extendableProtoV1 | |||
} | |||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { | |||
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) | |||
} | |||
// Check the range. | |||
if !isExtensionField(pb, extension.Field) { | |||
@@ -155,80 +263,62 @@ func extensionProperties(ed *ExtensionDesc) *Properties { | |||
return prop | |||
} | |||
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. | |||
func encodeExtensionMap(m map[int32]Extension) error { | |||
for k, e := range m { | |||
if e.value == nil || e.desc == nil { | |||
// Extension is only in its encoded form. | |||
continue | |||
} | |||
// We don't skip extensions that have an encoded form set, | |||
// because the extension value may have been mutated after | |||
// the last time this function was called. | |||
et := reflect.TypeOf(e.desc.ExtensionType) | |||
props := extensionProperties(e.desc) | |||
p := NewBuffer(nil) | |||
// If e.value has type T, the encoder expects a *struct{ X T }. | |||
// Pass a *T with a zero field and hope it all works out. | |||
x := reflect.New(et) | |||
x.Elem().Set(reflect.ValueOf(e.value)) | |||
if err := props.enc(p, props, toStructPointer(x)); err != nil { | |||
return err | |||
} | |||
e.enc = p.buf | |||
m[k] = e | |||
} | |||
return nil | |||
} | |||
func sizeExtensionMap(m map[int32]Extension) (n int) { | |||
for _, e := range m { | |||
if e.value == nil || e.desc == nil { | |||
// Extension is only in its encoded form. | |||
n += len(e.enc) | |||
continue | |||
} | |||
// We don't skip extensions that have an encoded form set, | |||
// because the extension value may have been mutated after | |||
// the last time this function was called. | |||
et := reflect.TypeOf(e.desc.ExtensionType) | |||
props := extensionProperties(e.desc) | |||
// If e.value has type T, the encoder expects a *struct{ X T }. | |||
// Pass a *T with a zero field and hope it all works out. | |||
x := reflect.New(et) | |||
x.Elem().Set(reflect.ValueOf(e.value)) | |||
n += props.size(props, toStructPointer(x)) | |||
} | |||
return | |||
} | |||
// HasExtension returns whether the given extension is present in pb. | |||
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { | |||
func HasExtension(pb Message, extension *ExtensionDesc) bool { | |||
// TODO: Check types, field numbers, etc.? | |||
_, ok := pb.ExtensionMap()[extension.Field] | |||
epb, err := extendable(pb) | |||
if err != nil { | |||
return false | |||
} | |||
extmap, mu := epb.extensionsRead() | |||
if extmap == nil { | |||
return false | |||
} | |||
mu.Lock() | |||
_, ok := extmap[extension.Field] | |||
mu.Unlock() | |||
return ok | |||
} | |||
// ClearExtension removes the given extension from pb. | |||
func ClearExtension(pb extendableProto, extension *ExtensionDesc) { | |||
func ClearExtension(pb Message, extension *ExtensionDesc) { | |||
epb, err := extendable(pb) | |||
if err != nil { | |||
return | |||
} | |||
// TODO: Check types, field numbers, etc.? | |||
delete(pb.ExtensionMap(), extension.Field) | |||
extmap := epb.extensionsWrite() | |||
delete(extmap, extension.Field) | |||
} | |||
// GetExtension parses and returns the given extension of pb. | |||
// If the extension is not present and has no default value it returns ErrMissingExtension. | |||
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { | |||
if err := checkExtensionTypes(pb, extension); err != nil { | |||
// GetExtension retrieves a proto2 extended field from pb. | |||
// | |||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), | |||
// then GetExtension parses the encoded field and returns a Go value of the specified type. | |||
// If the field is not present, then the default value is returned (if one is specified), | |||
// otherwise ErrMissingExtension is reported. | |||
// | |||
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), | |||
// then GetExtension returns the raw encoded bytes of the field extension. | |||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { | |||
epb, err := extendable(pb) | |||
if err != nil { | |||
return nil, err | |||
} | |||
emap := pb.ExtensionMap() | |||
if extension.ExtendedType != nil { | |||
// can only check type if this is a complete descriptor | |||
if err := checkExtensionTypes(epb, extension); err != nil { | |||
return nil, err | |||
} | |||
} | |||
emap, mu := epb.extensionsRead() | |||
if emap == nil { | |||
return defaultExtensionValue(extension) | |||
} | |||
mu.Lock() | |||
defer mu.Unlock() | |||
e, ok := emap[extension.Field] | |||
if !ok { | |||
// defaultExtensionValue returns the default value or | |||
@@ -247,6 +337,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er | |||
return e.value, nil | |||
} | |||
if extension.ExtensionType == nil { | |||
// incomplete descriptor | |||
return e.enc, nil | |||
} | |||
v, err := decodeExtension(e.enc, extension) | |||
if err != nil { | |||
return nil, err | |||
@@ -264,6 +359,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er | |||
// defaultExtensionValue returns the default value for extension. | |||
// If no default for an extension is defined ErrMissingExtension is returned. | |||
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { | |||
if extension.ExtensionType == nil { | |||
// incomplete descriptor, so no default | |||
return nil, ErrMissingExtension | |||
} | |||
t := reflect.TypeOf(extension.ExtensionType) | |||
props := extensionProperties(extension) | |||
@@ -298,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { | |||
// decodeExtension decodes an extension encoded in b. | |||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { | |||
o := NewBuffer(b) | |||
t := reflect.TypeOf(extension.ExtensionType) | |||
props := extensionProperties(extension) | |||
unmarshal := typeUnmarshaler(t, extension.Tag) | |||
// t is a pointer to a struct, pointer to basic type or a slice. | |||
// Allocate a "field" to store the pointer/slice itself; the | |||
// pointer/slice will be stored here. We pass | |||
// the address of this field to props.dec. | |||
// This passes a zero field and a *t and lets props.dec | |||
// interpret it as a *struct{ x t }. | |||
// Allocate space to store the pointer/slice. | |||
value := reflect.New(t).Elem() | |||
var err error | |||
for { | |||
// Discard wire type and field number varint. It isn't needed. | |||
if _, err := o.DecodeVarint(); err != nil { | |||
return nil, err | |||
x, n := decodeVarint(b) | |||
if n == 0 { | |||
return nil, io.ErrUnexpectedEOF | |||
} | |||
b = b[n:] | |||
wire := int(x) & 7 | |||
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { | |||
b, err = unmarshal(b, valToPointer(value.Addr()), wire) | |||
if err != nil { | |||
return nil, err | |||
} | |||
if o.index >= len(o.buf) { | |||
if len(b) == 0 { | |||
break | |||
} | |||
} | |||
@@ -332,10 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { | |||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es. | |||
// The returned slice has the same length as es; missing extensions will appear as nil elements. | |||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { | |||
epb, ok := pb.(extendableProto) | |||
if !ok { | |||
err = errors.New("proto: not an extendable proto") | |||
return | |||
epb, err := extendable(pb) | |||
if err != nil { | |||
return nil, err | |||
} | |||
extensions = make([]interface{}, len(es)) | |||
for i, e := range es { | |||
@@ -350,9 +446,44 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e | |||
return | |||
} | |||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. | |||
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing | |||
// just the Field field, which defines the extension's field number. | |||
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { | |||
epb, err := extendable(pb) | |||
if err != nil { | |||
return nil, err | |||
} | |||
registeredExtensions := RegisteredExtensions(pb) | |||
emap, mu := epb.extensionsRead() | |||
if emap == nil { | |||
return nil, nil | |||
} | |||
mu.Lock() | |||
defer mu.Unlock() | |||
extensions := make([]*ExtensionDesc, 0, len(emap)) | |||
for extid, e := range emap { | |||
desc := e.desc | |||
if desc == nil { | |||
desc = registeredExtensions[extid] | |||
if desc == nil { | |||
desc = &ExtensionDesc{Field: extid} | |||
} | |||
} | |||
extensions = append(extensions, desc) | |||
} | |||
return extensions, nil | |||
} | |||
// SetExtension sets the specified extension of pb to the specified value. | |||
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { | |||
if err := checkExtensionTypes(pb, extension); err != nil { | |||
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { | |||
epb, err := extendable(pb) | |||
if err != nil { | |||
return err | |||
} | |||
if err := checkExtensionTypes(epb, extension); err != nil { | |||
return err | |||
} | |||
typ := reflect.TypeOf(extension.ExtensionType) | |||
@@ -368,10 +499,23 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{ | |||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) | |||
} | |||
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} | |||
extmap := epb.extensionsWrite() | |||
extmap[extension.Field] = Extension{desc: extension, value: value} | |||
return nil | |||
} | |||
// ClearAllExtensions clears all extensions from pb. | |||
func ClearAllExtensions(pb Message) { | |||
epb, err := extendable(pb) | |||
if err != nil { | |||
return | |||
} | |||
m := epb.extensionsWrite() | |||
for k := range m { | |||
delete(m, k) | |||
} | |||
} | |||
// A global registry of extensions. | |||
// The generated code will register the generated descriptors by calling RegisterExtension. | |||
@@ -73,7 +73,6 @@ for a protocol buffer variable v: | |||
When the .proto file specifies `syntax="proto3"`, there are some differences: | |||
- Non-repeated fields of non-message type are values instead of pointers. | |||
- Getters are only generated for message and oneof fields. | |||
- Enum types do not get an Enum method. | |||
The simplest way to describe this is to see an example. | |||
@@ -274,6 +273,67 @@ import ( | |||
"sync" | |||
) | |||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. | |||
// Marshal reports this when a required field is not initialized. | |||
// Unmarshal reports this when a required field is missing from the wire data. | |||
type RequiredNotSetError struct{ field string } | |||
func (e *RequiredNotSetError) Error() string { | |||
if e.field == "" { | |||
return fmt.Sprintf("proto: required field not set") | |||
} | |||
return fmt.Sprintf("proto: required field %q not set", e.field) | |||
} | |||
func (e *RequiredNotSetError) RequiredNotSet() bool { | |||
return true | |||
} | |||
type invalidUTF8Error struct{ field string } | |||
func (e *invalidUTF8Error) Error() string { | |||
if e.field == "" { | |||
return "proto: invalid UTF-8 detected" | |||
} | |||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) | |||
} | |||
func (e *invalidUTF8Error) InvalidUTF8() bool { | |||
return true | |||
} | |||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. | |||
// This error should not be exposed to the external API as such errors should | |||
// be recreated with the field information. | |||
var errInvalidUTF8 = &invalidUTF8Error{} | |||
// isNonFatal reports whether the error is either a RequiredNotSet error | |||
// or a InvalidUTF8 error. | |||
func isNonFatal(err error) bool { | |||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { | |||
return true | |||
} | |||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { | |||
return true | |||
} | |||
return false | |||
} | |||
type nonFatal struct{ E error } | |||
// Merge merges err into nf and reports whether it was successful. | |||
// Otherwise it returns false for any fatal non-nil errors. | |||
func (nf *nonFatal) Merge(err error) (ok bool) { | |||
if err == nil { | |||
return true // not an error | |||
} | |||
if !isNonFatal(err) { | |||
return false // fatal error | |||
} | |||
if nf.E == nil { | |||
nf.E = err // store first instance of non-fatal error | |||
} | |||
return true | |||
} | |||
// Message is implemented by generated protocol buffer messages. | |||
type Message interface { | |||
Reset() | |||
@@ -308,18 +368,9 @@ func GetStats() Stats { return stats } | |||
// temporary Buffer and are fine for most applications. | |||
type Buffer struct { | |||
buf []byte // encode/decode byte stream | |||
index int // write point | |||
index int // read point | |||
// pools of basic types to amortize allocation. | |||
bools []bool | |||
uint32s []uint32 | |||
uint64s []uint64 | |||
// extra pools, only used with pointer_reflect.go | |||
int32s []int32 | |||
int64s []int64 | |||
float32s []float32 | |||
float64s []float64 | |||
deterministic bool | |||
} | |||
// NewBuffer allocates a new Buffer and initializes its internal data to | |||
@@ -344,6 +395,30 @@ func (p *Buffer) SetBuf(s []byte) { | |||
// Bytes returns the contents of the Buffer. | |||
func (p *Buffer) Bytes() []byte { return p.buf } | |||
// SetDeterministic sets whether to use deterministic serialization. | |||
// | |||
// Deterministic serialization guarantees that for a given binary, equal | |||
// messages will always be serialized to the same bytes. This implies: | |||
// | |||
// - Repeated serialization of a message will return the same bytes. | |||
// - Different processes of the same binary (which may be executing on | |||
// different machines) will serialize equal messages to the same bytes. | |||
// | |||
// Note that the deterministic serialization is NOT canonical across | |||
// languages. It is not guaranteed to remain stable over time. It is unstable | |||
// across different builds with schema changes due to unknown fields. | |||
// Users who need canonical serialization (e.g., persistent storage in a | |||
// canonical form, fingerprinting, etc.) should define their own | |||
// canonicalization specification and implement their own serializer rather | |||
// than relying on this API. | |||
// | |||
// If deterministic serialization is requested, map entries will be sorted | |||
// by keys in lexographical order. This is an implementation detail and | |||
// subject to change. | |||
func (p *Buffer) SetDeterministic(deterministic bool) { | |||
p.deterministic = deterministic | |||
} | |||
/* | |||
* Helper routines for simplifying the creation of optional fields of basic type. | |||
*/ | |||
@@ -832,22 +907,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes | |||
return sf, false, nil | |||
} | |||
// mapKeys returns a sort.Interface to be used for sorting the map keys. | |||
// Map fields may have key types of non-float scalars, strings and enums. | |||
// The easiest way to sort them in some deterministic order is to use fmt. | |||
// If this turns out to be inefficient we can always consider other options, | |||
// such as doing a Schwartzian transform. | |||
func mapKeys(vs []reflect.Value) sort.Interface { | |||
s := mapKeySorter{ | |||
vs: vs, | |||
// default Less function: textual comparison | |||
less: func(a, b reflect.Value) bool { | |||
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) | |||
}, | |||
} | |||
s := mapKeySorter{vs: vs} | |||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; | |||
// numeric keys are sorted numerically. | |||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. | |||
if len(vs) == 0 { | |||
return s | |||
} | |||
@@ -856,6 +921,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { | |||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } | |||
case reflect.Uint32, reflect.Uint64: | |||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } | |||
case reflect.Bool: | |||
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true | |||
case reflect.String: | |||
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } | |||
default: | |||
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) | |||
} | |||
return s | |||
@@ -889,6 +960,20 @@ func isProto3Zero(v reflect.Value) bool { | |||
return false | |||
} | |||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files | |||
// to assert that that code is compatible with this version of the proto package. | |||
const ProtoPackageIsVersion2 = true | |||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files | |||
// to assert that that code is compatible with this version of the proto package. | |||
const ProtoPackageIsVersion1 = true | |||
// InternalMessageInfo is a type used internally by generated .pb.go files. | |||
// This type is not intended to be used by non-generated code. | |||
// This type is not subject to any compatibility guarantee. | |||
type InternalMessageInfo struct { | |||
marshal *marshalInfo | |||
unmarshal *unmarshalInfo | |||
merge *mergeInfo | |||
discard *discardInfo | |||
} |
@@ -42,6 +42,7 @@ import ( | |||
"fmt" | |||
"reflect" | |||
"sort" | |||
"sync" | |||
) | |||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. | |||
@@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item { | |||
} | |||
func (ms *messageSet) Has(pb Message) bool { | |||
if ms.find(pb) != nil { | |||
return true | |||
} | |||
return false | |||
return ms.find(pb) != nil | |||
} | |||
func (ms *messageSet) Unmarshal(pb Message) error { | |||
@@ -149,36 +147,54 @@ func skipVarint(buf []byte) []byte { | |||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format. | |||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. | |||
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { | |||
if err := encodeExtensionMap(m); err != nil { | |||
return nil, err | |||
} | |||
func MarshalMessageSet(exts interface{}) ([]byte, error) { | |||
return marshalMessageSet(exts, false) | |||
} | |||
// Sort extension IDs to provide a deterministic encoding. | |||
// See also enc_map in encode.go. | |||
ids := make([]int, 0, len(m)) | |||
for id := range m { | |||
ids = append(ids, int(id)) | |||
} | |||
sort.Ints(ids) | |||
ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} | |||
for _, id := range ids { | |||
e := m[int32(id)] | |||
// Remove the wire type and field number varint, as well as the length varint. | |||
msg := skipVarint(skipVarint(e.enc)) | |||
ms.Item = append(ms.Item, &_MessageSet_Item{ | |||
TypeId: Int32(int32(id)), | |||
Message: msg, | |||
}) | |||
// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. | |||
func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { | |||
switch exts := exts.(type) { | |||
case *XXX_InternalExtensions: | |||
var u marshalInfo | |||
siz := u.sizeMessageSet(exts) | |||
b := make([]byte, 0, siz) | |||
return u.appendMessageSet(b, exts, deterministic) | |||
case map[int32]Extension: | |||
// This is an old-style extension map. | |||
// Wrap it in a new-style XXX_InternalExtensions. | |||
ie := XXX_InternalExtensions{ | |||
p: &struct { | |||
mu sync.Mutex | |||
extensionMap map[int32]Extension | |||
}{ | |||
extensionMap: exts, | |||
}, | |||
} | |||
var u marshalInfo | |||
siz := u.sizeMessageSet(&ie) | |||
b := make([]byte, 0, siz) | |||
return u.appendMessageSet(b, &ie, deterministic) | |||
default: | |||
return nil, errors.New("proto: not an extension map") | |||
} | |||
return Marshal(ms) | |||
} | |||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. | |||
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. | |||
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { | |||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. | |||
func UnmarshalMessageSet(buf []byte, exts interface{}) error { | |||
var m map[int32]Extension | |||
switch exts := exts.(type) { | |||
case *XXX_InternalExtensions: | |||
m = exts.extensionsWrite() | |||
case map[int32]Extension: | |||
m = exts | |||
default: | |||
return errors.New("proto: not an extension map") | |||
} | |||
ms := new(messageSet) | |||
if err := Unmarshal(buf, ms); err != nil { | |||
return err | |||
@@ -209,7 +225,24 @@ func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { | |||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. | |||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. | |||
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { | |||
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { | |||
var m map[int32]Extension | |||
switch exts := exts.(type) { | |||
case *XXX_InternalExtensions: | |||
var mu sync.Locker | |||
m, mu = exts.extensionsRead() | |||
if m != nil { | |||
// Keep the extensions map locked until we're done marshaling to prevent | |||
// races between marshaling and unmarshaling the lazily-{en,de}coded | |||
// values. | |||
mu.Lock() | |||
defer mu.Unlock() | |||
} | |||
case map[int32]Extension: | |||
m = exts | |||
default: | |||
return nil, errors.New("proto: not an extension map") | |||
} | |||
var b bytes.Buffer | |||
b.WriteByte('{') | |||
@@ -222,15 +255,16 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { | |||
for i, id := range ids { | |||
ext := m[id] | |||
if i > 0 { | |||
b.WriteByte(',') | |||
} | |||
msd, ok := messageSetMap[id] | |||
if !ok { | |||
// Unknown type; we can't render it, so skip it. | |||
continue | |||
} | |||
if i > 0 && b.Len() > 1 { | |||
b.WriteByte(',') | |||
} | |||
fmt.Fprintf(&b, `"[%s]":`, msd.name) | |||
x := ext.value | |||
@@ -252,7 +286,7 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { | |||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. | |||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. | |||
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { | |||
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { | |||
// Common-case fast path. | |||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { | |||
return nil | |||
@@ -29,7 +29,7 @@ | |||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
// +build appengine | |||
// +build purego appengine js | |||
// This file contains an implementation of proto field accesses using package reflect. | |||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can | |||
@@ -38,32 +38,13 @@ | |||
package proto | |||
import ( | |||
"math" | |||
"reflect" | |||
"sync" | |||
) | |||
// A structPointer is a pointer to a struct. | |||
type structPointer struct { | |||
v reflect.Value | |||
} | |||
// toStructPointer returns a structPointer equivalent to the given reflect value. | |||
// The reflect value must itself be a pointer to a struct. | |||
func toStructPointer(v reflect.Value) structPointer { | |||
return structPointer{v} | |||
} | |||
// IsNil reports whether p is nil. | |||
func structPointer_IsNil(p structPointer) bool { | |||
return p.v.IsNil() | |||
} | |||
const unsafeAllowed = false | |||
// Interface returns the struct pointer as an interface value. | |||
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { | |||
return p.v.Interface() | |||
} | |||
// A field identifies a field in a struct, accessible from a structPointer. | |||
// A field identifies a field in a struct, accessible from a pointer. | |||
// In this implementation, a field is identified by the sequence of field indices | |||
// passed to reflect's FieldByIndex. | |||
type field []int | |||
@@ -76,404 +57,301 @@ func toField(f *reflect.StructField) field { | |||
// invalidField is an invalid field identifier. | |||
var invalidField = field(nil) | |||
// zeroField is a noop when calling pointer.offset. | |||
var zeroField = field([]int{}) | |||
// IsValid reports whether the field identifier is valid. | |||
func (f field) IsValid() bool { return f != nil } | |||
// field returns the given field in the struct as a reflect value. | |||
func structPointer_field(p structPointer, f field) reflect.Value { | |||
// Special case: an extension map entry with a value of type T | |||
// passes a *T to the struct-handling code with a zero field, | |||
// expecting that it will be treated as equivalent to *struct{ X T }, | |||
// which has the same memory layout. We have to handle that case | |||
// specially, because reflect will panic if we call FieldByIndex on a | |||
// non-struct. | |||
if f == nil { | |||
return p.v.Elem() | |||
} | |||
return p.v.Elem().FieldByIndex(f) | |||
// The pointer type is for the table-driven decoder. | |||
// The implementation here uses a reflect.Value of pointer type to | |||
// create a generic pointer. In pointer_unsafe.go we use unsafe | |||
// instead of reflect to implement the same (but faster) interface. | |||
type pointer struct { | |||
v reflect.Value | |||
} | |||
// ifield returns the given field in the struct as an interface value. | |||
func structPointer_ifield(p structPointer, f field) interface{} { | |||
return structPointer_field(p, f).Addr().Interface() | |||
// toPointer converts an interface of pointer type to a pointer | |||
// that points to the same target. | |||
func toPointer(i *Message) pointer { | |||
return pointer{v: reflect.ValueOf(*i)} | |||
} | |||
// Bytes returns the address of a []byte field in the struct. | |||
func structPointer_Bytes(p structPointer, f field) *[]byte { | |||
return structPointer_ifield(p, f).(*[]byte) | |||
// toAddrPointer converts an interface to a pointer that points to | |||
// the interface data. | |||
func toAddrPointer(i *interface{}, isptr bool) pointer { | |||
v := reflect.ValueOf(*i) | |||
u := reflect.New(v.Type()) | |||
u.Elem().Set(v) | |||
return pointer{v: u} | |||
} | |||
// BytesSlice returns the address of a [][]byte field in the struct. | |||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte { | |||
return structPointer_ifield(p, f).(*[][]byte) | |||
// valToPointer converts v to a pointer. v must be of pointer type. | |||
func valToPointer(v reflect.Value) pointer { | |||
return pointer{v: v} | |||
} | |||
// Bool returns the address of a *bool field in the struct. | |||
func structPointer_Bool(p structPointer, f field) **bool { | |||
return structPointer_ifield(p, f).(**bool) | |||
// offset converts from a pointer to a structure to a pointer to | |||
// one of its fields. | |||
func (p pointer) offset(f field) pointer { | |||
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} | |||
} | |||
// BoolVal returns the address of a bool field in the struct. | |||
func structPointer_BoolVal(p structPointer, f field) *bool { | |||
return structPointer_ifield(p, f).(*bool) | |||
func (p pointer) isNil() bool { | |||
return p.v.IsNil() | |||
} | |||
// BoolSlice returns the address of a []bool field in the struct. | |||
func structPointer_BoolSlice(p structPointer, f field) *[]bool { | |||
return structPointer_ifield(p, f).(*[]bool) | |||
// grow updates the slice s in place to make it one element longer. | |||
// s must be addressable. | |||
// Returns the (addressable) new element. | |||
func grow(s reflect.Value) reflect.Value { | |||
n, m := s.Len(), s.Cap() | |||
if n < m { | |||
s.SetLen(n + 1) | |||
} else { | |||
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) | |||
} | |||
return s.Index(n) | |||
} | |||
// String returns the address of a *string field in the struct. | |||
func structPointer_String(p structPointer, f field) **string { | |||
return structPointer_ifield(p, f).(**string) | |||
func (p pointer) toInt64() *int64 { | |||
return p.v.Interface().(*int64) | |||
} | |||
// StringVal returns the address of a string field in the struct. | |||
func structPointer_StringVal(p structPointer, f field) *string { | |||
return structPointer_ifield(p, f).(*string) | |||
func (p pointer) toInt64Ptr() **int64 { | |||
return p.v.Interface().(**int64) | |||
} | |||
// StringSlice returns the address of a []string field in the struct. | |||
func structPointer_StringSlice(p structPointer, f field) *[]string { | |||
return structPointer_ifield(p, f).(*[]string) | |||
func (p pointer) toInt64Slice() *[]int64 { | |||
return p.v.Interface().(*[]int64) | |||
} | |||
// ExtMap returns the address of an extension map field in the struct. | |||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { | |||
return structPointer_ifield(p, f).(*map[int32]Extension) | |||
} | |||
var int32ptr = reflect.TypeOf((*int32)(nil)) | |||
// NewAt returns the reflect.Value for a pointer to a field in the struct. | |||
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { | |||
return structPointer_field(p, f).Addr() | |||
func (p pointer) toInt32() *int32 { | |||
return p.v.Convert(int32ptr).Interface().(*int32) | |||
} | |||
// SetStructPointer writes a *struct field in the struct. | |||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { | |||
structPointer_field(p, f).Set(q.v) | |||
// The toInt32Ptr/Slice methods don't work because of enums. | |||
// Instead, we must use set/get methods for the int32ptr/slice case. | |||
/* | |||
func (p pointer) toInt32Ptr() **int32 { | |||
return p.v.Interface().(**int32) | |||
} | |||
// GetStructPointer reads a *struct field in the struct. | |||
func structPointer_GetStructPointer(p structPointer, f field) structPointer { | |||
return structPointer{structPointer_field(p, f)} | |||
func (p pointer) toInt32Slice() *[]int32 { | |||
return p.v.Interface().(*[]int32) | |||
} | |||
// StructPointerSlice the address of a []*struct field in the struct. | |||
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { | |||
return structPointerSlice{structPointer_field(p, f)} | |||
*/ | |||
func (p pointer) getInt32Ptr() *int32 { | |||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { | |||
// raw int32 type | |||
return p.v.Elem().Interface().(*int32) | |||
} | |||
// an enum | |||
return p.v.Elem().Convert(int32PtrType).Interface().(*int32) | |||
} | |||
func (p pointer) setInt32Ptr(v int32) { | |||
// Allocate value in a *int32. Possibly convert that to a *enum. | |||
// Then assign it to a **int32 or **enum. | |||
// Note: we can convert *int32 to *enum, but we can't convert | |||
// **int32 to **enum! | |||
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) | |||
} | |||
// getInt32Slice copies []int32 from p as a new slice. | |||
// This behavior differs from the implementation in pointer_unsafe.go. | |||
func (p pointer) getInt32Slice() []int32 { | |||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { | |||
// raw int32 type | |||
return p.v.Elem().Interface().([]int32) | |||
} | |||
// an enum | |||
// Allocate a []int32, then assign []enum's values into it. | |||
// Note: we can't convert []enum to []int32. | |||
slice := p.v.Elem() | |||
s := make([]int32, slice.Len()) | |||
for i := 0; i < slice.Len(); i++ { | |||
s[i] = int32(slice.Index(i).Int()) | |||
} | |||
return s | |||
} | |||
// A structPointerSlice represents the address of a slice of pointers to structs | |||
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. | |||
type structPointerSlice struct { | |||
v reflect.Value | |||
// setInt32Slice copies []int32 into p as a new slice. | |||
// This behavior differs from the implementation in pointer_unsafe.go. | |||
func (p pointer) setInt32Slice(v []int32) { | |||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { | |||
// raw int32 type | |||
p.v.Elem().Set(reflect.ValueOf(v)) | |||
return | |||
} | |||
// an enum | |||
// Allocate a []enum, then assign []int32's values into it. | |||
// Note: we can't convert []enum to []int32. | |||
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) | |||
for i, x := range v { | |||
slice.Index(i).SetInt(int64(x)) | |||
} | |||
p.v.Elem().Set(slice) | |||
} | |||
func (p structPointerSlice) Len() int { return p.v.Len() } | |||
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } | |||
func (p structPointerSlice) Append(q structPointer) { | |||
p.v.Set(reflect.Append(p.v, q.v)) | |||
func (p pointer) appendInt32Slice(v int32) { | |||
grow(p.v.Elem()).SetInt(int64(v)) | |||
} | |||
var ( | |||
int32Type = reflect.TypeOf(int32(0)) | |||
uint32Type = reflect.TypeOf(uint32(0)) | |||
float32Type = reflect.TypeOf(float32(0)) | |||
int64Type = reflect.TypeOf(int64(0)) | |||
uint64Type = reflect.TypeOf(uint64(0)) | |||
float64Type = reflect.TypeOf(float64(0)) | |||
) | |||
// A word32 represents a field of type *int32, *uint32, *float32, or *enum. | |||
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. | |||
type word32 struct { | |||
v reflect.Value | |||
func (p pointer) toUint64() *uint64 { | |||
return p.v.Interface().(*uint64) | |||
} | |||
// IsNil reports whether p is nil. | |||
func word32_IsNil(p word32) bool { | |||
return p.v.IsNil() | |||
func (p pointer) toUint64Ptr() **uint64 { | |||
return p.v.Interface().(**uint64) | |||
} | |||
// Set sets p to point at a newly allocated word with bits set to x. | |||
func word32_Set(p word32, o *Buffer, x uint32) { | |||
t := p.v.Type().Elem() | |||
switch t { | |||
case int32Type: | |||
if len(o.int32s) == 0 { | |||
o.int32s = make([]int32, uint32PoolSize) | |||
} | |||
o.int32s[0] = int32(x) | |||
p.v.Set(reflect.ValueOf(&o.int32s[0])) | |||
o.int32s = o.int32s[1:] | |||
return | |||
case uint32Type: | |||
if len(o.uint32s) == 0 { | |||
o.uint32s = make([]uint32, uint32PoolSize) | |||
} | |||
o.uint32s[0] = x | |||
p.v.Set(reflect.ValueOf(&o.uint32s[0])) | |||
o.uint32s = o.uint32s[1:] | |||
return | |||
case float32Type: | |||
if len(o.float32s) == 0 { | |||
o.float32s = make([]float32, uint32PoolSize) | |||
} | |||
o.float32s[0] = math.Float32frombits(x) | |||
p.v.Set(reflect.ValueOf(&o.float32s[0])) | |||
o.float32s = o.float32s[1:] | |||
return | |||
} | |||
// must be enum | |||
p.v.Set(reflect.New(t)) | |||
p.v.Elem().SetInt(int64(int32(x))) | |||
func (p pointer) toUint64Slice() *[]uint64 { | |||
return p.v.Interface().(*[]uint64) | |||
} | |||
// Get gets the bits pointed at by p, as a uint32. | |||
func word32_Get(p word32) uint32 { | |||
elem := p.v.Elem() | |||
switch elem.Kind() { | |||
case reflect.Int32: | |||
return uint32(elem.Int()) | |||
case reflect.Uint32: | |||
return uint32(elem.Uint()) | |||
case reflect.Float32: | |||
return math.Float32bits(float32(elem.Float())) | |||
} | |||
panic("unreachable") | |||
func (p pointer) toUint32() *uint32 { | |||
return p.v.Interface().(*uint32) | |||
} | |||
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. | |||
func structPointer_Word32(p structPointer, f field) word32 { | |||
return word32{structPointer_field(p, f)} | |||
func (p pointer) toUint32Ptr() **uint32 { | |||
return p.v.Interface().(**uint32) | |||
} | |||
// A word32Val represents a field of type int32, uint32, float32, or enum. | |||
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. | |||
type word32Val struct { | |||
v reflect.Value | |||
func (p pointer) toUint32Slice() *[]uint32 { | |||
return p.v.Interface().(*[]uint32) | |||
} | |||
// Set sets *p to x. | |||
func word32Val_Set(p word32Val, x uint32) { | |||
switch p.v.Type() { | |||
case int32Type: | |||
p.v.SetInt(int64(x)) | |||
return | |||
case uint32Type: | |||
p.v.SetUint(uint64(x)) | |||
return | |||
case float32Type: | |||
p.v.SetFloat(float64(math.Float32frombits(x))) | |||
return | |||
} | |||
// must be enum | |||
p.v.SetInt(int64(int32(x))) | |||
func (p pointer) toBool() *bool { | |||
return p.v.Interface().(*bool) | |||
} | |||
// Get gets the bits pointed at by p, as a uint32. | |||
func word32Val_Get(p word32Val) uint32 { | |||
elem := p.v | |||
switch elem.Kind() { | |||
case reflect.Int32: | |||
return uint32(elem.Int()) | |||
case reflect.Uint32: | |||
return uint32(elem.Uint()) | |||
case reflect.Float32: | |||
return math.Float32bits(float32(elem.Float())) | |||
} | |||
panic("unreachable") | |||
func (p pointer) toBoolPtr() **bool { | |||
return p.v.Interface().(**bool) | |||
} | |||
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. | |||
func structPointer_Word32Val(p structPointer, f field) word32Val { | |||
return word32Val{structPointer_field(p, f)} | |||
func (p pointer) toBoolSlice() *[]bool { | |||
return p.v.Interface().(*[]bool) | |||
} | |||
// A word32Slice is a slice of 32-bit values. | |||
// That is, v.Type() is []int32, []uint32, []float32, or []enum. | |||
type word32Slice struct { | |||
v reflect.Value | |||
func (p pointer) toFloat64() *float64 { | |||
return p.v.Interface().(*float64) | |||
} | |||
func (p word32Slice) Append(x uint32) { | |||
n, m := p.v.Len(), p.v.Cap() | |||
if n < m { | |||
p.v.SetLen(n + 1) | |||
} else { | |||
t := p.v.Type().Elem() | |||
p.v.Set(reflect.Append(p.v, reflect.Zero(t))) | |||
} | |||
elem := p.v.Index(n) | |||
switch elem.Kind() { | |||
case reflect.Int32: | |||
elem.SetInt(int64(int32(x))) | |||
case reflect.Uint32: | |||
elem.SetUint(uint64(x)) | |||
case reflect.Float32: | |||
elem.SetFloat(float64(math.Float32frombits(x))) | |||
} | |||
func (p pointer) toFloat64Ptr() **float64 { | |||
return p.v.Interface().(**float64) | |||
} | |||
func (p word32Slice) Len() int { | |||
return p.v.Len() | |||
func (p pointer) toFloat64Slice() *[]float64 { | |||
return p.v.Interface().(*[]float64) | |||
} | |||
func (p word32Slice) Index(i int) uint32 { | |||
elem := p.v.Index(i) | |||
switch elem.Kind() { | |||
case reflect.Int32: | |||
return uint32(elem.Int()) | |||
case reflect.Uint32: | |||
return uint32(elem.Uint()) | |||
case reflect.Float32: | |||
return math.Float32bits(float32(elem.Float())) | |||
} | |||
panic("unreachable") | |||
func (p pointer) toFloat32() *float32 { | |||
return p.v.Interface().(*float32) | |||
} | |||
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. | |||
func structPointer_Word32Slice(p structPointer, f field) word32Slice { | |||
return word32Slice{structPointer_field(p, f)} | |||
func (p pointer) toFloat32Ptr() **float32 { | |||
return p.v.Interface().(**float32) | |||
} | |||
// word64 is like word32 but for 64-bit values. | |||
type word64 struct { | |||
v reflect.Value | |||
func (p pointer) toFloat32Slice() *[]float32 { | |||
return p.v.Interface().(*[]float32) | |||
} | |||
func word64_Set(p word64, o *Buffer, x uint64) { | |||
t := p.v.Type().Elem() | |||
switch t { | |||
case int64Type: | |||
if len(o.int64s) == 0 { | |||
o.int64s = make([]int64, uint64PoolSize) | |||
} | |||
o.int64s[0] = int64(x) | |||
p.v.Set(reflect.ValueOf(&o.int64s[0])) | |||
o.int64s = o.int64s[1:] | |||
return | |||
case uint64Type: | |||
if len(o.uint64s) == 0 { | |||
o.uint64s = make([]uint64, uint64PoolSize) | |||
} | |||
o.uint64s[0] = x | |||
p.v.Set(reflect.ValueOf(&o.uint64s[0])) | |||
o.uint64s = o.uint64s[1:] | |||
return | |||
case float64Type: | |||
if len(o.float64s) == 0 { | |||
o.float64s = make([]float64, uint64PoolSize) | |||
} | |||
o.float64s[0] = math.Float64frombits(x) | |||
p.v.Set(reflect.ValueOf(&o.float64s[0])) | |||
o.float64s = o.float64s[1:] | |||
return | |||
} | |||
panic("unreachable") | |||
func (p pointer) toString() *string { | |||
return p.v.Interface().(*string) | |||
} | |||
func word64_IsNil(p word64) bool { | |||
return p.v.IsNil() | |||
func (p pointer) toStringPtr() **string { | |||
return p.v.Interface().(**string) | |||
} | |||
func word64_Get(p word64) uint64 { | |||
elem := p.v.Elem() | |||
switch elem.Kind() { | |||
case reflect.Int64: | |||
return uint64(elem.Int()) | |||
case reflect.Uint64: | |||
return elem.Uint() | |||
case reflect.Float64: | |||
return math.Float64bits(elem.Float()) | |||
} | |||
panic("unreachable") | |||
func (p pointer) toStringSlice() *[]string { | |||
return p.v.Interface().(*[]string) | |||
} | |||
func structPointer_Word64(p structPointer, f field) word64 { | |||
return word64{structPointer_field(p, f)} | |||
func (p pointer) toBytes() *[]byte { | |||
return p.v.Interface().(*[]byte) | |||
} | |||
func (p pointer) toBytesSlice() *[][]byte { | |||
return p.v.Interface().(*[][]byte) | |||
} | |||
func (p pointer) toExtensions() *XXX_InternalExtensions { | |||
return p.v.Interface().(*XXX_InternalExtensions) | |||
} | |||
func (p pointer) toOldExtensions() *map[int32]Extension { | |||
return p.v.Interface().(*map[int32]Extension) | |||
} | |||
func (p pointer) getPointer() pointer { | |||
return pointer{v: p.v.Elem()} | |||
} | |||
func (p pointer) setPointer(q pointer) { | |||
p.v.Elem().Set(q.v) | |||
} | |||
func (p pointer) appendPointer(q pointer) { | |||
grow(p.v.Elem()).Set(q.v) | |||
} | |||
// word64Val is like word32Val but for 64-bit values. | |||
type word64Val struct { | |||
v reflect.Value | |||
// getPointerSlice copies []*T from p as a new []pointer. | |||
// This behavior differs from the implementation in pointer_unsafe.go. | |||
func (p pointer) getPointerSlice() []pointer { | |||
if p.v.IsNil() { | |||
return nil | |||
} | |||
n := p.v.Elem().Len() | |||
s := make([]pointer, n) | |||
for i := 0; i < n; i++ { | |||
s[i] = pointer{v: p.v.Elem().Index(i)} | |||
} | |||
return s | |||
} | |||
func word64Val_Set(p word64Val, o *Buffer, x uint64) { | |||
switch p.v.Type() { | |||
case int64Type: | |||
p.v.SetInt(int64(x)) | |||
return | |||
case uint64Type: | |||
p.v.SetUint(x) | |||
return | |||
case float64Type: | |||
p.v.SetFloat(math.Float64frombits(x)) | |||
// setPointerSlice copies []pointer into p as a new []*T. | |||
// This behavior differs from the implementation in pointer_unsafe.go. | |||
func (p pointer) setPointerSlice(v []pointer) { | |||
if v == nil { | |||
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) | |||
return | |||
} | |||
panic("unreachable") | |||
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) | |||
for _, p := range v { | |||
s = reflect.Append(s, p.v) | |||
} | |||
p.v.Elem().Set(s) | |||
} | |||
func word64Val_Get(p word64Val) uint64 { | |||
elem := p.v | |||
switch elem.Kind() { | |||
case reflect.Int64: | |||
return uint64(elem.Int()) | |||
case reflect.Uint64: | |||
return elem.Uint() | |||
case reflect.Float64: | |||
return math.Float64bits(elem.Float()) | |||
// getInterfacePointer returns a pointer that points to the | |||
// interface data of the interface pointed by p. | |||
func (p pointer) getInterfacePointer() pointer { | |||
if p.v.Elem().IsNil() { | |||
return pointer{v: p.v.Elem()} | |||
} | |||
panic("unreachable") | |||
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct | |||
} | |||
func structPointer_Word64Val(p structPointer, f field) word64Val { | |||
return word64Val{structPointer_field(p, f)} | |||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value { | |||
// TODO: check that p.v.Type().Elem() == t? | |||
return p.v | |||
} | |||
type word64Slice struct { | |||
v reflect.Value | |||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { | |||
atomicLock.Lock() | |||
defer atomicLock.Unlock() | |||
return *p | |||
} | |||
func (p word64Slice) Append(x uint64) { | |||
n, m := p.v.Len(), p.v.Cap() | |||
if n < m { | |||
p.v.SetLen(n + 1) | |||
} else { | |||
t := p.v.Type().Elem() | |||
p.v.Set(reflect.Append(p.v, reflect.Zero(t))) | |||
} | |||
elem := p.v.Index(n) | |||
switch elem.Kind() { | |||
case reflect.Int64: | |||
elem.SetInt(int64(int64(x))) | |||
case reflect.Uint64: | |||
elem.SetUint(uint64(x)) | |||
case reflect.Float64: | |||
elem.SetFloat(float64(math.Float64frombits(x))) | |||
} | |||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { | |||
atomicLock.Lock() | |||
defer atomicLock.Unlock() | |||
*p = v | |||
} | |||
func (p word64Slice) Len() int { | |||
return p.v.Len() | |||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { | |||
atomicLock.Lock() | |||
defer atomicLock.Unlock() | |||
return *p | |||
} | |||
func (p word64Slice) Index(i int) uint64 { | |||
elem := p.v.Index(i) | |||
switch elem.Kind() { | |||
case reflect.Int64: | |||
return uint64(elem.Int()) | |||
case reflect.Uint64: | |||
return uint64(elem.Uint()) | |||
case reflect.Float64: | |||
return math.Float64bits(float64(elem.Float())) | |||
} | |||
panic("unreachable") | |||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { | |||
atomicLock.Lock() | |||
defer atomicLock.Unlock() | |||
*p = v | |||
} | |||
func structPointer_Word64Slice(p structPointer, f field) word64Slice { | |||
return word64Slice{structPointer_field(p, f)} | |||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { | |||
atomicLock.Lock() | |||
defer atomicLock.Unlock() | |||
return *p | |||
} | |||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { | |||
atomicLock.Lock() | |||
defer atomicLock.Unlock() | |||
*p = v | |||
} | |||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { | |||
atomicLock.Lock() | |||
defer atomicLock.Unlock() | |||
return *p | |||
} | |||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { | |||
atomicLock.Lock() | |||
defer atomicLock.Unlock() | |||
*p = v | |||
} | |||
var atomicLock sync.Mutex |
@@ -29,7 +29,7 @@ | |||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
// +build !appengine | |||
// +build !purego,!appengine,!js | |||
// This file contains the implementation of the proto field accesses using package unsafe. | |||
@@ -37,38 +37,13 @@ package proto | |||
import ( | |||
"reflect" | |||
"sync/atomic" | |||
"unsafe" | |||
) | |||
// NOTE: These type_Foo functions would more idiomatically be methods, | |||
// but Go does not allow methods on pointer types, and we must preserve | |||
// some pointer type for the garbage collector. We use these | |||
// funcs with clunky names as our poor approximation to methods. | |||
// | |||
// An alternative would be | |||
// type structPointer struct { p unsafe.Pointer } | |||
// but that does not registerize as well. | |||
// A structPointer is a pointer to a struct. | |||
type structPointer unsafe.Pointer | |||
// toStructPointer returns a structPointer equivalent to the given reflect value. | |||
func toStructPointer(v reflect.Value) structPointer { | |||
return structPointer(unsafe.Pointer(v.Pointer())) | |||
} | |||
// IsNil reports whether p is nil. | |||
func structPointer_IsNil(p structPointer) bool { | |||
return p == nil | |||
} | |||
// Interface returns the struct pointer, assumed to have element type t, | |||
// as an interface value. | |||
func structPointer_Interface(p structPointer, t reflect.Type) interface{} { | |||
return reflect.NewAt(t, unsafe.Pointer(p)).Interface() | |||
} | |||
const unsafeAllowed = true | |||
// A field identifies a field in a struct, accessible from a structPointer. | |||
// A field identifies a field in a struct, accessible from a pointer. | |||
// In this implementation, a field is identified by its byte offset from the start of the struct. | |||
type field uintptr | |||
@@ -80,187 +55,254 @@ func toField(f *reflect.StructField) field { | |||
// invalidField is an invalid field identifier. | |||
const invalidField = ^field(0) | |||
// zeroField is a noop when calling pointer.offset. | |||
const zeroField = field(0) | |||
// IsValid reports whether the field identifier is valid. | |||
func (f field) IsValid() bool { | |||
return f != ^field(0) | |||
return f != invalidField | |||
} | |||
// Bytes returns the address of a []byte field in the struct. | |||
func structPointer_Bytes(p structPointer, f field) *[]byte { | |||
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
// The pointer type below is for the new table-driven encoder/decoder. | |||
// The implementation here uses unsafe.Pointer to create a generic pointer. | |||
// In pointer_reflect.go we use reflect instead of unsafe to implement | |||
// the same (but slower) interface. | |||
type pointer struct { | |||
p unsafe.Pointer | |||
} | |||
// BytesSlice returns the address of a [][]byte field in the struct. | |||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte { | |||
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
} | |||
// size of pointer | |||
var ptrSize = unsafe.Sizeof(uintptr(0)) | |||
// Bool returns the address of a *bool field in the struct. | |||
func structPointer_Bool(p structPointer, f field) **bool { | |||
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
// toPointer converts an interface of pointer type to a pointer | |||
// that points to the same target. | |||
func toPointer(i *Message) pointer { | |||
// Super-tricky - read pointer out of data word of interface value. | |||
// Saves ~25ns over the equivalent: | |||
// return valToPointer(reflect.ValueOf(*i)) | |||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} | |||
} | |||
// BoolVal returns the address of a bool field in the struct. | |||
func structPointer_BoolVal(p structPointer, f field) *bool { | |||
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
// toAddrPointer converts an interface to a pointer that points to | |||
// the interface data. | |||
func toAddrPointer(i *interface{}, isptr bool) pointer { | |||
// Super-tricky - read or get the address of data word of interface value. | |||
if isptr { | |||
// The interface is of pointer type, thus it is a direct interface. | |||
// The data word is the pointer data itself. We take its address. | |||
return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} | |||
} | |||
// The interface is not of pointer type. The data word is the pointer | |||
// to the data. | |||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} | |||
} | |||
// BoolSlice returns the address of a []bool field in the struct. | |||
func structPointer_BoolSlice(p structPointer, f field) *[]bool { | |||
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
// valToPointer converts v to a pointer. v must be of pointer type. | |||
func valToPointer(v reflect.Value) pointer { | |||
return pointer{p: unsafe.Pointer(v.Pointer())} | |||
} | |||
// String returns the address of a *string field in the struct. | |||
func structPointer_String(p structPointer, f field) **string { | |||
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
// offset converts from a pointer to a structure to a pointer to | |||
// one of its fields. | |||
func (p pointer) offset(f field) pointer { | |||
// For safety, we should panic if !f.IsValid, however calling panic causes | |||
// this to no longer be inlineable, which is a serious performance cost. | |||
/* | |||
if !f.IsValid() { | |||
panic("invalid field") | |||
} | |||
*/ | |||
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} | |||
} | |||
// StringVal returns the address of a string field in the struct. | |||
func structPointer_StringVal(p structPointer, f field) *string { | |||
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
func (p pointer) isNil() bool { | |||
return p.p == nil | |||
} | |||
// StringSlice returns the address of a []string field in the struct. | |||
func structPointer_StringSlice(p structPointer, f field) *[]string { | |||
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
func (p pointer) toInt64() *int64 { | |||
return (*int64)(p.p) | |||
} | |||
// ExtMap returns the address of an extension map field in the struct. | |||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { | |||
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
func (p pointer) toInt64Ptr() **int64 { | |||
return (**int64)(p.p) | |||
} | |||
// NewAt returns the reflect.Value for a pointer to a field in the struct. | |||
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { | |||
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) | |||
func (p pointer) toInt64Slice() *[]int64 { | |||
return (*[]int64)(p.p) | |||
} | |||
// SetStructPointer writes a *struct field in the struct. | |||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { | |||
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q | |||
func (p pointer) toInt32() *int32 { | |||
return (*int32)(p.p) | |||
} | |||
// GetStructPointer reads a *struct field in the struct. | |||
func structPointer_GetStructPointer(p structPointer, f field) structPointer { | |||
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. | |||
/* | |||
func (p pointer) toInt32Ptr() **int32 { | |||
return (**int32)(p.p) | |||
} | |||
func (p pointer) toInt32Slice() *[]int32 { | |||
return (*[]int32)(p.p) | |||
} | |||
*/ | |||
func (p pointer) getInt32Ptr() *int32 { | |||
return *(**int32)(p.p) | |||
} | |||
// StructPointerSlice the address of a []*struct field in the struct. | |||
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { | |||
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
func (p pointer) setInt32Ptr(v int32) { | |||
*(**int32)(p.p) = &v | |||
} | |||
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). | |||
type structPointerSlice []structPointer | |||
func (v *structPointerSlice) Len() int { return len(*v) } | |||
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } | |||
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } | |||
// A word32 is the address of a "pointer to 32-bit value" field. | |||
type word32 **uint32 | |||
// IsNil reports whether *v is nil. | |||
func word32_IsNil(p word32) bool { | |||
return *p == nil | |||
// getInt32Slice loads a []int32 from p. | |||
// The value returned is aliased with the original slice. | |||
// This behavior differs from the implementation in pointer_reflect.go. | |||
func (p pointer) getInt32Slice() []int32 { | |||
return *(*[]int32)(p.p) | |||
} | |||
// Set sets *v to point at a newly allocated word set to x. | |||
func word32_Set(p word32, o *Buffer, x uint32) { | |||
if len(o.uint32s) == 0 { | |||
o.uint32s = make([]uint32, uint32PoolSize) | |||
} | |||
o.uint32s[0] = x | |||
*p = &o.uint32s[0] | |||
o.uint32s = o.uint32s[1:] | |||
// setInt32Slice stores a []int32 to p. | |||
// The value set is aliased with the input slice. | |||
// This behavior differs from the implementation in pointer_reflect.go. | |||
func (p pointer) setInt32Slice(v []int32) { | |||
*(*[]int32)(p.p) = v | |||
} | |||
// Get gets the value pointed at by *v. | |||
func word32_Get(p word32) uint32 { | |||
return **p | |||
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? | |||
func (p pointer) appendInt32Slice(v int32) { | |||
s := (*[]int32)(p.p) | |||
*s = append(*s, v) | |||
} | |||
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. | |||
func structPointer_Word32(p structPointer, f field) word32 { | |||
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) | |||
func (p pointer) toUint64() *uint64 { | |||
return (*uint64)(p.p) | |||
} | |||
// A word32Val is the address of a 32-bit value field. | |||
type word32Val *uint32 | |||
// Set sets *p to x. | |||
func word32Val_Set(p word32Val, x uint32) { | |||
*p = x | |||
func (p pointer) toUint64Ptr() **uint64 { | |||
return (**uint64)(p.p) | |||
} | |||
// Get gets the value pointed at by p. | |||
func word32Val_Get(p word32Val) uint32 { | |||
return *p | |||
func (p pointer) toUint64Slice() *[]uint64 { | |||
return (*[]uint64)(p.p) | |||
} | |||
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. | |||
func structPointer_Word32Val(p structPointer, f field) word32Val { | |||
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) | |||
func (p pointer) toUint32() *uint32 { | |||
return (*uint32)(p.p) | |||
} | |||
// A word32Slice is a slice of 32-bit values. | |||
type word32Slice []uint32 | |||
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } | |||
func (v *word32Slice) Len() int { return len(*v) } | |||
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } | |||
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. | |||
func structPointer_Word32Slice(p structPointer, f field) *word32Slice { | |||
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
func (p pointer) toUint32Ptr() **uint32 { | |||
return (**uint32)(p.p) | |||
} | |||
// word64 is like word32 but for 64-bit values. | |||
type word64 **uint64 | |||
func word64_Set(p word64, o *Buffer, x uint64) { | |||
if len(o.uint64s) == 0 { | |||
o.uint64s = make([]uint64, uint64PoolSize) | |||
} | |||
o.uint64s[0] = x | |||
*p = &o.uint64s[0] | |||
o.uint64s = o.uint64s[1:] | |||
func (p pointer) toUint32Slice() *[]uint32 { | |||
return (*[]uint32)(p.p) | |||
} | |||
func word64_IsNil(p word64) bool { | |||
return *p == nil | |||
func (p pointer) toBool() *bool { | |||
return (*bool)(p.p) | |||
} | |||
func word64_Get(p word64) uint64 { | |||
return **p | |||
func (p pointer) toBoolPtr() **bool { | |||
return (**bool)(p.p) | |||
} | |||
func (p pointer) toBoolSlice() *[]bool { | |||
return (*[]bool)(p.p) | |||
} | |||
func (p pointer) toFloat64() *float64 { | |||
return (*float64)(p.p) | |||
} | |||
func (p pointer) toFloat64Ptr() **float64 { | |||
return (**float64)(p.p) | |||
} | |||
func (p pointer) toFloat64Slice() *[]float64 { | |||
return (*[]float64)(p.p) | |||
} | |||
func (p pointer) toFloat32() *float32 { | |||
return (*float32)(p.p) | |||
} | |||
func (p pointer) toFloat32Ptr() **float32 { | |||
return (**float32)(p.p) | |||
} | |||
func (p pointer) toFloat32Slice() *[]float32 { | |||
return (*[]float32)(p.p) | |||
} | |||
func (p pointer) toString() *string { | |||
return (*string)(p.p) | |||
} | |||
func (p pointer) toStringPtr() **string { | |||
return (**string)(p.p) | |||
} | |||
func (p pointer) toStringSlice() *[]string { | |||
return (*[]string)(p.p) | |||
} | |||
func (p pointer) toBytes() *[]byte { | |||
return (*[]byte)(p.p) | |||
} | |||
func (p pointer) toBytesSlice() *[][]byte { | |||
return (*[][]byte)(p.p) | |||
} | |||
func (p pointer) toExtensions() *XXX_InternalExtensions { | |||
return (*XXX_InternalExtensions)(p.p) | |||
} | |||
func (p pointer) toOldExtensions() *map[int32]Extension { | |||
return (*map[int32]Extension)(p.p) | |||
} | |||
func structPointer_Word64(p structPointer, f field) word64 { | |||
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) | |||
// getPointerSlice loads []*T from p as a []pointer. | |||
// The value returned is aliased with the original slice. | |||
// This behavior differs from the implementation in pointer_reflect.go. | |||
func (p pointer) getPointerSlice() []pointer { | |||
// Super-tricky - p should point to a []*T where T is a | |||
// message type. We load it as []pointer. | |||
return *(*[]pointer)(p.p) | |||
} | |||
// word64Val is like word32Val but for 64-bit values. | |||
type word64Val *uint64 | |||
// setPointerSlice stores []pointer into p as a []*T. | |||
// The value set is aliased with the input slice. | |||
// This behavior differs from the implementation in pointer_reflect.go. | |||
func (p pointer) setPointerSlice(v []pointer) { | |||
// Super-tricky - p should point to a []*T where T is a | |||
// message type. We store it as []pointer. | |||
*(*[]pointer)(p.p) = v | |||
} | |||
func word64Val_Set(p word64Val, o *Buffer, x uint64) { | |||
*p = x | |||
// getPointer loads the pointer at p and returns it. | |||
func (p pointer) getPointer() pointer { | |||
return pointer{p: *(*unsafe.Pointer)(p.p)} | |||
} | |||
func word64Val_Get(p word64Val) uint64 { | |||
return *p | |||
// setPointer stores the pointer q at p. | |||
func (p pointer) setPointer(q pointer) { | |||
*(*unsafe.Pointer)(p.p) = q.p | |||
} | |||
func structPointer_Word64Val(p structPointer, f field) word64Val { | |||
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) | |||
// append q to the slice pointed to by p. | |||
func (p pointer) appendPointer(q pointer) { | |||
s := (*[]unsafe.Pointer)(p.p) | |||
*s = append(*s, q.p) | |||
} | |||
// word64Slice is like word32Slice but for 64-bit values. | |||
type word64Slice []uint64 | |||
// getInterfacePointer returns a pointer that points to the | |||
// interface data of the interface pointed by p. | |||
func (p pointer) getInterfacePointer() pointer { | |||
// Super-tricky - read pointer out of data word of interface value. | |||
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} | |||
} | |||
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } | |||
func (v *word64Slice) Len() int { return len(*v) } | |||
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } | |||
// asPointerTo returns a reflect.Value that is a pointer to an | |||
// object of type t stored at p. | |||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value { | |||
return reflect.NewAt(t, p.p) | |||
} | |||
func structPointer_Word64Slice(p structPointer, f field) *word64Slice { | |||
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { | |||
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) | |||
} | |||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { | |||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) | |||
} | |||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { | |||
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) | |||
} | |||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { | |||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) | |||
} | |||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { | |||
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) | |||
} | |||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { | |||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) | |||
} | |||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { | |||
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) | |||
} | |||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { | |||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) | |||
} |
@@ -58,42 +58,6 @@ const ( | |||
WireFixed32 = 5 | |||
) | |||
const startSize = 10 // initial slice/string sizes | |||
// Encoders are defined in encode.go | |||
// An encoder outputs the full representation of a field, including its | |||
// tag and encoder type. | |||
type encoder func(p *Buffer, prop *Properties, base structPointer) error | |||
// A valueEncoder encodes a single integer in a particular encoding. | |||
type valueEncoder func(o *Buffer, x uint64) error | |||
// Sizers are defined in encode.go | |||
// A sizer returns the encoded size of a field, including its tag and encoder | |||
// type. | |||
type sizer func(prop *Properties, base structPointer) int | |||
// A valueSizer returns the encoded size of a single integer in a particular | |||
// encoding. | |||
type valueSizer func(x uint64) int | |||
// Decoders are defined in decode.go | |||
// A decoder creates a value from its wire representation. | |||
// Unrecognized subelements are saved in unrec. | |||
type decoder func(p *Buffer, prop *Properties, base structPointer) error | |||
// A valueDecoder decodes a single integer in a particular encoding. | |||
type valueDecoder func(o *Buffer) (x uint64, err error) | |||
// A oneofMarshaler does the marshaling for all oneof fields in a message. | |||
type oneofMarshaler func(Message, *Buffer) error | |||
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. | |||
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) | |||
// A oneofSizer does the sizing for all oneof fields in a message. | |||
type oneofSizer func(Message) int | |||
// tagMap is an optimization over map[int]int for typical protocol buffer | |||
// use-cases. Encoded protocol buffers are often in tag order with small tag | |||
// numbers. | |||
@@ -140,13 +104,6 @@ type StructProperties struct { | |||
decoderTags tagMap // map from proto tag to struct field number | |||
decoderOrigNames map[string]int // map from original name to struct field number | |||
order []int // list of struct field numbers in tag order | |||
unrecField field // field id of the XXX_unrecognized []byte field | |||
extendable bool // is this an extendable proto | |||
oneofMarshaler oneofMarshaler | |||
oneofUnmarshaler oneofUnmarshaler | |||
oneofSizer oneofSizer | |||
stype reflect.Type | |||
// OneofTypes contains information about the oneof fields in this message. | |||
// It is keyed by the original name of a field. | |||
@@ -182,41 +139,24 @@ type Properties struct { | |||
Repeated bool | |||
Packed bool // relevant for repeated primitives only | |||
Enum string // set for enum types only | |||
proto3 bool // whether this is known to be a proto3 field; set for []byte only | |||
proto3 bool // whether this is known to be a proto3 field | |||
oneof bool // whether this is a oneof field | |||
Default string // default value | |||
HasDefault bool // whether an explicit default was provided | |||
def_uint64 uint64 | |||
enc encoder | |||
valEnc valueEncoder // set for bool and numeric types only | |||
field field | |||
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) | |||
tagbuf [8]byte | |||
stype reflect.Type // set for struct types only | |||
sprop *StructProperties // set for struct types only | |||
isMarshaler bool | |||
isUnmarshaler bool | |||
mtype reflect.Type // set for map types only | |||
mkeyprop *Properties // set for map types only | |||
mvalprop *Properties // set for map types only | |||
size sizer | |||
valSize valueSizer // set for bool and numeric types only | |||
dec decoder | |||
valDec valueDecoder // set for bool and numeric types only | |||
// If this is a packable field, this will be the decoder for the packed version of the field. | |||
packedDec decoder | |||
stype reflect.Type // set for struct types only | |||
sprop *StructProperties // set for struct types only | |||
mtype reflect.Type // set for map types only | |||
MapKeyProp *Properties // set for map types only | |||
MapValProp *Properties // set for map types only | |||
} | |||
// String formats the properties in the protobuf struct field tag style. | |||
func (p *Properties) String() string { | |||
s := p.Wire | |||
s = "," | |||
s += "," | |||
s += strconv.Itoa(p.Tag) | |||
if p.Required { | |||
s += ",req" | |||
@@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) { | |||
switch p.Wire { | |||
case "varint": | |||
p.WireType = WireVarint | |||
p.valEnc = (*Buffer).EncodeVarint | |||
p.valDec = (*Buffer).DecodeVarint | |||
p.valSize = sizeVarint | |||
case "fixed32": | |||
p.WireType = WireFixed32 | |||
p.valEnc = (*Buffer).EncodeFixed32 | |||
p.valDec = (*Buffer).DecodeFixed32 | |||
p.valSize = sizeFixed32 | |||
case "fixed64": | |||
p.WireType = WireFixed64 | |||
p.valEnc = (*Buffer).EncodeFixed64 | |||
p.valDec = (*Buffer).DecodeFixed64 | |||
p.valSize = sizeFixed64 | |||
case "zigzag32": | |||
p.WireType = WireVarint | |||
p.valEnc = (*Buffer).EncodeZigzag32 | |||
p.valDec = (*Buffer).DecodeZigzag32 | |||
p.valSize = sizeZigzag32 | |||
case "zigzag64": | |||
p.WireType = WireVarint | |||
p.valEnc = (*Buffer).EncodeZigzag64 | |||
p.valDec = (*Buffer).DecodeZigzag64 | |||
p.valSize = sizeZigzag64 | |||
case "bytes", "group": | |||
p.WireType = WireBytes | |||
// no numeric converter for non-numeric types | |||
@@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) { | |||
return | |||
} | |||
outer: | |||
for i := 2; i < len(fields); i++ { | |||
f := fields[i] | |||
switch { | |||
@@ -326,260 +252,41 @@ func (p *Properties) Parse(s string) { | |||
if i+1 < len(fields) { | |||
// Commas aren't escaped, and def is always last. | |||
p.Default += "," + strings.Join(fields[i+1:], ",") | |||
break | |||
break outer | |||
} | |||
} | |||
} | |||
} | |||
func logNoSliceEnc(t1, t2 reflect.Type) { | |||
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) | |||
} | |||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() | |||
// Initialize the fields for encoding and decoding. | |||
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { | |||
p.enc = nil | |||
p.dec = nil | |||
p.size = nil | |||
// setFieldProps initializes the field properties for submessages and maps. | |||
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { | |||
switch t1 := typ; t1.Kind() { | |||
default: | |||
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) | |||
// proto3 scalar types | |||
case reflect.Bool: | |||
p.enc = (*Buffer).enc_proto3_bool | |||
p.dec = (*Buffer).dec_proto3_bool | |||
p.size = size_proto3_bool | |||
case reflect.Int32: | |||
p.enc = (*Buffer).enc_proto3_int32 | |||
p.dec = (*Buffer).dec_proto3_int32 | |||
p.size = size_proto3_int32 | |||
case reflect.Uint32: | |||
p.enc = (*Buffer).enc_proto3_uint32 | |||
p.dec = (*Buffer).dec_proto3_int32 // can reuse | |||
p.size = size_proto3_uint32 | |||
case reflect.Int64, reflect.Uint64: | |||
p.enc = (*Buffer).enc_proto3_int64 | |||
p.dec = (*Buffer).dec_proto3_int64 | |||
p.size = size_proto3_int64 | |||
case reflect.Float32: | |||
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits | |||
p.dec = (*Buffer).dec_proto3_int32 | |||
p.size = size_proto3_uint32 | |||
case reflect.Float64: | |||
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits | |||
p.dec = (*Buffer).dec_proto3_int64 | |||
p.size = size_proto3_int64 | |||
case reflect.String: | |||
p.enc = (*Buffer).enc_proto3_string | |||
p.dec = (*Buffer).dec_proto3_string | |||
p.size = size_proto3_string | |||
case reflect.Ptr: | |||
switch t2 := t1.Elem(); t2.Kind() { | |||
default: | |||
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) | |||
break | |||
case reflect.Bool: | |||
p.enc = (*Buffer).enc_bool | |||
p.dec = (*Buffer).dec_bool | |||
p.size = size_bool | |||
case reflect.Int32: | |||
p.enc = (*Buffer).enc_int32 | |||
p.dec = (*Buffer).dec_int32 | |||
p.size = size_int32 | |||
case reflect.Uint32: | |||
p.enc = (*Buffer).enc_uint32 | |||
p.dec = (*Buffer).dec_int32 // can reuse | |||
p.size = size_uint32 | |||
case reflect.Int64, reflect.Uint64: | |||
p.enc = (*Buffer).enc_int64 | |||
p.dec = (*Buffer).dec_int64 | |||
p.size = size_int64 | |||
case reflect.Float32: | |||
p.enc = (*Buffer).enc_uint32 // can just treat them as bits | |||
p.dec = (*Buffer).dec_int32 | |||
p.size = size_uint32 | |||
case reflect.Float64: | |||
p.enc = (*Buffer).enc_int64 // can just treat them as bits | |||
p.dec = (*Buffer).dec_int64 | |||
p.size = size_int64 | |||
case reflect.String: | |||
p.enc = (*Buffer).enc_string | |||
p.dec = (*Buffer).dec_string | |||
p.size = size_string | |||
case reflect.Struct: | |||
if t1.Elem().Kind() == reflect.Struct { | |||
p.stype = t1.Elem() | |||
p.isMarshaler = isMarshaler(t1) | |||
p.isUnmarshaler = isUnmarshaler(t1) | |||
if p.Wire == "bytes" { | |||
p.enc = (*Buffer).enc_struct_message | |||
p.dec = (*Buffer).dec_struct_message | |||
p.size = size_struct_message | |||
} else { | |||
p.enc = (*Buffer).enc_struct_group | |||
p.dec = (*Buffer).dec_struct_group | |||
p.size = size_struct_group | |||
} | |||
} | |||
case reflect.Slice: | |||
switch t2 := t1.Elem(); t2.Kind() { | |||
default: | |||
logNoSliceEnc(t1, t2) | |||
break | |||
case reflect.Bool: | |||
if p.Packed { | |||
p.enc = (*Buffer).enc_slice_packed_bool | |||
p.size = size_slice_packed_bool | |||
} else { | |||
p.enc = (*Buffer).enc_slice_bool | |||
p.size = size_slice_bool | |||
} | |||
p.dec = (*Buffer).dec_slice_bool | |||
p.packedDec = (*Buffer).dec_slice_packed_bool | |||
case reflect.Int32: | |||
if p.Packed { | |||
p.enc = (*Buffer).enc_slice_packed_int32 | |||
p.size = size_slice_packed_int32 | |||
} else { | |||
p.enc = (*Buffer).enc_slice_int32 | |||
p.size = size_slice_int32 | |||
} | |||
p.dec = (*Buffer).dec_slice_int32 | |||
p.packedDec = (*Buffer).dec_slice_packed_int32 | |||
case reflect.Uint32: | |||
if p.Packed { | |||
p.enc = (*Buffer).enc_slice_packed_uint32 | |||
p.size = size_slice_packed_uint32 | |||
} else { | |||
p.enc = (*Buffer).enc_slice_uint32 | |||
p.size = size_slice_uint32 | |||
} | |||
p.dec = (*Buffer).dec_slice_int32 | |||
p.packedDec = (*Buffer).dec_slice_packed_int32 | |||
case reflect.Int64, reflect.Uint64: | |||
if p.Packed { | |||
p.enc = (*Buffer).enc_slice_packed_int64 | |||
p.size = size_slice_packed_int64 | |||
} else { | |||
p.enc = (*Buffer).enc_slice_int64 | |||
p.size = size_slice_int64 | |||
} | |||
p.dec = (*Buffer).dec_slice_int64 | |||
p.packedDec = (*Buffer).dec_slice_packed_int64 | |||
case reflect.Uint8: | |||
p.enc = (*Buffer).enc_slice_byte | |||
p.dec = (*Buffer).dec_slice_byte | |||
p.size = size_slice_byte | |||
// This is a []byte, which is either a bytes field, | |||
// or the value of a map field. In the latter case, | |||
// we always encode an empty []byte, so we should not | |||
// use the proto3 enc/size funcs. | |||
// f == nil iff this is the key/value of a map field. | |||
if p.proto3 && f != nil { | |||
p.enc = (*Buffer).enc_proto3_slice_byte | |||
p.size = size_proto3_slice_byte | |||
} | |||
case reflect.Float32, reflect.Float64: | |||
switch t2.Bits() { | |||
case 32: | |||
// can just treat them as bits | |||
if p.Packed { | |||
p.enc = (*Buffer).enc_slice_packed_uint32 | |||
p.size = size_slice_packed_uint32 | |||
} else { | |||
p.enc = (*Buffer).enc_slice_uint32 | |||
p.size = size_slice_uint32 | |||
} | |||
p.dec = (*Buffer).dec_slice_int32 | |||
p.packedDec = (*Buffer).dec_slice_packed_int32 | |||
case 64: | |||
// can just treat them as bits | |||
if p.Packed { | |||
p.enc = (*Buffer).enc_slice_packed_int64 | |||
p.size = size_slice_packed_int64 | |||
} else { | |||
p.enc = (*Buffer).enc_slice_int64 | |||
p.size = size_slice_int64 | |||
} | |||
p.dec = (*Buffer).dec_slice_int64 | |||
p.packedDec = (*Buffer).dec_slice_packed_int64 | |||
default: | |||
logNoSliceEnc(t1, t2) | |||
break | |||
} | |||
case reflect.String: | |||
p.enc = (*Buffer).enc_slice_string | |||
p.dec = (*Buffer).dec_slice_string | |||
p.size = size_slice_string | |||
case reflect.Ptr: | |||
switch t3 := t2.Elem(); t3.Kind() { | |||
default: | |||
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) | |||
break | |||
case reflect.Struct: | |||
p.stype = t2.Elem() | |||
p.isMarshaler = isMarshaler(t2) | |||
p.isUnmarshaler = isUnmarshaler(t2) | |||
if p.Wire == "bytes" { | |||
p.enc = (*Buffer).enc_slice_struct_message | |||
p.dec = (*Buffer).dec_slice_struct_message | |||
p.size = size_slice_struct_message | |||
} else { | |||
p.enc = (*Buffer).enc_slice_struct_group | |||
p.dec = (*Buffer).dec_slice_struct_group | |||
p.size = size_slice_struct_group | |||
} | |||
} | |||
case reflect.Slice: | |||
switch t2.Elem().Kind() { | |||
default: | |||
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) | |||
break | |||
case reflect.Uint8: | |||
p.enc = (*Buffer).enc_slice_slice_byte | |||
p.dec = (*Buffer).dec_slice_slice_byte | |||
p.size = size_slice_slice_byte | |||
} | |||
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { | |||
p.stype = t2.Elem() | |||
} | |||
case reflect.Map: | |||
p.enc = (*Buffer).enc_new_map | |||
p.dec = (*Buffer).dec_new_map | |||
p.size = size_new_map | |||
p.mtype = t1 | |||
p.mkeyprop = &Properties{} | |||
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) | |||
p.mvalprop = &Properties{} | |||
p.MapKeyProp = &Properties{} | |||
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) | |||
p.MapValProp = &Properties{} | |||
vtype := p.mtype.Elem() | |||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { | |||
// The value type is not a message (*T) or bytes ([]byte), | |||
// so we need encoders for the pointer to this type. | |||
vtype = reflect.PtrTo(vtype) | |||
} | |||
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) | |||
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) | |||
} | |||
// precalculate tag code | |||
wire := p.WireType | |||
if p.Packed { | |||
wire = WireBytes | |||
} | |||
x := uint32(p.Tag)<<3 | uint32(wire) | |||
i := 0 | |||
for i = 0; x > 127; i++ { | |||
p.tagbuf[i] = 0x80 | uint8(x&0x7F) | |||
x >>= 7 | |||
} | |||
p.tagbuf[i] = uint8(x) | |||
p.tagcode = p.tagbuf[0 : i+1] | |||
if p.stype != nil { | |||
if lockGetProp { | |||
p.sprop = GetProperties(p.stype) | |||
@@ -590,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock | |||
} | |||
var ( | |||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() | |||
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() | |||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() | |||
) | |||
// isMarshaler reports whether type t implements Marshaler. | |||
func isMarshaler(t reflect.Type) bool { | |||
// We're checking for (likely) pointer-receiver methods | |||
// so if t is not a pointer, something is very wrong. | |||
// The calls above only invoke isMarshaler on pointer types. | |||
if t.Kind() != reflect.Ptr { | |||
panic("proto: misuse of isMarshaler") | |||
} | |||
return t.Implements(marshalerType) | |||
} | |||
// isUnmarshaler reports whether type t implements Unmarshaler. | |||
func isUnmarshaler(t reflect.Type) bool { | |||
// We're checking for (likely) pointer-receiver methods | |||
// so if t is not a pointer, something is very wrong. | |||
// The calls above only invoke isUnmarshaler on pointer types. | |||
if t.Kind() != reflect.Ptr { | |||
panic("proto: misuse of isUnmarshaler") | |||
} | |||
return t.Implements(unmarshalerType) | |||
} | |||
// Init populates the properties from a protocol buffer struct tag. | |||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { | |||
p.init(typ, name, tag, f, true) | |||
@@ -625,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF | |||
// "bytes,49,opt,def=hello!" | |||
p.Name = name | |||
p.OrigName = name | |||
if f != nil { | |||
p.field = toField(f) | |||
} | |||
if tag == "" { | |||
return | |||
} | |||
p.Parse(tag) | |||
p.setEncAndDec(typ, f, lockGetProp) | |||
p.setFieldProps(typ, f, lockGetProp) | |||
} | |||
var ( | |||
@@ -682,8 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { | |||
propertiesMap[t] = prop | |||
// build properties | |||
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) | |||
prop.unrecField = invalidField | |||
prop.Prop = make([]*Properties, t.NumField()) | |||
prop.order = make([]int, t.NumField()) | |||
@@ -693,15 +372,11 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { | |||
name := f.Name | |||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) | |||
if f.Name == "XXX_extensions" { // special case | |||
p.enc = (*Buffer).enc_map | |||
p.dec = nil // not needed | |||
p.size = size_map | |||
oneof := f.Tag.Get("protobuf_oneof") // special case | |||
if oneof != "" { | |||
// Oneof fields don't use the traditional protobuf tag. | |||
p.OrigName = oneof | |||
} | |||
if f.Name == "XXX_unrecognized" { // special case | |||
prop.unrecField = toField(&f) | |||
} | |||
oneof := f.Tag.Get("protobuf_oneof") != "" // special case | |||
prop.Prop[i] = p | |||
prop.order[i] = i | |||
if debug { | |||
@@ -711,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { | |||
} | |||
print("\n") | |||
} | |||
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { | |||
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") | |||
} | |||
} | |||
// Re-order prop.order. | |||
@@ -724,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { | |||
} | |||
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { | |||
var oots []interface{} | |||
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() | |||
prop.stype = t | |||
_, _, _, oots = om.XXX_OneofFuncs() | |||
// Interpret oneof metadata. | |||
prop.OneofTypes = make(map[string]*OneofProperties) | |||
@@ -775,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { | |||
return prop | |||
} | |||
// Return the Properties object for the x[0]'th field of the structure. | |||
func propByIndex(t reflect.Type, x []int) *Properties { | |||
if len(x) != 1 { | |||
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) | |||
return nil | |||
} | |||
prop := GetProperties(t) | |||
return prop.Prop[x[0]] | |||
} | |||
// Get the address and type of a pointer to a struct from an interface. | |||
func getbase(pb Message) (t reflect.Type, b structPointer, err error) { | |||
if pb == nil { | |||
err = ErrNil | |||
return | |||
} | |||
// get the reflect type of the pointer to the struct. | |||
t = reflect.TypeOf(pb) | |||
// get the address of the struct. | |||
value := reflect.ValueOf(pb) | |||
b = toStructPointer(value) | |||
return | |||
} | |||
// A global registry of enum types. | |||
// The generated code will register the generated maps by calling RegisterEnum. | |||
@@ -822,25 +469,76 @@ func EnumValueMap(enumType string) map[string]int32 { | |||
// A registry of all linked message types. | |||
// The string is a fully-qualified proto name ("pkg.Message"). | |||
var ( | |||
protoTypes = make(map[string]reflect.Type) | |||
revProtoTypes = make(map[reflect.Type]string) | |||
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers | |||
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types | |||
revProtoTypes = make(map[reflect.Type]string) | |||
) | |||
// RegisterType is called from generated code and maps from the fully qualified | |||
// proto name to the type (pointer to struct) of the protocol buffer. | |||
func RegisterType(x Message, name string) { | |||
if _, ok := protoTypes[name]; ok { | |||
if _, ok := protoTypedNils[name]; ok { | |||
// TODO: Some day, make this a panic. | |||
log.Printf("proto: duplicate proto type registered: %s", name) | |||
return | |||
} | |||
t := reflect.TypeOf(x) | |||
protoTypes[name] = t | |||
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { | |||
// Generated code always calls RegisterType with nil x. | |||
// This check is just for extra safety. | |||
protoTypedNils[name] = x | |||
} else { | |||
protoTypedNils[name] = reflect.Zero(t).Interface().(Message) | |||
} | |||
revProtoTypes[t] = name | |||
} | |||
// RegisterMapType is called from generated code and maps from the fully qualified | |||
// proto name to the native map type of the proto map definition. | |||
func RegisterMapType(x interface{}, name string) { | |||
if reflect.TypeOf(x).Kind() != reflect.Map { | |||
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) | |||
} | |||
if _, ok := protoMapTypes[name]; ok { | |||
log.Printf("proto: duplicate proto type registered: %s", name) | |||
return | |||
} | |||
t := reflect.TypeOf(x) | |||
protoMapTypes[name] = t | |||
revProtoTypes[t] = name | |||
} | |||
// MessageName returns the fully-qualified proto name for the given message type. | |||
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } | |||
func MessageName(x Message) string { | |||
type xname interface { | |||
XXX_MessageName() string | |||
} | |||
if m, ok := x.(xname); ok { | |||
return m.XXX_MessageName() | |||
} | |||
return revProtoTypes[reflect.TypeOf(x)] | |||
} | |||
// MessageType returns the message type (pointer to struct) for a named message. | |||
func MessageType(name string) reflect.Type { return protoTypes[name] } | |||
// The type is not guaranteed to implement proto.Message if the name refers to a | |||
// map entry. | |||
func MessageType(name string) reflect.Type { | |||
if t, ok := protoTypedNils[name]; ok { | |||
return reflect.TypeOf(t) | |||
} | |||
return protoMapTypes[name] | |||
} | |||
// A registry of all linked proto files. | |||
var ( | |||
protoFiles = make(map[string][]byte) // file name => fileDescriptor | |||
) | |||
// RegisterFile is called from generated code and maps from the | |||
// full file name of a .proto file to its compressed FileDescriptorProto. | |||
func RegisterFile(filename string, fileDescriptor []byte) { | |||
protoFiles[filename] = fileDescriptor | |||
} | |||
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. | |||
func FileDescriptor(filename string) []byte { return protoFiles[filename] } |
@@ -0,0 +1,654 @@ | |||
// Go support for Protocol Buffers - Google's data interchange format | |||
// | |||
// Copyright 2016 The Go Authors. All rights reserved. | |||
// https://github.com/golang/protobuf | |||
// | |||
// Redistribution and use in source and binary forms, with or without | |||
// modification, are permitted provided that the following conditions are | |||
// met: | |||
// | |||
// * Redistributions of source code must retain the above copyright | |||
// notice, this list of conditions and the following disclaimer. | |||
// * Redistributions in binary form must reproduce the above | |||
// copyright notice, this list of conditions and the following disclaimer | |||
// in the documentation and/or other materials provided with the | |||
// distribution. | |||
// * Neither the name of Google Inc. nor the names of its | |||
// contributors may be used to endorse or promote products derived from | |||
// this software without specific prior written permission. | |||
// | |||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
package proto | |||
import ( | |||
"fmt" | |||
"reflect" | |||
"strings" | |||
"sync" | |||
"sync/atomic" | |||
) | |||
// Merge merges the src message into dst. | |||
// This assumes that dst and src of the same type and are non-nil. | |||
func (a *InternalMessageInfo) Merge(dst, src Message) { | |||
mi := atomicLoadMergeInfo(&a.merge) | |||
if mi == nil { | |||
mi = getMergeInfo(reflect.TypeOf(dst).Elem()) | |||
atomicStoreMergeInfo(&a.merge, mi) | |||
} | |||
mi.merge(toPointer(&dst), toPointer(&src)) | |||
} | |||
type mergeInfo struct { | |||
typ reflect.Type | |||
initialized int32 // 0: only typ is valid, 1: everything is valid | |||
lock sync.Mutex | |||
fields []mergeFieldInfo | |||
unrecognized field // Offset of XXX_unrecognized | |||
} | |||
type mergeFieldInfo struct { | |||
field field // Offset of field, guaranteed to be valid | |||
// isPointer reports whether the value in the field is a pointer. | |||
// This is true for the following situations: | |||
// * Pointer to struct | |||
// * Pointer to basic type (proto2 only) | |||
// * Slice (first value in slice header is a pointer) | |||
// * String (first value in string header is a pointer) | |||
isPointer bool | |||
// basicWidth reports the width of the field assuming that it is directly | |||
// embedded in the struct (as is the case for basic types in proto3). | |||
// The possible values are: | |||
// 0: invalid | |||
// 1: bool | |||
// 4: int32, uint32, float32 | |||
// 8: int64, uint64, float64 | |||
basicWidth int | |||
// Where dst and src are pointers to the types being merged. | |||
merge func(dst, src pointer) | |||
} | |||
var ( | |||
mergeInfoMap = map[reflect.Type]*mergeInfo{} | |||
mergeInfoLock sync.Mutex | |||
) | |||
func getMergeInfo(t reflect.Type) *mergeInfo { | |||
mergeInfoLock.Lock() | |||
defer mergeInfoLock.Unlock() | |||
mi := mergeInfoMap[t] | |||
if mi == nil { | |||
mi = &mergeInfo{typ: t} | |||
mergeInfoMap[t] = mi | |||
} | |||
return mi | |||
} | |||
// merge merges src into dst assuming they are both of type *mi.typ. | |||
func (mi *mergeInfo) merge(dst, src pointer) { | |||
if dst.isNil() { | |||
panic("proto: nil destination") | |||
} | |||
if src.isNil() { | |||
return // Nothing to do. | |||
} | |||
if atomic.LoadInt32(&mi.initialized) == 0 { | |||
mi.computeMergeInfo() | |||
} | |||
for _, fi := range mi.fields { | |||
sfp := src.offset(fi.field) | |||
// As an optimization, we can avoid the merge function call cost | |||
// if we know for sure that the source will have no effect | |||
// by checking if it is the zero value. | |||
if unsafeAllowed { | |||
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string | |||
continue | |||
} | |||
if fi.basicWidth > 0 { | |||
switch { | |||
case fi.basicWidth == 1 && !*sfp.toBool(): | |||
continue | |||
case fi.basicWidth == 4 && *sfp.toUint32() == 0: | |||
continue | |||
case fi.basicWidth == 8 && *sfp.toUint64() == 0: | |||
continue | |||
} | |||
} | |||
} | |||
dfp := dst.offset(fi.field) | |||
fi.merge(dfp, sfp) | |||
} | |||
// TODO: Make this faster? | |||
out := dst.asPointerTo(mi.typ).Elem() | |||
in := src.asPointerTo(mi.typ).Elem() | |||
if emIn, err := extendable(in.Addr().Interface()); err == nil { | |||
emOut, _ := extendable(out.Addr().Interface()) | |||
mIn, muIn := emIn.extensionsRead() | |||
if mIn != nil { | |||
mOut := emOut.extensionsWrite() | |||
muIn.Lock() | |||
mergeExtension(mOut, mIn) | |||
muIn.Unlock() | |||
} | |||
} | |||
if mi.unrecognized.IsValid() { | |||
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { | |||
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) | |||
} | |||
} | |||
} | |||
func (mi *mergeInfo) computeMergeInfo() { | |||
mi.lock.Lock() | |||
defer mi.lock.Unlock() | |||
if mi.initialized != 0 { | |||
return | |||
} | |||
t := mi.typ | |||
n := t.NumField() | |||
props := GetProperties(t) | |||
for i := 0; i < n; i++ { | |||
f := t.Field(i) | |||
if strings.HasPrefix(f.Name, "XXX_") { | |||
continue | |||
} | |||
mfi := mergeFieldInfo{field: toField(&f)} | |||
tf := f.Type | |||
// As an optimization, we can avoid the merge function call cost | |||
// if we know for sure that the source will have no effect | |||
// by checking if it is the zero value. | |||
if unsafeAllowed { | |||
switch tf.Kind() { | |||
case reflect.Ptr, reflect.Slice, reflect.String: | |||
// As a special case, we assume slices and strings are pointers | |||
// since we know that the first field in the SliceSlice or | |||
// StringHeader is a data pointer. | |||
mfi.isPointer = true | |||
case reflect.Bool: | |||
mfi.basicWidth = 1 | |||
case reflect.Int32, reflect.Uint32, reflect.Float32: | |||
mfi.basicWidth = 4 | |||
case reflect.Int64, reflect.Uint64, reflect.Float64: | |||
mfi.basicWidth = 8 | |||
} | |||
} | |||
// Unwrap tf to get at its most basic type. | |||
var isPointer, isSlice bool | |||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { | |||
isSlice = true | |||
tf = tf.Elem() | |||
} | |||
if tf.Kind() == reflect.Ptr { | |||
isPointer = true | |||
tf = tf.Elem() | |||
} | |||
if isPointer && isSlice && tf.Kind() != reflect.Struct { | |||
panic("both pointer and slice for basic type in " + tf.Name()) | |||
} | |||
switch tf.Kind() { | |||
case reflect.Int32: | |||
switch { | |||
case isSlice: // E.g., []int32 | |||
mfi.merge = func(dst, src pointer) { | |||
// NOTE: toInt32Slice is not defined (see pointer_reflect.go). | |||
/* | |||
sfsp := src.toInt32Slice() | |||
if *sfsp != nil { | |||
dfsp := dst.toInt32Slice() | |||
*dfsp = append(*dfsp, *sfsp...) | |||
if *dfsp == nil { | |||
*dfsp = []int64{} | |||
} | |||
} | |||
*/ | |||
sfs := src.getInt32Slice() | |||
if sfs != nil { | |||
dfs := dst.getInt32Slice() | |||
dfs = append(dfs, sfs...) | |||
if dfs == nil { | |||
dfs = []int32{} | |||
} | |||
dst.setInt32Slice(dfs) | |||
} | |||
} | |||
case isPointer: // E.g., *int32 | |||
mfi.merge = func(dst, src pointer) { | |||
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go). | |||
/* | |||
sfpp := src.toInt32Ptr() | |||
if *sfpp != nil { | |||
dfpp := dst.toInt32Ptr() | |||
if *dfpp == nil { | |||
*dfpp = Int32(**sfpp) | |||
} else { | |||
**dfpp = **sfpp | |||
} | |||
} | |||
*/ | |||
sfp := src.getInt32Ptr() | |||
if sfp != nil { | |||
dfp := dst.getInt32Ptr() | |||
if dfp == nil { | |||
dst.setInt32Ptr(*sfp) | |||
} else { | |||
*dfp = *sfp | |||
} | |||
} | |||
} | |||
default: // E.g., int32 | |||
mfi.merge = func(dst, src pointer) { | |||
if v := *src.toInt32(); v != 0 { | |||
*dst.toInt32() = v | |||
} | |||
} | |||
} | |||
case reflect.Int64: | |||
switch { | |||
case isSlice: // E.g., []int64 | |||
mfi.merge = func(dst, src pointer) { | |||
sfsp := src.toInt64Slice() | |||
if *sfsp != nil { | |||
dfsp := dst.toInt64Slice() | |||
*dfsp = append(*dfsp, *sfsp...) | |||
if *dfsp == nil { | |||
*dfsp = []int64{} | |||
} | |||
} | |||
} | |||
case isPointer: // E.g., *int64 | |||
mfi.merge = func(dst, src pointer) { | |||
sfpp := src.toInt64Ptr() | |||
if *sfpp != nil { | |||
dfpp := dst.toInt64Ptr() | |||
if *dfpp == nil { | |||
*dfpp = Int64(**sfpp) | |||
} else { | |||
**dfpp = **sfpp | |||
} | |||
} | |||
} | |||
default: // E.g., int64 | |||
mfi.merge = func(dst, src pointer) { | |||
if v := *src.toInt64(); v != 0 { | |||
*dst.toInt64() = v | |||
} | |||
} | |||
} | |||
case reflect.Uint32: | |||
switch { | |||
case isSlice: // E.g., []uint32 | |||
mfi.merge = func(dst, src pointer) { | |||
sfsp := src.toUint32Slice() | |||
if *sfsp != nil { | |||
dfsp := dst.toUint32Slice() | |||
*dfsp = append(*dfsp, *sfsp...) | |||
if *dfsp == nil { | |||
*dfsp = []uint32{} | |||
} | |||
} | |||
} | |||
case isPointer: // E.g., *uint32 | |||
mfi.merge = func(dst, src pointer) { | |||
sfpp := src.toUint32Ptr() | |||
if *sfpp != nil { | |||
dfpp := dst.toUint32Ptr() | |||
if *dfpp == nil { | |||
*dfpp = Uint32(**sfpp) | |||
} else { | |||
**dfpp = **sfpp | |||
} | |||
} | |||
} | |||
default: // E.g., uint32 | |||
mfi.merge = func(dst, src pointer) { | |||
if v := *src.toUint32(); v != 0 { | |||
*dst.toUint32() = v | |||
} | |||
} | |||
} | |||
case reflect.Uint64: | |||
switch { | |||
case isSlice: // E.g., []uint64 | |||
mfi.merge = func(dst, src pointer) { | |||
sfsp := src.toUint64Slice() | |||
if *sfsp != nil { | |||
dfsp := dst.toUint64Slice() | |||
*dfsp = append(*dfsp, *sfsp...) | |||
if *dfsp == nil { | |||
*dfsp = []uint64{} | |||
} | |||
} | |||
} | |||
case isPointer: // E.g., *uint64 | |||
mfi.merge = func(dst, src pointer) { | |||
sfpp := src.toUint64Ptr() | |||
if *sfpp != nil { | |||
dfpp := dst.toUint64Ptr() | |||
if *dfpp == nil { | |||
*dfpp = Uint64(**sfpp) | |||
} else { | |||
**dfpp = **sfpp | |||
} | |||
} | |||
} | |||
default: // E.g., uint64 | |||
mfi.merge = func(dst, src pointer) { | |||
if v := *src.toUint64(); v != 0 { | |||
*dst.toUint64() = v | |||
} | |||
} | |||
} | |||
case reflect.Float32: | |||
switch { | |||
case isSlice: // E.g., []float32 | |||
mfi.merge = func(dst, src pointer) { | |||
sfsp := src.toFloat32Slice() | |||
if *sfsp != nil { | |||
dfsp := dst.toFloat32Slice() | |||
*dfsp = append(*dfsp, *sfsp...) | |||
if *dfsp == nil { | |||
*dfsp = []float32{} | |||
} | |||
} | |||
} | |||
case isPointer: // E.g., *float32 | |||
mfi.merge = func(dst, src pointer) { | |||
sfpp := src.toFloat32Ptr() | |||
if *sfpp != nil { | |||
dfpp := dst.toFloat32Ptr() | |||
if *dfpp == nil { | |||
*dfpp = Float32(**sfpp) | |||
} else { | |||
**dfpp = **sfpp | |||
} | |||
} | |||
} | |||
default: // E.g., float32 | |||
mfi.merge = func(dst, src pointer) { | |||
if v := *src.toFloat32(); v != 0 { | |||
*dst.toFloat32() = v | |||
} | |||
} | |||
} | |||
case reflect.Float64: | |||
switch { | |||
case isSlice: // E.g., []float64 | |||
mfi.merge = func(dst, src pointer) { | |||
sfsp := src.toFloat64Slice() | |||
if *sfsp != nil { | |||
dfsp := dst.toFloat64Slice() | |||
*dfsp = append(*dfsp, *sfsp...) | |||
if *dfsp == nil { | |||
*dfsp = []float64{} | |||
} | |||
} | |||
} | |||
case isPointer: // E.g., *float64 | |||
mfi.merge = func(dst, src pointer) { | |||
sfpp := src.toFloat64Ptr() | |||
if *sfpp != nil { | |||
dfpp := dst.toFloat64Ptr() | |||
if *dfpp == nil { | |||
*dfpp = Float64(**sfpp) | |||
} else { | |||
**dfpp = **sfpp | |||
} | |||
} | |||
} | |||
default: // E.g., float64 | |||
mfi.merge = func(dst, src pointer) { | |||
if v := *src.toFloat64(); v != 0 { | |||
*dst.toFloat64() = v | |||
} | |||
} | |||
} | |||
case reflect.Bool: | |||
switch { | |||
case isSlice: // E.g., []bool | |||
mfi.merge = func(dst, src pointer) { | |||
sfsp := src.toBoolSlice() | |||
if *sfsp != nil { | |||
dfsp := dst.toBoolSlice() | |||
*dfsp = append(*dfsp, *sfsp...) | |||
if *dfsp == nil { | |||
*dfsp = []bool{} | |||
} | |||
} | |||
} | |||
case isPointer: // E.g., *bool | |||
mfi.merge = func(dst, src pointer) { | |||
sfpp := src.toBoolPtr() | |||
if *sfpp != nil { | |||
dfpp := dst.toBoolPtr() | |||
if *dfpp == nil { | |||
*dfpp = Bool(**sfpp) | |||
} else { | |||
**dfpp = **sfpp | |||
} | |||
} | |||
} | |||
default: // E.g., bool | |||
mfi.merge = func(dst, src pointer) { | |||
if v := *src.toBool(); v { | |||
*dst.toBool() = v | |||
} | |||
} | |||
} | |||
case reflect.String: | |||
switch { | |||
case isSlice: // E.g., []string | |||
mfi.merge = func(dst, src pointer) { | |||
sfsp := src.toStringSlice() | |||
if *sfsp != nil { | |||
dfsp := dst.toStringSlice() | |||
*dfsp = append(*dfsp, *sfsp...) | |||
if *dfsp == nil { | |||
*dfsp = []string{} | |||
} | |||
} | |||
} | |||
case isPointer: // E.g., *string | |||
mfi.merge = func(dst, src pointer) { | |||
sfpp := src.toStringPtr() | |||
if *sfpp != nil { | |||
dfpp := dst.toStringPtr() | |||
if *dfpp == nil { | |||
*dfpp = String(**sfpp) | |||
} else { | |||
**dfpp = **sfpp | |||
} | |||
} | |||
} | |||
default: // E.g., string | |||
mfi.merge = func(dst, src pointer) { | |||
if v := *src.toString(); v != "" { | |||
*dst.toString() = v | |||
} | |||
} | |||
} | |||
case reflect.Slice: | |||
isProto3 := props.Prop[i].proto3 | |||
switch { | |||
case isPointer: | |||
panic("bad pointer in byte slice case in " + tf.Name()) | |||
case tf.Elem().Kind() != reflect.Uint8: | |||
panic("bad element kind in byte slice case in " + tf.Name()) | |||
case isSlice: // E.g., [][]byte | |||
mfi.merge = func(dst, src pointer) { | |||
sbsp := src.toBytesSlice() | |||
if *sbsp != nil { | |||
dbsp := dst.toBytesSlice() | |||
for _, sb := range *sbsp { | |||
if sb == nil { | |||
*dbsp = append(*dbsp, nil) | |||
} else { | |||
*dbsp = append(*dbsp, append([]byte{}, sb...)) | |||
} | |||
} | |||
if *dbsp == nil { | |||
*dbsp = [][]byte{} | |||
} | |||
} | |||
} | |||
default: // E.g., []byte | |||
mfi.merge = func(dst, src pointer) { | |||
sbp := src.toBytes() | |||
if *sbp != nil { | |||
dbp := dst.toBytes() | |||
if !isProto3 || len(*sbp) > 0 { | |||
*dbp = append([]byte{}, *sbp...) | |||
} | |||
} | |||
} | |||
} | |||
case reflect.Struct: | |||
switch { | |||
case !isPointer: | |||
panic(fmt.Sprintf("message field %s without pointer", tf)) | |||
case isSlice: // E.g., []*pb.T | |||
mi := getMergeInfo(tf) | |||
mfi.merge = func(dst, src pointer) { | |||
sps := src.getPointerSlice() | |||
if sps != nil { | |||
dps := dst.getPointerSlice() | |||
for _, sp := range sps { | |||
var dp pointer | |||
if !sp.isNil() { | |||
dp = valToPointer(reflect.New(tf)) | |||
mi.merge(dp, sp) | |||
} | |||
dps = append(dps, dp) | |||
} | |||
if dps == nil { | |||
dps = []pointer{} | |||
} | |||
dst.setPointerSlice(dps) | |||
} | |||
} | |||
default: // E.g., *pb.T | |||
mi := getMergeInfo(tf) | |||
mfi.merge = func(dst, src pointer) { | |||
sp := src.getPointer() | |||
if !sp.isNil() { | |||
dp := dst.getPointer() | |||
if dp.isNil() { | |||
dp = valToPointer(reflect.New(tf)) | |||
dst.setPointer(dp) | |||
} | |||
mi.merge(dp, sp) | |||
} | |||
} | |||
} | |||
case reflect.Map: | |||
switch { | |||
case isPointer || isSlice: | |||
panic("bad pointer or slice in map case in " + tf.Name()) | |||
default: // E.g., map[K]V | |||
mfi.merge = func(dst, src pointer) { | |||
sm := src.asPointerTo(tf).Elem() | |||
if sm.Len() == 0 { | |||
return | |||
} | |||
dm := dst.asPointerTo(tf).Elem() | |||
if dm.IsNil() { | |||
dm.Set(reflect.MakeMap(tf)) | |||
} | |||
switch tf.Elem().Kind() { | |||
case reflect.Ptr: // Proto struct (e.g., *T) | |||
for _, key := range sm.MapKeys() { | |||
val := sm.MapIndex(key) | |||
val = reflect.ValueOf(Clone(val.Interface().(Message))) | |||
dm.SetMapIndex(key, val) | |||
} | |||
case reflect.Slice: // E.g. Bytes type (e.g., []byte) | |||
for _, key := range sm.MapKeys() { | |||
val := sm.MapIndex(key) | |||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) | |||
dm.SetMapIndex(key, val) | |||
} | |||
default: // Basic type (e.g., string) | |||
for _, key := range sm.MapKeys() { | |||
val := sm.MapIndex(key) | |||
dm.SetMapIndex(key, val) | |||
} | |||
} | |||
} | |||
} | |||
case reflect.Interface: | |||
// Must be oneof field. | |||
switch { | |||
case isPointer || isSlice: | |||
panic("bad pointer or slice in interface case in " + tf.Name()) | |||
default: // E.g., interface{} | |||
// TODO: Make this faster? | |||
mfi.merge = func(dst, src pointer) { | |||
su := src.asPointerTo(tf).Elem() | |||
if !su.IsNil() { | |||
du := dst.asPointerTo(tf).Elem() | |||
typ := su.Elem().Type() | |||
if du.IsNil() || du.Elem().Type() != typ { | |||
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty | |||
} | |||
sv := su.Elem().Elem().Field(0) | |||
if sv.Kind() == reflect.Ptr && sv.IsNil() { | |||
return | |||
} | |||
dv := du.Elem().Elem().Field(0) | |||
if dv.Kind() == reflect.Ptr && dv.IsNil() { | |||
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty | |||
} | |||
switch sv.Type().Kind() { | |||
case reflect.Ptr: // Proto struct (e.g., *T) | |||
Merge(dv.Interface().(Message), sv.Interface().(Message)) | |||
case reflect.Slice: // E.g. Bytes type (e.g., []byte) | |||
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) | |||
default: // Basic type (e.g., string) | |||
dv.Set(sv) | |||
} | |||
} | |||
} | |||
} | |||
default: | |||
panic(fmt.Sprintf("merger not found for type:%s", tf)) | |||
} | |||
mi.fields = append(mi.fields, mfi) | |||
} | |||
mi.unrecognized = invalidField | |||
if f, ok := t.FieldByName("XXX_unrecognized"); ok { | |||
if f.Type != reflect.TypeOf([]byte{}) { | |||
panic("expected XXX_unrecognized to be of type []byte") | |||
} | |||
mi.unrecognized = toField(&f) | |||
} | |||
atomic.StoreInt32(&mi.initialized, 1) | |||
} |
@@ -50,7 +50,6 @@ import ( | |||
var ( | |||
newline = []byte("\n") | |||
spaces = []byte(" ") | |||
gtNewline = []byte(">\n") | |||
endBraceNewline = []byte("}\n") | |||
backslashN = []byte{'\\', 'n'} | |||
backslashR = []byte{'\\', 'r'} | |||
@@ -154,7 +153,7 @@ func (w *textWriter) indent() { w.ind++ } | |||
func (w *textWriter) unindent() { | |||
if w.ind == 0 { | |||
log.Printf("proto: textWriter unindented too far") | |||
log.Print("proto: textWriter unindented too far") | |||
return | |||
} | |||
w.ind-- | |||
@@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error { | |||
return nil | |||
} | |||
// raw is the interface satisfied by RawMessage. | |||
type raw interface { | |||
Bytes() []byte | |||
} | |||
func requiresQuotes(u string) bool { | |||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. | |||
for _, ch := range u { | |||
@@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { | |||
props := sprops.Prop[i] | |||
name := st.Field(i).Name | |||
if name == "XXX_NoUnkeyedLiteral" { | |||
continue | |||
} | |||
if strings.HasPrefix(name, "XXX_") { | |||
// There are two XXX_ fields: | |||
// XXX_unrecognized []byte | |||
@@ -355,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { | |||
return err | |||
} | |||
} | |||
if err := tm.writeAny(w, key, props.mkeyprop); err != nil { | |||
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { | |||
return err | |||
} | |||
if err := w.WriteByte('\n'); err != nil { | |||
@@ -372,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { | |||
return err | |||
} | |||
} | |||
if err := tm.writeAny(w, val, props.mvalprop); err != nil { | |||
if err := tm.writeAny(w, val, props.MapValProp); err != nil { | |||
return err | |||
} | |||
if err := w.WriteByte('\n'); err != nil { | |||
@@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { | |||
return err | |||
} | |||
} | |||
if b, ok := fv.Interface().(raw); ok { | |||
if err := writeRaw(w, b.Bytes()); err != nil { | |||
return err | |||
} | |||
continue | |||
} | |||
// Enums have a String method, so writeAny will work fine. | |||
if err := tm.writeAny(w, fv, props); err != nil { | |||
@@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { | |||
// Extensions (the XXX_extensions field). | |||
pv := sv.Addr() | |||
if pv.Type().Implements(extendableProtoType) { | |||
if _, err := extendable(pv.Interface()); err == nil { | |||
if err := tm.writeExtensions(w, pv); err != nil { | |||
return err | |||
} | |||
@@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { | |||
return nil | |||
} | |||
// writeRaw writes an uninterpreted raw message. | |||
func writeRaw(w *textWriter, b []byte) error { | |||
if err := w.WriteByte('<'); err != nil { | |||
return err | |||
} | |||
if !w.compact { | |||
if err := w.WriteByte('\n'); err != nil { | |||
return err | |||
} | |||
} | |||
w.indent() | |||
if err := writeUnknownStruct(w, b); err != nil { | |||
return err | |||
} | |||
w.unindent() | |||
if err := w.WriteByte('>'); err != nil { | |||
return err | |||
} | |||
return nil | |||
} | |||
// writeAny writes an arbitrary field. | |||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { | |||
v = reflect.Indirect(v) | |||
@@ -513,7 +484,7 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert | |||
switch v.Kind() { | |||
case reflect.Slice: | |||
// Should only be a []byte; repeated fields are handled in writeStruct. | |||
if err := writeString(w, string(v.Interface().([]byte))); err != nil { | |||
if err := writeString(w, string(v.Bytes())); err != nil { | |||
return err | |||
} | |||
case reflect.String: | |||
@@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert | |||
} | |||
} | |||
w.indent() | |||
if v.CanAddr() { | |||
// Calling v.Interface on a struct causes the reflect package to | |||
// copy the entire struct. This is racy with the new Marshaler | |||
// since we atomically update the XXX_sizecache. | |||
// | |||
// Thus, we retrieve a pointer to the struct if possible to avoid | |||
// a race since v.Interface on the pointer doesn't copy the struct. | |||
// | |||
// If v is not addressable, then we are not worried about a race | |||
// since it implies that the binary Marshaler cannot possibly be | |||
// mutating this value. | |||
v = v.Addr() | |||
} | |||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok { | |||
text, err := etm.MarshalText() | |||
if err != nil { | |||
@@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert | |||
if _, err = w.Write(text); err != nil { | |||
return err | |||
} | |||
} else if err := tm.writeStruct(w, v); err != nil { | |||
return err | |||
} else { | |||
if v.Kind() == reflect.Ptr { | |||
v = v.Elem() | |||
} | |||
if err := tm.writeStruct(w, v); err != nil { | |||
return err | |||
} | |||
} | |||
w.unindent() | |||
if err := w.WriteByte(ket); err != nil { | |||
@@ -689,17 +678,22 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | |||
// pv is assumed to be a pointer to a protocol message struct that is extendable. | |||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { | |||
emap := extensionMaps[pv.Type().Elem()] | |||
ep := pv.Interface().(extendableProto) | |||
ep, _ := extendable(pv.Interface()) | |||
// Order the extensions by ID. | |||
// This isn't strictly necessary, but it will give us | |||
// canonical output, which will also make testing easier. | |||
m := ep.ExtensionMap() | |||
m, mu := ep.extensionsRead() | |||
if m == nil { | |||
return nil | |||
} | |||
mu.Lock() | |||
ids := make([]int32, 0, len(m)) | |||
for id := range m { | |||
ids = append(ids, id) | |||
} | |||
sort.Sort(int32Slice(ids)) | |||
mu.Unlock() | |||
for _, extNum := range ids { | |||
ext := m[extNum] | |||
@@ -44,6 +44,9 @@ import ( | |||
"unicode/utf8" | |||
) | |||
// Error string emitted when deserializing Any and fields are already set | |||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" | |||
type ParseError struct { | |||
Message string | |||
Line int // 1-based line number | |||
@@ -203,7 +206,6 @@ func (p *textParser) advance() { | |||
var ( | |||
errBadUTF8 = errors.New("proto: bad UTF-8") | |||
errBadHex = errors.New("proto: bad hexadecimal") | |||
) | |||
func unquoteC(s string, quote rune) (string, error) { | |||
@@ -274,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) { | |||
return "?", s, nil // trigraph workaround | |||
case '\'', '"', '\\': | |||
return string(r), s, nil | |||
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': | |||
case '0', '1', '2', '3', '4', '5', '6', '7': | |||
if len(s) < 2 { | |||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) | |||
} | |||
base := 8 | |||
ss := s[:2] | |||
ss := string(r) + s[:2] | |||
s = s[2:] | |||
if r == 'x' || r == 'X' { | |||
base = 16 | |||
} else { | |||
ss = string(r) + ss | |||
} | |||
i, err := strconv.ParseUint(ss, base, 8) | |||
i, err := strconv.ParseUint(ss, 8, 8) | |||
if err != nil { | |||
return "", "", err | |||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) | |||
} | |||
return string([]byte{byte(i)}), s, nil | |||
case 'u', 'U': | |||
n := 4 | |||
if r == 'U' { | |||
case 'x', 'X', 'u', 'U': | |||
var n int | |||
switch r { | |||
case 'x', 'X': | |||
n = 2 | |||
case 'u': | |||
n = 4 | |||
case 'U': | |||
n = 8 | |||
} | |||
if len(s) < n { | |||
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) | |||
} | |||
bs := make([]byte, n/2) | |||
for i := 0; i < n; i += 2 { | |||
a, ok1 := unhex(s[i]) | |||
b, ok2 := unhex(s[i+1]) | |||
if !ok1 || !ok2 { | |||
return "", "", errBadHex | |||
} | |||
bs[i/2] = a<<4 | b | |||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) | |||
} | |||
ss := s[:n] | |||
s = s[n:] | |||
return string(bs), s, nil | |||
i, err := strconv.ParseUint(ss, 16, 64) | |||
if err != nil { | |||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) | |||
} | |||
if r == 'x' || r == 'X' { | |||
return string([]byte{byte(i)}), s, nil | |||
} | |||
if i > utf8.MaxRune { | |||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) | |||
} | |||
return string(i), s, nil | |||
} | |||
return "", "", fmt.Errorf(`unknown escape \%c`, r) | |||
} | |||
// unhex converts a single ASCII hexadecimal digit to its numeric value.
// The second result reports whether b was a valid hex digit ('0'-'9',
// 'a'-'f', or 'A'-'F'); when it is false, v is 0.
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
	if b >= '0' && b <= '9' {
		return b - '0', true
	}
	if b >= 'a' && b <= 'f' {
		return b - 'a' + 10, true
	}
	if b >= 'A' && b <= 'F' {
		return b - 'A' + 10, true
	}
	return 0, false
}
// Back off the parser by one token. Can only be done between calls to next(). | |||
// It makes the next advance() a no-op. | |||
func (p *textParser) back() { p.backed = true } | |||
@@ -508,8 +497,16 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { | |||
if err != nil { | |||
return p.errorf("failed to marshal message of type %q: %v", messageName, err) | |||
} | |||
if fieldSet["type_url"] { | |||
return p.errorf(anyRepeatedlyUnpacked, "type_url") | |||
} | |||
if fieldSet["value"] { | |||
return p.errorf(anyRepeatedlyUnpacked, "value") | |||
} | |||
sv.FieldByName("TypeUrl").SetString(extName) | |||
sv.FieldByName("Value").SetBytes(b) | |||
fieldSet["type_url"] = true | |||
fieldSet["value"] = true | |||
continue | |||
} | |||
@@ -550,7 +547,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { | |||
} | |||
reqFieldErr = err | |||
} | |||
ep := sv.Addr().Interface().(extendableProto) | |||
ep := sv.Addr().Interface().(Message) | |||
if !rep { | |||
SetExtension(ep, desc, ext.Interface()) | |||
} else { | |||
@@ -581,7 +578,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { | |||
props = oop.Prop | |||
nv := reflect.New(oop.Type.Elem()) | |||
dst = nv.Elem().Field(0) | |||
sv.Field(oop.Field).Set(nv) | |||
field := sv.Field(oop.Field) | |||
if !field.IsNil() { | |||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) | |||
} | |||
field.Set(nv) | |||
} | |||
if !dst.IsValid() { | |||
return p.errorf("unknown field name %q in %v", name, st) | |||
@@ -602,8 +603,9 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { | |||
// The map entry should be this sequence of tokens: | |||
// < key : KEY value : VALUE > | |||
// Technically the "key" and "value" could come in any order, | |||
// but in practice they won't. | |||
// However, implementations may omit key or value, and technically | |||
// we should support them in any order. See b/28924776 for a time | |||
// this went wrong. | |||
tok := p.next() | |||
var terminator string | |||
@@ -615,32 +617,39 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { | |||
default: | |||
return p.errorf("expected '{' or '<', found %q", tok.value) | |||
} | |||
if err := p.consumeToken("key"); err != nil { | |||
return err | |||
} | |||
if err := p.consumeToken(":"); err != nil { | |||
return err | |||
} | |||
if err := p.readAny(key, props.mkeyprop); err != nil { | |||
return err | |||
} | |||
if err := p.consumeOptionalSeparator(); err != nil { | |||
return err | |||
} | |||
if err := p.consumeToken("value"); err != nil { | |||
return err | |||
} | |||
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { | |||
return err | |||
} | |||
if err := p.readAny(val, props.mvalprop); err != nil { | |||
return err | |||
} | |||
if err := p.consumeOptionalSeparator(); err != nil { | |||
return err | |||
} | |||
if err := p.consumeToken(terminator); err != nil { | |||
return err | |||
for { | |||
tok := p.next() | |||
if tok.err != nil { | |||
return tok.err | |||
} | |||
if tok.value == terminator { | |||
break | |||
} | |||
switch tok.value { | |||
case "key": | |||
if err := p.consumeToken(":"); err != nil { | |||
return err | |||
} | |||
if err := p.readAny(key, props.MapKeyProp); err != nil { | |||
return err | |||
} | |||
if err := p.consumeOptionalSeparator(); err != nil { | |||
return err | |||
} | |||
case "value": | |||
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { | |||
return err | |||
} | |||
if err := p.readAny(val, props.MapValProp); err != nil { | |||
return err | |||
} | |||
if err := p.consumeOptionalSeparator(); err != nil { | |||
return err | |||
} | |||
default: | |||
p.back() | |||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) | |||
} | |||
} | |||
dst.SetMapIndex(key, val) | |||
@@ -663,7 +672,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { | |||
return err | |||
} | |||
reqFieldErr = err | |||
} else if props.Required { | |||
} | |||
if props.Required { | |||
reqCount-- | |||
} | |||
@@ -704,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) { | |||
if tok.err != nil { | |||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) | |||
} | |||
if p.done && tok.value != "]" { | |||
return "", p.errorf("unclosed type_url or extension name") | |||
} | |||
} | |||
return strings.Join(parts, ""), nil | |||
} | |||
@@ -772,12 +785,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { | |||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) | |||
return p.readAny(fv.Index(fv.Len()-1), props) | |||
case reflect.Bool: | |||
// Either "true", "false", 1 or 0. | |||
// true/1/t/True or false/f/0/False. | |||
switch tok.value { | |||
case "true", "1": | |||
case "true", "1", "t", "True": | |||
fv.SetBool(true) | |||
return nil | |||
case "false", "0": | |||
case "false", "0", "f", "False": | |||
fv.SetBool(false) | |||
return nil | |||
} | |||
@@ -859,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { | |||
// UnmarshalText returns *RequiredNotSetError. | |||
func UnmarshalText(s string, pb Message) error { | |||
if um, ok := pb.(encoding.TextUnmarshaler); ok { | |||
err := um.UnmarshalText([]byte(s)) | |||
return err | |||
return um.UnmarshalText([]byte(s)) | |||
} | |||
pb.Reset() | |||
v := reflect.ValueOf(pb) | |||
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { | |||
return pe | |||
} | |||
return nil | |||
return newTextParser(s).readStruct(v.Elem(), "") | |||
} |
@@ -0,0 +1,201 @@ | |||
Apache License | |||
Version 2.0, January 2004 | |||
http://www.apache.org/licenses/ | |||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
1. Definitions. | |||
"License" shall mean the terms and conditions for use, reproduction, | |||
and distribution as defined by Sections 1 through 9 of this document. | |||
"Licensor" shall mean the copyright owner or entity authorized by | |||
the copyright owner that is granting the License. | |||
"Legal Entity" shall mean the union of the acting entity and all | |||
other entities that control, are controlled by, or are under common | |||
control with that entity. For the purposes of this definition, | |||
"control" means (i) the power, direct or indirect, to cause the | |||
direction or management of such entity, whether by contract or | |||
otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
outstanding shares, or (iii) beneficial ownership of such entity. | |||
"You" (or "Your") shall mean an individual or Legal Entity | |||
exercising permissions granted by this License. | |||
"Source" form shall mean the preferred form for making modifications, | |||
including but not limited to software source code, documentation | |||
source, and configuration files. | |||
"Object" form shall mean any form resulting from mechanical | |||
transformation or translation of a Source form, including but | |||
not limited to compiled object code, generated documentation, | |||
and conversions to other media types. | |||
"Work" shall mean the work of authorship, whether in Source or | |||
Object form, made available under the License, as indicated by a | |||
copyright notice that is included in or attached to the work | |||
(an example is provided in the Appendix below). | |||
"Derivative Works" shall mean any work, whether in Source or Object | |||
form, that is based on (or derived from) the Work and for which the | |||
editorial revisions, annotations, elaborations, or other modifications | |||
represent, as a whole, an original work of authorship. For the purposes | |||
of this License, Derivative Works shall not include works that remain | |||
separable from, or merely link (or bind by name) to the interfaces of, | |||
the Work and Derivative Works thereof. | |||
"Contribution" shall mean any work of authorship, including | |||
the original version of the Work and any modifications or additions | |||
to that Work or Derivative Works thereof, that is intentionally | |||
submitted to Licensor for inclusion in the Work by the copyright owner | |||
or by an individual or Legal Entity authorized to submit on behalf of | |||
the copyright owner. For the purposes of this definition, "submitted" | |||
means any form of electronic, verbal, or written communication sent | |||
to the Licensor or its representatives, including but not limited to | |||
communication on electronic mailing lists, source code control systems, | |||
and issue tracking systems that are managed by, or on behalf of, the | |||
Licensor for the purpose of discussing and improving the Work, but | |||
excluding communication that is conspicuously marked or otherwise | |||
designated in writing by the copyright owner as "Not a Contribution." | |||
"Contributor" shall mean Licensor and any individual or Legal Entity | |||
on behalf of whom a Contribution has been received by Licensor and | |||
subsequently incorporated within the Work. | |||
2. Grant of Copyright License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
copyright license to reproduce, prepare Derivative Works of, | |||
publicly display, publicly perform, sublicense, and distribute the | |||
Work and such Derivative Works in Source or Object form. | |||
3. Grant of Patent License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
(except as stated in this section) patent license to make, have made, | |||
use, offer to sell, sell, import, and otherwise transfer the Work, | |||
where such license applies only to those patent claims licensable | |||
by such Contributor that are necessarily infringed by their | |||
Contribution(s) alone or by combination of their Contribution(s) | |||
with the Work to which such Contribution(s) was submitted. If You | |||
institute patent litigation against any entity (including a | |||
cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
or a Contribution incorporated within the Work constitutes direct | |||
or contributory patent infringement, then any patent licenses | |||
granted to You under this License for that Work shall terminate | |||
as of the date such litigation is filed. | |||
4. Redistribution. You may reproduce and distribute copies of the | |||
Work or Derivative Works thereof in any medium, with or without | |||
modifications, and in Source or Object form, provided that You | |||
meet the following conditions: | |||
(a) You must give any other recipients of the Work or | |||
Derivative Works a copy of this License; and | |||
(b) You must cause any modified files to carry prominent notices | |||
stating that You changed the files; and | |||
(c) You must retain, in the Source form of any Derivative Works | |||
that You distribute, all copyright, patent, trademark, and | |||
attribution notices from the Source form of the Work, | |||
excluding those notices that do not pertain to any part of | |||
the Derivative Works; and | |||
(d) If the Work includes a "NOTICE" text file as part of its | |||
distribution, then any Derivative Works that You distribute must | |||
include a readable copy of the attribution notices contained | |||
within such NOTICE file, excluding those notices that do not | |||
pertain to any part of the Derivative Works, in at least one | |||
of the following places: within a NOTICE text file distributed | |||
as part of the Derivative Works; within the Source form or | |||
documentation, if provided along with the Derivative Works; or, | |||
within a display generated by the Derivative Works, if and | |||
wherever such third-party notices normally appear. The contents | |||
of the NOTICE file are for informational purposes only and | |||
do not modify the License. You may add Your own attribution | |||
notices within Derivative Works that You distribute, alongside | |||
or as an addendum to the NOTICE text from the Work, provided | |||
that such additional attribution notices cannot be construed | |||
as modifying the License. | |||
You may add Your own copyright statement to Your modifications and | |||
may provide additional or different license terms and conditions | |||
for use, reproduction, or distribution of Your modifications, or | |||
for any such Derivative Works as a whole, provided Your use, | |||
reproduction, and distribution of the Work otherwise complies with | |||
the conditions stated in this License. | |||
5. Submission of Contributions. Unless You explicitly state otherwise, | |||
any Contribution intentionally submitted for inclusion in the Work | |||
by You to the Licensor shall be under the terms and conditions of | |||
this License, without any additional terms or conditions. | |||
Notwithstanding the above, nothing herein shall supersede or modify | |||
the terms of any separate license agreement you may have executed | |||
with Licensor regarding such Contributions. | |||
6. Trademarks. This License does not grant permission to use the trade | |||
names, trademarks, service marks, or product names of the Licensor, | |||
except as required for reasonable and customary use in describing the | |||
origin of the Work and reproducing the content of the NOTICE file. | |||
7. Disclaimer of Warranty. Unless required by applicable law or | |||
agreed to in writing, Licensor provides the Work (and each | |||
Contributor provides its Contributions) on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
implied, including, without limitation, any warranties or conditions | |||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
PARTICULAR PURPOSE. You are solely responsible for determining the | |||
appropriateness of using or redistributing the Work and assume any | |||
risks associated with Your exercise of permissions under this License. | |||
8. Limitation of Liability. In no event and under no legal theory, | |||
whether in tort (including negligence), contract, or otherwise, | |||
unless required by applicable law (such as deliberate and grossly | |||
negligent acts) or agreed to in writing, shall any Contributor be | |||
liable to You for damages, including any direct, indirect, special, | |||
incidental, or consequential damages of any character arising as a | |||
result of this License or out of the use or inability to use the | |||
Work (including but not limited to damages for loss of goodwill, | |||
work stoppage, computer failure or malfunction, or any and all | |||
other commercial damages or losses), even if such Contributor | |||
has been advised of the possibility of such damages. | |||
9. Accepting Warranty or Additional Liability. While redistributing | |||
the Work or Derivative Works thereof, You may choose to offer, | |||
and charge a fee for, acceptance of support, warranty, indemnity, | |||
or other liability obligations and/or rights consistent with this | |||
License. However, in accepting such obligations, You may act only | |||
on Your own behalf and on Your sole responsibility, not on behalf | |||
of any other Contributor, and only if You agree to indemnify, | |||
defend, and hold each Contributor harmless for any liability | |||
incurred by, or claims asserted against, such Contributor by reason | |||
of your accepting any such warranty or additional liability. | |||
END OF TERMS AND CONDITIONS | |||
APPENDIX: How to apply the Apache License to your work. | |||
To apply the Apache License to your work, attach the following | |||
boilerplate notice, with the fields enclosed by brackets "{}" | |||
replaced with your own identifying information. (Don't include | |||
the brackets!) The text should be enclosed in the appropriate | |||
comment syntax for the file format. We also recommend that a | |||
file or class name and description of purpose be included on the | |||
same "printed page" as the copyright notice for easier | |||
identification within third-party archives. | |||
Copyright {yyyy} {name of copyright owner} | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. |
@@ -0,0 +1 @@ | |||
Copyright 2012 Matt T. Proud (matt.proud@gmail.com) |
@@ -0,0 +1,75 @@ | |||
// Copyright 2013 Matt T. Proud | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package pbutil | |||
import ( | |||
"encoding/binary" | |||
"errors" | |||
"io" | |||
"github.com/golang/protobuf/proto" | |||
) | |||
var errInvalidVarint = errors.New("invalid varint32 encountered") | |||
// ReadDelimited decodes a message from the provided length-delimited stream, | |||
// where the length is encoded as 32-bit varint prefix to the message body. | |||
// It returns the total number of bytes read and any applicable error. This is | |||
// roughly equivalent to the companion Java API's | |||
// MessageLite#parseDelimitedFrom. As per the reader contract, this function | |||
// calls r.Read repeatedly as required until exactly one message including its | |||
// prefix is read and decoded (or an error has occurred). The function never | |||
// reads more bytes from the stream than required. The function never returns | |||
// an error if a message has been read and decoded correctly, even if the end | |||
// of the stream has been reached in doing so. In that case, any subsequent | |||
// calls return (0, io.EOF). | |||
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { | |||
// Per AbstractParser#parsePartialDelimitedFrom with | |||
// CodedInputStream#readRawVarint32. | |||
var headerBuf [binary.MaxVarintLen32]byte | |||
var bytesRead, varIntBytes int | |||
var messageLength uint64 | |||
for varIntBytes == 0 { // i.e. no varint has been decoded yet. | |||
if bytesRead >= len(headerBuf) { | |||
return bytesRead, errInvalidVarint | |||
} | |||
// We have to read byte by byte here to avoid reading more bytes | |||
// than required. Each read byte is appended to what we have | |||
// read before. | |||
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) | |||
if newBytesRead == 0 { | |||
if err != nil { | |||
return bytesRead, err | |||
} | |||
// A Reader should not return (0, nil), but if it does, | |||
// it should be treated as no-op (according to the | |||
// Reader contract). So let's go on... | |||
continue | |||
} | |||
bytesRead += newBytesRead | |||
// Now present everything read so far to the varint decoder and | |||
// see if a varint can be decoded already. | |||
messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) | |||
} | |||
messageBuf := make([]byte, messageLength) | |||
newBytesRead, err := io.ReadFull(r, messageBuf) | |||
bytesRead += newBytesRead | |||
if err != nil { | |||
return bytesRead, err | |||
} | |||
return bytesRead, proto.Unmarshal(messageBuf, m) | |||
} |
@@ -0,0 +1,16 @@ | |||
// Copyright 2013 Matt T. Proud | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Package pbutil provides record length-delimited Protocol Buffer streaming. | |||
package pbutil |
@@ -0,0 +1,46 @@ | |||
// Copyright 2013 Matt T. Proud | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package pbutil | |||
import ( | |||
"encoding/binary" | |||
"io" | |||
"github.com/golang/protobuf/proto" | |||
) | |||
// WriteDelimited encodes and dumps a message to the provided writer prefixed | |||
// with a 32-bit varint indicating the length of the encoded message, producing | |||
// a length-delimited record stream, which can be used to chain together | |||
// encoded messages of the same type together in a file. It returns the total | |||
// number of bytes written and any applicable error. This is roughly | |||
// equivalent to the companion Java API's MessageLite#writeDelimitedTo. | |||
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { | |||
buffer, err := proto.Marshal(m) | |||
if err != nil { | |||
return 0, err | |||
} | |||
var buf [binary.MaxVarintLen32]byte | |||
encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) | |||
sync, err := w.Write(buf[:encodedLength]) | |||
if err != nil { | |||
return sync, err | |||
} | |||
n, err = w.Write(buffer) | |||
return n + sync, err | |||
} |
@@ -0,0 +1,201 @@ | |||
Apache License | |||
Version 2.0, January 2004 | |||
http://www.apache.org/licenses/ | |||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
1. Definitions. | |||
"License" shall mean the terms and conditions for use, reproduction, | |||
and distribution as defined by Sections 1 through 9 of this document. | |||
"Licensor" shall mean the copyright owner or entity authorized by | |||
the copyright owner that is granting the License. | |||
"Legal Entity" shall mean the union of the acting entity and all | |||
other entities that control, are controlled by, or are under common | |||
control with that entity. For the purposes of this definition, | |||
"control" means (i) the power, direct or indirect, to cause the | |||
direction or management of such entity, whether by contract or | |||
otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
outstanding shares, or (iii) beneficial ownership of such entity. | |||
"You" (or "Your") shall mean an individual or Legal Entity | |||
exercising permissions granted by this License. | |||
"Source" form shall mean the preferred form for making modifications, | |||
including but not limited to software source code, documentation | |||
source, and configuration files. | |||
"Object" form shall mean any form resulting from mechanical | |||
transformation or translation of a Source form, including but | |||
not limited to compiled object code, generated documentation, | |||
and conversions to other media types. | |||
"Work" shall mean the work of authorship, whether in Source or | |||
Object form, made available under the License, as indicated by a | |||
copyright notice that is included in or attached to the work | |||
(an example is provided in the Appendix below). | |||
"Derivative Works" shall mean any work, whether in Source or Object | |||
form, that is based on (or derived from) the Work and for which the | |||
editorial revisions, annotations, elaborations, or other modifications | |||
represent, as a whole, an original work of authorship. For the purposes | |||
of this License, Derivative Works shall not include works that remain | |||
separable from, or merely link (or bind by name) to the interfaces of, | |||
the Work and Derivative Works thereof. | |||
"Contribution" shall mean any work of authorship, including | |||
the original version of the Work and any modifications or additions | |||
to that Work or Derivative Works thereof, that is intentionally | |||
submitted to Licensor for inclusion in the Work by the copyright owner | |||
or by an individual or Legal Entity authorized to submit on behalf of | |||
the copyright owner. For the purposes of this definition, "submitted" | |||
means any form of electronic, verbal, or written communication sent | |||
to the Licensor or its representatives, including but not limited to | |||
communication on electronic mailing lists, source code control systems, | |||
and issue tracking systems that are managed by, or on behalf of, the | |||
Licensor for the purpose of discussing and improving the Work, but | |||
excluding communication that is conspicuously marked or otherwise | |||
designated in writing by the copyright owner as "Not a Contribution." | |||
"Contributor" shall mean Licensor and any individual or Legal Entity | |||
on behalf of whom a Contribution has been received by Licensor and | |||
subsequently incorporated within the Work. | |||
2. Grant of Copyright License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
copyright license to reproduce, prepare Derivative Works of, | |||
publicly display, publicly perform, sublicense, and distribute the | |||
Work and such Derivative Works in Source or Object form. | |||
3. Grant of Patent License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
(except as stated in this section) patent license to make, have made, | |||
use, offer to sell, sell, import, and otherwise transfer the Work, | |||
where such license applies only to those patent claims licensable | |||
by such Contributor that are necessarily infringed by their | |||
Contribution(s) alone or by combination of their Contribution(s) | |||
with the Work to which such Contribution(s) was submitted. If You | |||
institute patent litigation against any entity (including a | |||
cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
or a Contribution incorporated within the Work constitutes direct | |||
or contributory patent infringement, then any patent licenses | |||
granted to You under this License for that Work shall terminate | |||
as of the date such litigation is filed. | |||
4. Redistribution. You may reproduce and distribute copies of the | |||
Work or Derivative Works thereof in any medium, with or without | |||
modifications, and in Source or Object form, provided that You | |||
meet the following conditions: | |||
(a) You must give any other recipients of the Work or | |||
Derivative Works a copy of this License; and | |||
(b) You must cause any modified files to carry prominent notices | |||
stating that You changed the files; and | |||
(c) You must retain, in the Source form of any Derivative Works | |||
that You distribute, all copyright, patent, trademark, and | |||
attribution notices from the Source form of the Work, | |||
excluding those notices that do not pertain to any part of | |||
the Derivative Works; and | |||
(d) If the Work includes a "NOTICE" text file as part of its | |||
distribution, then any Derivative Works that You distribute must | |||
include a readable copy of the attribution notices contained | |||
within such NOTICE file, excluding those notices that do not | |||
pertain to any part of the Derivative Works, in at least one | |||
of the following places: within a NOTICE text file distributed | |||
as part of the Derivative Works; within the Source form or | |||
documentation, if provided along with the Derivative Works; or, | |||
within a display generated by the Derivative Works, if and | |||
wherever such third-party notices normally appear. The contents | |||
of the NOTICE file are for informational purposes only and | |||
do not modify the License. You may add Your own attribution | |||
notices within Derivative Works that You distribute, alongside | |||
or as an addendum to the NOTICE text from the Work, provided | |||
that such additional attribution notices cannot be construed | |||
as modifying the License. | |||
You may add Your own copyright statement to Your modifications and | |||
may provide additional or different license terms and conditions | |||
for use, reproduction, or distribution of Your modifications, or | |||
for any such Derivative Works as a whole, provided Your use, | |||
reproduction, and distribution of the Work otherwise complies with | |||
the conditions stated in this License. | |||
5. Submission of Contributions. Unless You explicitly state otherwise, | |||
any Contribution intentionally submitted for inclusion in the Work | |||
by You to the Licensor shall be under the terms and conditions of | |||
this License, without any additional terms or conditions. | |||
Notwithstanding the above, nothing herein shall supersede or modify | |||
the terms of any separate license agreement you may have executed | |||
with Licensor regarding such Contributions. | |||
6. Trademarks. This License does not grant permission to use the trade | |||
names, trademarks, service marks, or product names of the Licensor, | |||
except as required for reasonable and customary use in describing the | |||
origin of the Work and reproducing the content of the NOTICE file. | |||
7. Disclaimer of Warranty. Unless required by applicable law or | |||
agreed to in writing, Licensor provides the Work (and each | |||
Contributor provides its Contributions) on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
implied, including, without limitation, any warranties or conditions | |||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
PARTICULAR PURPOSE. You are solely responsible for determining the | |||
appropriateness of using or redistributing the Work and assume any | |||
risks associated with Your exercise of permissions under this License. | |||
8. Limitation of Liability. In no event and under no legal theory, | |||
whether in tort (including negligence), contract, or otherwise, | |||
unless required by applicable law (such as deliberate and grossly | |||
negligent acts) or agreed to in writing, shall any Contributor be | |||
liable to You for damages, including any direct, indirect, special, | |||
incidental, or consequential damages of any character arising as a | |||
result of this License or out of the use or inability to use the | |||
Work (including but not limited to damages for loss of goodwill, | |||
work stoppage, computer failure or malfunction, or any and all | |||
other commercial damages or losses), even if such Contributor | |||
has been advised of the possibility of such damages. | |||
9. Accepting Warranty or Additional Liability. While redistributing | |||
the Work or Derivative Works thereof, You may choose to offer, | |||
and charge a fee for, acceptance of support, warranty, indemnity, | |||
or other liability obligations and/or rights consistent with this | |||
License. However, in accepting such obligations, You may act only | |||
on Your own behalf and on Your sole responsibility, not on behalf | |||
of any other Contributor, and only if You agree to indemnify, | |||
defend, and hold each Contributor harmless for any liability | |||
incurred by, or claims asserted against, such Contributor by reason | |||
of your accepting any such warranty or additional liability. | |||
END OF TERMS AND CONDITIONS | |||
APPENDIX: How to apply the Apache License to your work. | |||
To apply the Apache License to your work, attach the following | |||
boilerplate notice, with the fields enclosed by brackets "[]" | |||
replaced with your own identifying information. (Don't include | |||
the brackets!) The text should be enclosed in the appropriate | |||
comment syntax for the file format. We also recommend that a | |||
file or class name and description of purpose be included on the | |||
same "printed page" as the copyright notice for easier | |||
identification within third-party archives. | |||
Copyright [yyyy] [name of copyright owner] | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. |
@@ -0,0 +1,23 @@ | |||
Prometheus instrumentation library for Go applications | |||
Copyright 2012-2015 The Prometheus Authors | |||
This product includes software developed at | |||
SoundCloud Ltd. (http://soundcloud.com/). | |||
The following components are included in this product: | |||
perks - a fork of https://github.com/bmizerany/perks | |||
https://github.com/beorn7/perks | |||
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein | |||
See https://github.com/beorn7/perks/blob/master/README.md for license details. | |||
Go support for Protocol Buffers - Google's data interchange format | |||
http://github.com/golang/protobuf/ | |||
Copyright 2010 The Go Authors | |||
See source code for license details. | |||
Support for streaming Protocol Buffer messages for the Go language (golang). | |||
https://github.com/matttproud/golang_protobuf_extensions | |||
Copyright 2013 Matt T. Proud | |||
Licensed under the Apache License, Version 2.0 |
@@ -0,0 +1,120 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Registerer.Register.
//
// Both methods are invoked by the registry and must be safe for concurrent
// use, as documented on each method below.
//
// The stock metrics provided by this package (Gauge, Counter, Summary,
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
// namely itself). An implementer of Collector may, however, collect multiple
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
// for collectors already implemented in this library are the metric vectors
// (i.e. collection of multiple instances of the same Metric but with different
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
	// Describe sends the super-set of all possible descriptors of metrics
	// collected by this Collector to the provided channel and returns once
	// the last descriptor has been sent. The sent descriptors fulfill the
	// consistency and uniqueness requirements described in the Desc
	// documentation.
	//
	// It is valid if one and the same Collector sends duplicate
	// descriptors. Those duplicates are simply ignored. However, two
	// different Collectors must not send duplicate descriptors.
	//
	// Sending no descriptor at all marks the Collector as “unchecked”,
	// i.e. no checks will be performed at registration time, and the
	// Collector may yield any Metric it sees fit in its Collect method.
	//
	// This method idempotently sends the same descriptors throughout the
	// lifetime of the Collector. It may be called concurrently and
	// therefore must be implemented in a concurrency safe way.
	//
	// If a Collector encounters an error while executing this method, it
	// must send an invalid descriptor (created with NewInvalidDesc) to
	// signal the error to the registry.
	Describe(chan<- *Desc)
	// Collect is called by the Prometheus registry when collecting
	// metrics. The implementation sends each collected metric via the
	// provided channel and returns once the last metric has been sent. The
	// descriptor of each sent metric is one of those returned by Describe
	// (unless the Collector is unchecked, see above). Returned metrics that
	// share the same descriptor must differ in their variable label
	// values.
	//
	// This method may be called concurrently and must therefore be
	// implemented in a concurrency safe way. Blocking occurs at the expense
	// of total performance of rendering all registered metrics. Ideally,
	// Collector implementations support concurrent readers.
	Collect(chan<- Metric)
}
// DescribeByCollect is a helper to implement the Describe method of a custom
// Collector. It collects the metrics from the provided Collector and sends
// their descriptors to the provided channel.
//
// If a Collector collects the same metrics throughout its lifetime, its
// Describe method can simply be implemented as:
//
//   func (c customCollector) Describe(ch chan<- *Desc) {
//   	DescribeByCollect(c, ch)
//   }
//
// However, this will not work if the metrics collected change dynamically over
// the lifetime of the Collector in a way that their combined set of descriptors
// changes as well. The shortcut implementation will then violate the contract
// of the Describe method. If a Collector sometimes collects no metrics at all
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
// metrics after a metric with a fully specified label set has been accessed),
// it might even get registered as an unchecked Collector (cf. the Register
// method of the Registerer interface). Hence, only use this shortcut
// implementation of Describe if you are certain to fulfill the contract.
//
// The Collector example demonstrates a use of DescribeByCollect.
func DescribeByCollect(c Collector, descs chan<- *Desc) {
	metrics := make(chan Metric)
	// Collect in a separate goroutine so that we can drain the channel
	// here; Collect returns only after sending its last metric, and it
	// closes the channel afterwards to terminate the range loop below.
	go func() {
		c.Collect(metrics)
		close(metrics)
	}()
	for m := range metrics {
		descs <- m.Desc()
	}
}
// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
type selfCollector struct {
	// self is the one Metric this collector describes and collects.
	self Metric
}
// init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
//
// The assignment is not synchronized, so init must be called once during
// construction, before the metric is registered or collected.
func (c *selfCollector) init(self Metric) {
	c.self = self
}
// Describe implements Collector. It sends the descriptor of the single
// wrapped metric.
func (c *selfCollector) Describe(ch chan<- *Desc) {
	ch <- c.self.Desc()
}
// Collect implements Collector. It sends the single wrapped metric itself.
func (c *selfCollector) Collect(ch chan<- Metric) {
	ch <- c.self
}
@@ -0,0 +1,277 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"errors" | |||
"math" | |||
"sync/atomic" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// Counter is a Metric that represents a single numerical value that only ever
// goes up. That implies that it cannot be used to count items whose number can
// also go down, e.g. the number of currently running goroutines. Those
// "counters" are represented by Gauges.
//
// A Counter is also a Collector that collects itself (see Collector).
//
// A Counter is typically used to count requests served, tasks completed, errors
// occurred, etc.
//
// To create Counter instances, use NewCounter.
type Counter interface {
	Metric
	Collector
	// Inc increments the counter by 1. Use Add to increment it by arbitrary
	// non-negative values.
	Inc()
	// Add adds the given value to the counter. It panics if the value is <
	// 0.
	Add(float64)
}
// CounterOpts is an alias for Opts. See there for doc comments. It exists so
// that all metric constructors take a type named after the metric they create.
type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts. | |||
// | |||
// The returned implementation tracks the counter value in two separate | |||
// variables, a float64 and a uint64. The latter is used to track calls of the | |||
// Inc method and calls of the Add method with a value that can be represented | |||
// as a uint64. This allows atomic increments of the counter with optimal | |||
// performance. (It is common to have an Inc call in very hot execution paths.) | |||
// Both internal tracking values are added up in the Write method. This has to | |||
// be taken into account when it comes to precision and overflow behavior. | |||
func NewCounter(opts CounterOpts) Counter { | |||
desc := NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
nil, | |||
opts.ConstLabels, | |||
) | |||
result := &counter{desc: desc, labelPairs: desc.constLabelPairs} | |||
result.init(result) // Init self-collection. | |||
return result | |||
} | |||
type counter struct {
	// valBits contains the bits of the represented float64 value, while
	// valInt stores values that are exact integers. Both have to go first
	// in the struct to guarantee alignment for atomic operations.
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	// Do NOT reorder these fields.
	valBits uint64
	valInt  uint64

	// selfCollector lets the counter act as its own Collector.
	selfCollector

	// desc is the immutable descriptor of this counter.
	desc *Desc
	// labelPairs holds the precomputed DTO label pairs emitted by Write.
	labelPairs []*dto.LabelPair
}
// Desc returns the descriptor of the counter, implementing the Metric
// interface.
func (c *counter) Desc() *Desc {
	return c.desc
}
// Add adds v to the counter. It panics if v is negative (counters may only
// go up).
//
// Fast path: if v is an exact non-negative integer, it is accumulated into
// valInt with a single atomic add. Otherwise the float accumulator valBits is
// updated via a compare-and-swap loop. Write sums both accumulators.
func (c *counter) Add(v float64) {
	if v < 0 {
		panic(errors.New("counter cannot decrease in value"))
	}
	ival := uint64(v)
	if float64(ival) == v {
		// v round-trips through uint64 exactly, so take the cheap
		// integer path.
		atomic.AddUint64(&c.valInt, ival)
		return
	}
	// Lock-free float update: retry until no other goroutine has modified
	// valBits between our load and the CAS.
	for {
		oldBits := atomic.LoadUint64(&c.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
			return
		}
	}
}
// Inc increments the counter by 1 using a single atomic add on the integer
// accumulator — the hot path optimized by NewCounter's two-variable scheme.
func (c *counter) Inc() {
	atomic.AddUint64(&c.valInt, 1)
}
// Write implements Metric. The reported value is the sum of the float and
// integer accumulators (see NewCounter for the precision/overflow caveats of
// combining them).
func (c *counter) Write(out *dto.Metric) error {
	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
	ival := atomic.LoadUint64(&c.valInt)
	val := fval + float64(ival)
	return populateMetric(CounterValue, val, c.labelPairs, out)
}
// CounterVec is a Collector that bundles a set of Counters that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
//
// The embedded *metricVec provides the label lookup, currying, and deletion
// machinery shared by all metric vector types.
type CounterVec struct {
	*metricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and | |||
// partitioned by the given label names. | |||
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { | |||
desc := NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
labelNames, | |||
opts.ConstLabels, | |||
) | |||
return &CounterVec{ | |||
metricVec: newMetricVec(desc, func(lvs ...string) Metric { | |||
if len(lvs) != len(desc.variableLabels) { | |||
panic(errInconsistentCardinality) | |||
} | |||
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} | |||
result.init(result) // Init self-collection. | |||
return result | |||
}), | |||
} | |||
} | |||
// GetMetricWithLabelValues returns the Counter for the given slice of label | |||
// values (same order as the VariableLabels in Desc). If that combination of | |||
// label values is accessed for the first time, a new Counter is created. | |||
// | |||
// It is possible to call this method without using the returned Counter to only | |||
// create the new Counter but leave it at its starting value 0. See also the | |||
// SummaryVec example. | |||
// | |||
// Keeping the Counter for later use is possible (and should be considered if | |||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and | |||
// Delete can be used to delete the Counter from the CounterVec. In that case, | |||
// the Counter will still exist, but it will not be exported anymore, even if a | |||
// Counter with the same label values is created later. | |||
// | |||
// An error is returned if the number of label values is not the same as the | |||
// number of VariableLabels in Desc (minus any curried labels). | |||
// | |||
// Note that for more than one label value, this method is prone to mistakes | |||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as | |||
// an alternative to avoid that type of mistake. For higher label numbers, the | |||
// latter has a much more readable (albeit more verbose) syntax, but it comes | |||
// with a performance overhead (for creating and processing the Labels map). | |||
// See also the GaugeVec example. | |||
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { | |||
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) | |||
if metric != nil { | |||
return metric.(Counter), err | |||
} | |||
return nil, err | |||
} | |||
// GetMetricWith returns the Counter for the given Labels map (the label names | |||
// must match those of the VariableLabels in Desc). If that label map is | |||
// accessed for the first time, a new Counter is created. Implications of | |||
// creating a Counter without using it and keeping the Counter for later use are | |||
// the same as for GetMetricWithLabelValues. | |||
// | |||
// An error is returned if the number and names of the Labels are inconsistent | |||
// with those of the VariableLabels in Desc (minus any curried labels). | |||
// | |||
// This method is used for the same purpose as | |||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two | |||
// methods. | |||
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { | |||
metric, err := v.metricVec.getMetricWith(labels) | |||
if metric != nil { | |||
return metric.(Counter), err | |||
} | |||
return nil, err | |||
} | |||
// WithLabelValues works as GetMetricWithLabelValues, but panics where | |||
// GetMetricWithLabelValues would have returned an error. Not returning an | |||
// error allows shortcuts like | |||
// myVec.WithLabelValues("404", "GET").Add(42) | |||
func (v *CounterVec) WithLabelValues(lvs ...string) Counter { | |||
c, err := v.GetMetricWithLabelValues(lvs...) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return c | |||
} | |||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have | |||
// returned an error. Not returning an error allows shortcuts like | |||
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) | |||
func (v *CounterVec) With(labels Labels) Counter { | |||
c, err := v.GetMetricWith(labels) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return c | |||
} | |||
// CurryWith returns a vector curried with the provided labels, i.e. the | |||
// returned vector has those labels pre-set for all labeled operations performed | |||
// on it. The cardinality of the curried vector is reduced accordingly. The | |||
// order of the remaining labels stays the same (just with the curried labels | |||
// taken out of the sequence – which is relevant for the | |||
// (GetMetric)WithLabelValues methods). It is possible to curry a curried | |||
// vector, but only with labels not yet used for currying before. | |||
// | |||
// The metrics contained in the CounterVec are shared between the curried and | |||
// uncurried vectors. They are just accessed differently. Curried and uncurried | |||
// vectors behave identically in terms of collection. Only one must be | |||
// registered with a given registry (usually the uncurried version). The Reset | |||
// method deletes all metrics, even if called on a curried vector. | |||
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { | |||
vec, err := v.curryWith(labels) | |||
if vec != nil { | |||
return &CounterVec{vec}, err | |||
} | |||
return nil, err | |||
} | |||
// MustCurryWith works as CurryWith but panics where CurryWith would have | |||
// returned an error. | |||
func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { | |||
vec, err := v.CurryWith(labels) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return vec | |||
} | |||
// CounterFunc is a Counter whose value is determined at collect time by calling a
// provided function (see NewCounterFunc). It therefore exposes only the Metric
// and Collector interfaces — there are no Inc/Add methods.
//
// To create CounterFunc instances, use NewCounterFunc.
type CounterFunc interface {
	Metric
	Collector
}
// NewCounterFunc creates a new CounterFunc based on the provided | |||
// CounterOpts. The value reported is determined by calling the given function | |||
// from within the Write method. Take into account that metric collection may | |||
// happen concurrently. If that results in concurrent calls to Write, like in | |||
// the case where a CounterFunc is directly registered with Prometheus, the | |||
// provided function must be concurrency-safe. The function should also honor | |||
// the contract for a Counter (values only go up, not down), but compliance will | |||
// not be checked. | |||
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { | |||
return newValueFunc(NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
nil, | |||
opts.ConstLabels, | |||
), CounterValue, function) | |||
} |
@@ -0,0 +1,184 @@ | |||
// Copyright 2016 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"errors" | |||
"fmt" | |||
"sort" | |||
"strings" | |||
"github.com/golang/protobuf/proto" | |||
"github.com/prometheus/common/model" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
// deal with Desc if they use advanced features like the ExpvarCollector or
// custom Collectors and Metrics.
//
// Descriptors registered with the same registry have to fulfill certain
// consistency and uniqueness criteria if they share the same fully-qualified
// name: They must have the same help string and the same label names (aka label
// dimensions) in each, constLabels and variableLabels, but they must differ in
// the values of the constLabels.
//
// Descriptors that share the same fully-qualified names and the same label
// values of their constLabels are considered equal.
//
// Use NewDesc to create new Desc instances.
type Desc struct {
	// fqName has been built from Namespace, Subsystem, and Name.
	fqName string
	// help provides some helpful information about this metric.
	help string
	// constLabelPairs contains precalculated DTO label pairs based on
	// the constant labels, sorted by label name.
	constLabelPairs []*dto.LabelPair
	// variableLabels contains names of labels for which the metric
	// maintains variable values.
	variableLabels []string
	// id is a hash of the values of the ConstLabels and fqName. This
	// must be unique among all registered descriptors and can therefore be
	// used as an identifier of the descriptor.
	id uint64
	// dimHash is a hash of the label names (preset and variable) and the
	// Help string. Each Desc with the same fqName must have the same
	// dimHash.
	dimHash uint64
	// err is an error that occurred during construction. It is reported on
	// registration time.
	err error
}
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
	d := &Desc{
		fqName:         fqName,
		help:           help,
		variableLabels: variableLabels,
	}
	if !model.IsValidMetricName(model.LabelValue(fqName)) {
		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
		return d
	}
	// labelValues contains the label values of const labels (in order of
	// their sorted label names) plus the fqName (at position 0).
	labelValues := make([]string, 1, len(constLabels)+1)
	labelValues[0] = fqName
	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
	labelNameSet := map[string]struct{}{}
	// First add only the const label names and sort them...
	for labelName := range constLabels {
		if !checkLabelName(labelName) {
			d.err = fmt.Errorf("%q is not a valid label name", labelName)
			return d
		}
		labelNames = append(labelNames, labelName)
		labelNameSet[labelName] = struct{}{}
	}
	sort.Strings(labelNames)
	// ... so that we can now add const label values in the order of their names.
	for _, labelName := range labelNames {
		labelValues = append(labelValues, constLabels[labelName])
	}
	// Validate the const label values. They can't have a wrong cardinality, so
	// use len(labelValues) as expectedNumberOfValues.
	if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
		d.err = err
		return d
	}
	// Now add the variable label names, but prefix them with something that
	// cannot be in a regular label name. That prevents matching the label
	// dimension with a different mix between preset and variable labels.
	for _, labelName := range variableLabels {
		if !checkLabelName(labelName) {
			d.err = fmt.Errorf("%q is not a valid label name", labelName)
			return d
		}
		labelNames = append(labelNames, "$"+labelName)
		labelNameSet[labelName] = struct{}{}
	}
	// Any name present in both constLabels and variableLabels (or twice in
	// variableLabels) shrinks the set relative to the slice.
	if len(labelNames) != len(labelNameSet) {
		d.err = errors.New("duplicate label names")
		return d
	}
	// id hashes fqName plus the const label values, in sorted-name order.
	vh := hashNew()
	for _, val := range labelValues {
		vh = hashAdd(vh, val)
		vh = hashAddByte(vh, separatorByte)
	}
	d.id = vh
	// Sort labelNames so that order doesn't matter for the hash.
	sort.Strings(labelNames)
	// Now hash together (in this order) the help string and the sorted
	// label names.
	lh := hashNew()
	lh = hashAdd(lh, help)
	lh = hashAddByte(lh, separatorByte)
	for _, labelName := range labelNames {
		lh = hashAdd(lh, labelName)
		lh = hashAddByte(lh, separatorByte)
	}
	d.dimHash = lh
	// Precompute the DTO label pairs for the const labels, sorted by name.
	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
	for n, v := range constLabels {
		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
			Name:  proto.String(n),
			Value: proto.String(v),
		})
	}
	sort.Sort(labelPairSorter(d.constLabelPairs))
	return d
}
// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the | |||
// provided error set. If a collector returning such a descriptor is registered, | |||
// registration will fail with the provided error. NewInvalidDesc can be used by | |||
// a Collector to signal inability to describe itself. | |||
func NewInvalidDesc(err error) *Desc { | |||
return &Desc{ | |||
err: err, | |||
} | |||
} | |||
func (d *Desc) String() string { | |||
lpStrings := make([]string, 0, len(d.constLabelPairs)) | |||
for _, lp := range d.constLabelPairs { | |||
lpStrings = append( | |||
lpStrings, | |||
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), | |||
) | |||
} | |||
return fmt.Sprintf( | |||
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", | |||
d.fqName, | |||
d.help, | |||
strings.Join(lpStrings, ","), | |||
d.variableLabels, | |||
) | |||
} |
@@ -0,0 +1,201 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Package prometheus is the core instrumentation package. It provides metrics | |||
// primitives to instrument code for monitoring. It also offers a registry for | |||
// metrics. Sub-packages allow to expose the registered metrics via HTTP | |||
// (package promhttp) or push them to a Pushgateway (package push). There is | |||
// also a sub-package promauto, which provides metrics constructors with | |||
// automatic registration. | |||
// | |||
// All exported functions and methods are safe to be used concurrently unless | |||
// specified otherwise. | |||
// | |||
// A Basic Example | |||
// | |||
// As a starting point, a very basic usage example: | |||
// | |||
// package main | |||
// | |||
// import ( | |||
// "log" | |||
// "net/http" | |||
// | |||
// "github.com/prometheus/client_golang/prometheus" | |||
// "github.com/prometheus/client_golang/prometheus/promhttp" | |||
// ) | |||
// | |||
// var ( | |||
// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ | |||
// Name: "cpu_temperature_celsius", | |||
// Help: "Current temperature of the CPU.", | |||
// }) | |||
// hdFailures = prometheus.NewCounterVec( | |||
// prometheus.CounterOpts{ | |||
// Name: "hd_errors_total", | |||
// Help: "Number of hard-disk errors.", | |||
// }, | |||
// []string{"device"}, | |||
// ) | |||
// ) | |||
// | |||
// func init() { | |||
// // Metrics have to be registered to be exposed: | |||
// prometheus.MustRegister(cpuTemp) | |||
// prometheus.MustRegister(hdFailures) | |||
// } | |||
// | |||
// func main() { | |||
// cpuTemp.Set(65.3) | |||
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() | |||
// | |||
// // The Handler function provides a default handler to expose metrics | |||
// // via an HTTP server. "/metrics" is the usual endpoint for that. | |||
// http.Handle("/metrics", promhttp.Handler()) | |||
// log.Fatal(http.ListenAndServe(":8080", nil)) | |||
// } | |||
// | |||
// | |||
// This is a complete program that exports two metrics, a Gauge and a Counter, | |||
// the latter with a label attached to turn it into a (one-dimensional) vector. | |||
// | |||
// Metrics | |||
// | |||
// The number of exported identifiers in this package might appear a bit | |||
// overwhelming. However, in addition to the basic plumbing shown in the example | |||
// above, you only need to understand the different metric types and their | |||
// vector versions for basic usage. Furthermore, if you are not concerned with | |||
// fine-grained control of when and how to register metrics with the registry, | |||
// have a look at the promauto package, which will effectively allow you to | |||
// ignore registration altogether in simple cases. | |||
// | |||
// Above, you have already touched the Counter and the Gauge. There are two more | |||
// advanced metric types: the Summary and Histogram. A more thorough description | |||
// of those four metric types can be found in the Prometheus docs: | |||
// https://prometheus.io/docs/concepts/metric_types/ | |||
// | |||
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the | |||
// Prometheus server not to assume anything about its type. | |||
// | |||
// In addition to the fundamental metric types Gauge, Counter, Summary, | |||
// Histogram, and Untyped, a very important part of the Prometheus data model is | |||
// the partitioning of samples along dimensions called labels, which results in | |||
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, | |||
// HistogramVec, and UntypedVec. | |||
// | |||
// While only the fundamental metric types implement the Metric interface, both | |||
// the metrics and their vector versions implement the Collector interface. A | |||
// Collector manages the collection of a number of Metrics, but for convenience, | |||
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, | |||
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, | |||
// SummaryVec, HistogramVec, and UntypedVec are not. | |||
// | |||
// To create instances of Metrics and their vector versions, you need a suitable | |||
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or | |||
// UntypedOpts. | |||
// | |||
// Custom Collectors and constant Metrics | |||
// | |||
// While you could create your own implementations of Metric, most likely you | |||
// will only ever implement the Collector interface on your own. At a first | |||
// glance, a custom Collector seems handy to bundle Metrics for common | |||
// registration (with the prime example of the different metric vectors above, | |||
// which bundle all the metrics of the same name but with different labels). | |||
// | |||
// There is a more involved use case, too: If you already have metrics | |||
// available, created outside of the Prometheus context, you don't need the | |||
// interface of the various Metric types. You essentially want to mirror the | |||
// existing numbers into Prometheus Metrics during collection. An own | |||
// implementation of the Collector interface is perfect for that. You can create | |||
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and | |||
// NewConstSummary (and their respective Must… versions). That will happen in | |||
// the Collect method. The Describe method has to return separate Desc | |||
// instances, representative of the “throw-away” metrics to be created later. | |||
// NewDesc comes in handy to create those Desc instances. Alternatively, you | |||
// could return no Desc at all, which will mark the Collector “unchecked”. No | |||
// checks are performed at registration time, but metric consistency will still | |||
// be ensured at scrape time, i.e. any inconsistencies will lead to scrape | |||
// errors. Thus, with unchecked Collectors, the responsibility to not collect | |||
// metrics that lead to inconsistencies in the total scrape result lies with the | |||
// implementer of the Collector. While this is not a desirable state, it is | |||
// sometimes necessary. The typical use case is a situation where the exact
// metrics to be returned by a Collector cannot be predicted at registration | |||
// time, but the implementer has sufficient knowledge of the whole system to | |||
// guarantee metric consistency. | |||
// | |||
// The Collector example illustrates the use case. You can also look at the | |||
// source code of the processCollector (mirroring process metrics), the | |||
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar | |||
// metrics) as examples that are used in this package itself. | |||
// | |||
// If you just need to call a function to get a single float value to collect as | |||
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting | |||
// shortcuts. | |||
// | |||
// Advanced Uses of the Registry | |||
// | |||
// While MustRegister is the by far most common way of registering a Collector, | |||
// sometimes you might want to handle the errors the registration might cause. | |||
// As suggested by the name, MustRegister panics if an error occurs. With the | |||
// Register function, the error is returned and can be handled. | |||
// | |||
// An error is returned if the registered Collector is incompatible or | |||
// inconsistent with already registered metrics. The registry aims for | |||
// consistency of the collected metrics according to the Prometheus data model. | |||
// Inconsistencies are ideally detected at registration time, not at collect | |||
// time. The former will usually be detected at start-up time of a program, | |||
// while the latter will only happen at scrape time, possibly not even on the | |||
// first scrape if the inconsistency only becomes relevant later. That is the | |||
// main reason why a Collector and a Metric have to describe themselves to the | |||
// registry. | |||
// | |||
// So far, everything we did operated on the so-called default registry, as it | |||
// can be found in the global DefaultRegisterer variable. With NewRegistry, you | |||
// can create a custom registry, or you can even implement the Registerer or | |||
// Gatherer interfaces yourself. The methods Register and Unregister work in the | |||
// same way on a custom registry as the global functions Register and Unregister | |||
// on the default registry. | |||
// | |||
// There are a number of uses for custom registries: You can use registries with | |||
// special properties, see NewPedanticRegistry. You can avoid global state, as | |||
// it is imposed by the DefaultRegisterer. You can use multiple registries at | |||
// the same time to expose different metrics in different ways. You can use | |||
// separate registries for testing purposes. | |||
// | |||
// Also note that the DefaultRegisterer comes registered with a Collector for Go | |||
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via | |||
// NewProcessCollector). With a custom registry, you are in control and decide | |||
// yourself about the Collectors to register. | |||
// | |||
// HTTP Exposition | |||
// | |||
// The Registry implements the Gatherer interface. The caller of the Gather | |||
// method can then expose the gathered metrics in some way. Usually, the metrics | |||
// are served via HTTP on the /metrics endpoint. That's happening in the example | |||
// above. The tools to expose metrics via HTTP are in the promhttp sub-package. | |||
// (The top-level functions in the prometheus package are deprecated.) | |||
// | |||
// Pushing to the Pushgateway | |||
// | |||
// Function for pushing to the Pushgateway can be found in the push sub-package. | |||
// | |||
// Graphite Bridge | |||
// | |||
// Functions and examples to push metrics from a Gatherer to Graphite can be | |||
// found in the graphite sub-package. | |||
// | |||
// Other Means of Exposition | |||
// | |||
// More ways of exposing metrics can easily be added by following the approaches | |||
// of the existing implementations. | |||
package prometheus |
@@ -0,0 +1,119 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"encoding/json" | |||
"expvar" | |||
) | |||
// expvarCollector implements Collector by proxying values registered with
// the standard library's expvar package as Prometheus metrics. See
// NewExpvarCollector for the mapping rules.
type expvarCollector struct {
	// exports maps expvar keys to the Desc used to export each of them.
	exports map[string]*Desc
}
// NewExpvarCollector returns a newly allocated expvar Collector that still has
// to be registered with a Prometheus registry.
//
// An expvar Collector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the expvar Collector is inherently slower
// than native Prometheus metrics. Thus, the expvar Collector is probably great
// for experiments and prototyping, but you should seriously consider a more
// direct implementation of Prometheus metrics for monitoring production
// systems.
//
// The exports map has the following meaning:
//
// The keys in the map correspond to expvar keys, i.e. for every expvar key you
// want to export as Prometheus metric, you need an entry in the exports
// map. The descriptor mapped to each key describes how to export the expvar
// value. It defines the name and the help string of the Prometheus metric
// proxying the expvar value. The type will always be Untyped.
//
// For descriptors without variable labels, the expvar value must be a number or
// a bool. The number is then directly exported as the Prometheus sample
// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
// that are not numbers or bools are silently ignored.
//
// If the descriptor has one variable label, the expvar value must be an expvar
// map. The keys in the expvar map become the various values of the one
// Prometheus label. The values in the expvar map must be numbers or bools again
// as above.
//
// For descriptors with more than one variable label, the expvar must be a
// nested expvar map, i.e. where the values of the topmost map are maps again
// etc. until a depth is reached that corresponds to the number of labels. The
// leaves of that structure must be numbers or bools as above to serve as the
// sample values.
//
// Anything that does not fit into the scheme above is silently ignored.
func NewExpvarCollector(exports map[string]*Desc) Collector {
	return &expvarCollector{
		exports: exports,
	}
}
// Describe implements Collector. | |||
func (e *expvarCollector) Describe(ch chan<- *Desc) { | |||
for _, desc := range e.exports { | |||
ch <- desc | |||
} | |||
} | |||
// Collect implements Collector. For every configured export it fetches the
// expvar value by name, decodes its JSON string representation, and walks
// the (possibly nested) result to emit one Untyped metric per leaf value.
func (e *expvarCollector) Collect(ch chan<- Metric) {
	for name, desc := range e.exports {
		var m Metric
		expVar := expvar.Get(name)
		if expVar == nil {
			// The expvar key does not exist (yet); nothing to export.
			continue
		}
		var v interface{}
		labels := make([]string, len(desc.variableLabels))
		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
			ch <- NewInvalidMetric(desc, err)
			continue
		}
		// processValue recursively descends nested expvar maps. i counts
		// how many label values have been filled in so far; once it equals
		// len(labels), v must be a leaf (number or bool) and a metric is
		// emitted. Anything that does not fit the scheme is silently
		// ignored, as documented on NewExpvarCollector.
		var processValue func(v interface{}, i int)
		processValue = func(v interface{}, i int) {
			if i >= len(labels) {
				// labels is mutated and reused across recursive calls,
				// so copy it before handing it to the metric.
				copiedLabels := append(make([]string, 0, len(labels)), labels...)
				switch v := v.(type) {
				case float64:
					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
				case bool:
					if v {
						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
					} else {
						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
					}
				default:
					// Neither a number nor a bool: silently ignored.
					return
				}
				ch <- m
				return
			}
			vm, ok := v.(map[string]interface{})
			if !ok {
				// More labels expected but the value is not a map: ignored.
				return
			}
			for lv, val := range vm {
				labels[i] = lv
				processValue(val, i+1)
			}
		}
		processValue(v, 0)
	}
}
@@ -0,0 +1,42 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
// Inline and byte-free variant of hash/fnv's fnv64a.

const (
	// offset64 is the FNV-1a 64-bit offset basis.
	offset64 = 14695981039346656037
	// prime64 is the FNV-1a 64-bit prime.
	prime64 = 1099511628211
)

// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
	return offset64
}

// hashAdd folds every byte of s into the fnv64a hash h and returns the
// updated hash.
func hashAdd(h uint64, s string) uint64 {
	for _, b := range []byte(s) {
		h = (h ^ uint64(b)) * prime64
	}
	return h
}

// hashAddByte folds a single byte into the fnv64a hash h and returns the
// updated hash.
func hashAddByte(h uint64, b byte) uint64 {
	return (h ^ uint64(b)) * prime64
}
@@ -0,0 +1,286 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"math" | |||
"sync/atomic" | |||
"time" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
// A Gauge is typically used for measured values like temperatures or current
// memory usage, but also "counts" that can go up and down, like the number of
// running goroutines.
//
// To create Gauge instances, use NewGauge.
type Gauge interface {
	Metric
	Collector

	// Set sets the Gauge to an arbitrary value.
	Set(float64)
	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
	// values.
	Inc()
	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
	// values.
	Dec()
	// Add adds the given value to the Gauge. (The value can be negative,
	// resulting in a decrease of the Gauge.)
	Add(float64)
	// Sub subtracts the given value from the Gauge. (The value can be
	// negative, resulting in an increase of the Gauge.)
	Sub(float64)
	// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
	SetToCurrentTime()
}
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
// NewGauge creates a new Gauge based on the provided GaugeOpts. | |||
// | |||
// The returned implementation is optimized for a fast Set method. If you have a | |||
// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick | |||
// the former. For example, the Inc method of the returned Gauge is slower than | |||
// the Inc method of a Counter returned by NewCounter. This matches the typical | |||
// scenarios for Gauges and Counters, where the former tends to be Set-heavy and | |||
// the latter Inc-heavy. | |||
func NewGauge(opts GaugeOpts) Gauge { | |||
desc := NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
nil, | |||
opts.ConstLabels, | |||
) | |||
result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} | |||
result.init(result) // Init self-collection. | |||
return result | |||
} | |||
// gauge is the internal Gauge implementation. The current value is stored as
// the IEEE 754 bit pattern of a float64 so that it can be read and updated
// with sync/atomic operations.
type gauge struct {
	// valBits contains the bits of the represented float64 value. It has
	// to go first in the struct to guarantee alignment for atomic
	// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	valBits uint64

	selfCollector

	desc *Desc
	// labelPairs is written out on every Write call; it holds the constant
	// label pairs (NewGauge) or the precomputed pairs for one label-value
	// combination (NewGaugeVec).
	labelPairs []*dto.LabelPair
}
// Desc implements Metric.
func (g *gauge) Desc() *Desc {
	return g.desc
}
// Set implements Gauge. The float64 is stored atomically as its bit
// pattern, so Set is safe to call concurrently with Add and Write.
func (g *gauge) Set(val float64) {
	atomic.StoreUint64(&g.valBits, math.Float64bits(val))
}
func (g *gauge) SetToCurrentTime() { | |||
g.Set(float64(time.Now().UnixNano()) / 1e9) | |||
} | |||
// Inc implements Gauge. It is equivalent to Add(1).
func (g *gauge) Inc() {
	g.Add(1)
}
// Dec implements Gauge. It is equivalent to Add(-1).
func (g *gauge) Dec() {
	g.Add(-1)
}
// Add implements Gauge. It adds val atomically via a compare-and-swap loop
// on the float64 bit pattern: if another goroutine updates the value between
// the load and the swap, the CAS fails and the loop retries.
func (g *gauge) Add(val float64) {
	for {
		oldBits := atomic.LoadUint64(&g.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
			return
		}
	}
}
func (g *gauge) Sub(val float64) { | |||
g.Add(val * -1) | |||
} | |||
// Write implements Metric. It reads the current value atomically, so it is
// safe to call concurrently with Set and Add.
func (g *gauge) Write(out *dto.Metric) error {
	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
	return populateMetric(GaugeValue, val, g.labelPairs, out)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
// Desc, but have different values for their variable labels. This is used if
// you want to count the same thing partitioned by various dimensions
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
	*metricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and | |||
// partitioned by the given label names. | |||
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { | |||
desc := NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
labelNames, | |||
opts.ConstLabels, | |||
) | |||
return &GaugeVec{ | |||
metricVec: newMetricVec(desc, func(lvs ...string) Metric { | |||
if len(lvs) != len(desc.variableLabels) { | |||
panic(errInconsistentCardinality) | |||
} | |||
result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} | |||
result.init(result) // Init self-collection. | |||
return result | |||
}), | |||
} | |||
} | |||
// GetMetricWithLabelValues returns the Gauge for the given slice of label | |||
// values (same order as the VariableLabels in Desc). If that combination of | |||
// label values is accessed for the first time, a new Gauge is created. | |||
// | |||
// It is possible to call this method without using the returned Gauge to only | |||
// create the new Gauge but leave it at its starting value 0. See also the | |||
// SummaryVec example. | |||
// | |||
// Keeping the Gauge for later use is possible (and should be considered if | |||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and | |||
// Delete can be used to delete the Gauge from the GaugeVec. In that case, the | |||
// Gauge will still exist, but it will not be exported anymore, even if a | |||
// Gauge with the same label values is created later. See also the CounterVec | |||
// example. | |||
// | |||
// An error is returned if the number of label values is not the same as the | |||
// number of VariableLabels in Desc (minus any curried labels). | |||
// | |||
// Note that for more than one label value, this method is prone to mistakes | |||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as | |||
// an alternative to avoid that type of mistake. For higher label numbers, the | |||
// latter has a much more readable (albeit more verbose) syntax, but it comes | |||
// with a performance overhead (for creating and processing the Labels map). | |||
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { | |||
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) | |||
if metric != nil { | |||
return metric.(Gauge), err | |||
} | |||
return nil, err | |||
} | |||
// GetMetricWith returns the Gauge for the given Labels map (the label names | |||
// must match those of the VariableLabels in Desc). If that label map is | |||
// accessed for the first time, a new Gauge is created. Implications of | |||
// creating a Gauge without using it and keeping the Gauge for later use are | |||
// the same as for GetMetricWithLabelValues. | |||
// | |||
// An error is returned if the number and names of the Labels are inconsistent | |||
// with those of the VariableLabels in Desc (minus any curried labels). | |||
// | |||
// This method is used for the same purpose as | |||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two | |||
// methods. | |||
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { | |||
metric, err := v.metricVec.getMetricWith(labels) | |||
if metric != nil { | |||
return metric.(Gauge), err | |||
} | |||
return nil, err | |||
} | |||
// WithLabelValues works as GetMetricWithLabelValues, but panics where | |||
// GetMetricWithLabelValues would have returned an error. Not returning an | |||
// error allows shortcuts like | |||
// myVec.WithLabelValues("404", "GET").Add(42) | |||
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { | |||
g, err := v.GetMetricWithLabelValues(lvs...) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return g | |||
} | |||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have | |||
// returned an error. Not returning an error allows shortcuts like | |||
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) | |||
func (v *GaugeVec) With(labels Labels) Gauge { | |||
g, err := v.GetMetricWith(labels) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return g | |||
} | |||
// CurryWith returns a vector curried with the provided labels, i.e. the | |||
// returned vector has those labels pre-set for all labeled operations performed | |||
// on it. The cardinality of the curried vector is reduced accordingly. The | |||
// order of the remaining labels stays the same (just with the curried labels | |||
// taken out of the sequence – which is relevant for the | |||
// (GetMetric)WithLabelValues methods). It is possible to curry a curried | |||
// vector, but only with labels not yet used for currying before. | |||
// | |||
// The metrics contained in the GaugeVec are shared between the curried and | |||
// uncurried vectors. They are just accessed differently. Curried and uncurried | |||
// vectors behave identically in terms of collection. Only one must be | |||
// registered with a given registry (usually the uncurried version). The Reset | |||
// method deletes all metrics, even if called on a curried vector. | |||
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { | |||
vec, err := v.curryWith(labels) | |||
if vec != nil { | |||
return &GaugeVec{vec}, err | |||
} | |||
return nil, err | |||
} | |||
// MustCurryWith works as CurryWith but panics where CurryWith would have | |||
// returned an error. | |||
func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { | |||
vec, err := v.CurryWith(labels) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return vec | |||
} | |||
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
// provided function.
//
// To create GaugeFunc instances, use NewGaugeFunc.
type GaugeFunc interface {
	Metric
	Collector
}
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The | |||
// value reported is determined by calling the given function from within the | |||
// Write method. Take into account that metric collection may happen | |||
// concurrently. If that results in concurrent calls to Write, like in the case | |||
// where a GaugeFunc is directly registered with Prometheus, the provided | |||
// function must be concurrency-safe. | |||
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { | |||
return newValueFunc(NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
nil, | |||
opts.ConstLabels, | |||
), GaugeValue, function) | |||
} |
@@ -0,0 +1,301 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"fmt" | |||
"runtime" | |||
"runtime/debug" | |||
"time" | |||
) | |||
// goCollector exports metrics about the Go runtime of the current process:
// goroutine and OS-thread counts, GC pause statistics, the Go version, and
// the runtime.MemStats fields listed in metrics.
type goCollector struct {
	goroutinesDesc *Desc
	threadsDesc    *Desc
	gcDesc         *Desc
	goInfoDesc     *Desc

	// metrics to describe and collect
	metrics memStatsMetrics
}
// NewGoCollector returns a collector which exports metrics about the current Go
// process. This includes memory stats. To collect those, runtime.ReadMemStats
// is called. This causes a stop-the-world, which is very short with Go1.9+
// (~25µs). However, with older Go versions, the stop-the-world duration depends
// on the heap size and can be quite significant (~1.7 ms/GiB as per
// https://go-review.googlesource.com/c/go/+/34937).
func NewGoCollector() Collector {
	return &goCollector{
		goroutinesDesc: NewDesc(
			"go_goroutines",
			"Number of goroutines that currently exist.",
			nil, nil),
		threadsDesc: NewDesc(
			"go_threads",
			"Number of OS threads created.",
			nil, nil),
		gcDesc: NewDesc(
			"go_gc_duration_seconds",
			"A summary of the GC invocation durations.",
			nil, nil),
		goInfoDesc: NewDesc(
			"go_info",
			"Information about the Go environment.",
			nil, Labels{"version": runtime.Version()}),
		// One entry per exported runtime.MemStats field; eval extracts the
		// sample value from a MemStats snapshot at collect time.
		metrics: memStatsMetrics{
			{
				desc: NewDesc(
					memstatNamespace("alloc_bytes"),
					"Number of bytes allocated and still in use.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("alloc_bytes_total"),
					"Total number of bytes allocated, even if freed.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
				valType: CounterValue,
			}, {
				desc: NewDesc(
					memstatNamespace("sys_bytes"),
					"Number of bytes obtained from system.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("lookups_total"),
					"Total number of pointer lookups.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
				valType: CounterValue,
			}, {
				desc: NewDesc(
					memstatNamespace("mallocs_total"),
					"Total number of mallocs.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
				valType: CounterValue,
			}, {
				desc: NewDesc(
					memstatNamespace("frees_total"),
					"Total number of frees.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
				valType: CounterValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_alloc_bytes"),
					"Number of heap bytes allocated and still in use.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_sys_bytes"),
					"Number of heap bytes obtained from system.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_idle_bytes"),
					"Number of heap bytes waiting to be used.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_inuse_bytes"),
					"Number of heap bytes that are in use.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_released_bytes"),
					"Number of heap bytes released to OS.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_objects"),
					"Number of allocated objects.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("stack_inuse_bytes"),
					"Number of bytes in use by the stack allocator.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("stack_sys_bytes"),
					"Number of bytes obtained from system for stack allocator.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("mspan_inuse_bytes"),
					"Number of bytes in use by mspan structures.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("mspan_sys_bytes"),
					"Number of bytes used for mspan structures obtained from system.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("mcache_inuse_bytes"),
					"Number of bytes in use by mcache structures.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("mcache_sys_bytes"),
					"Number of bytes used for mcache structures obtained from system.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("buck_hash_sys_bytes"),
					"Number of bytes used by the profiling bucket hash table.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("gc_sys_bytes"),
					"Number of bytes used for garbage collection system metadata.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("other_sys_bytes"),
					"Number of bytes used for other system allocations.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("next_gc_bytes"),
					"Number of heap bytes when next garbage collection will take place.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("last_gc_time_seconds"),
					"Number of seconds since 1970 of last garbage collection.",
					nil, nil,
				),
				// LastGC is nanoseconds since the epoch; convert to seconds.
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("gc_cpu_fraction"),
					"The fraction of this program's available CPU time used by the GC since the program started.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
				valType: GaugeValue,
			},
		},
	}
}
// memstatNamespace prefixes s with the Go memstats metric namespace.
func memstatNamespace(s string) string {
	const format = "go_memstats_%s"
	return fmt.Sprintf(format, s)
}
// Describe returns all descriptions of the collector. | |||
func (c *goCollector) Describe(ch chan<- *Desc) { | |||
ch <- c.goroutinesDesc | |||
ch <- c.threadsDesc | |||
ch <- c.gcDesc | |||
ch <- c.goInfoDesc | |||
for _, i := range c.metrics { | |||
ch <- i.desc | |||
} | |||
} | |||
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
	// With a nil slice, ThreadCreateProfile only reports the profile size,
	// i.e. the number of OS threads created.
	n, _ := runtime.ThreadCreateProfile(nil)
	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))

	// Requesting five pause quantiles yields the 0, 0.25, 0.5, 0.75, and
	// 1.0 quantiles of the GC pause durations.
	var stats debug.GCStats
	stats.PauseQuantiles = make([]time.Duration, 5)
	debug.ReadGCStats(&stats)

	quantiles := make(map[float64]float64)
	for idx, pq := range stats.PauseQuantiles[1:] {
		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
	}
	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)

	// go_info is a constant 1; the Go version travels in its label.
	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)

	// ReadMemStats stops the world; see the NewGoCollector doc comment.
	ms := &runtime.MemStats{}
	runtime.ReadMemStats(ms)
	for _, i := range c.metrics {
		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
	}
}
// memStatsMetrics provide description, value, and value type for memstat metrics.
type memStatsMetrics []struct {
	desc    *Desc
	eval    func(*runtime.MemStats) float64 // extracts the sample value from a MemStats snapshot
	valType ValueType
}
@@ -0,0 +1,614 @@ | |||
// Copyright 2015 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"fmt" | |||
"math" | |||
"runtime" | |||
"sort" | |||
"sync" | |||
"sync/atomic" | |||
"github.com/golang/protobuf/proto" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// A Histogram counts individual observations from an event or sample stream in
// configurable buckets. Similar to a summary, it also provides a sum of
// observations and an observation count.
//
// On the Prometheus server, quantiles can be calculated from a Histogram using
// the histogram_quantile function in the query language.
//
// Note that Histograms, in contrast to Summaries, can be aggregated with the
// Prometheus query language (see the documentation for detailed
// procedures). However, Histograms require the user to pre-define suitable
// buckets, and they are in general less accurate. The Observe method of a
// Histogram has a very low performance overhead in comparison with the Observe
// method of a Summary.
//
// To create Histogram instances, use NewHistogram.
type Histogram interface {
	Metric
	Collector
	// Observe adds a single observation to the histogram.
	Observe(float64)
}
// bucketLabel is used for the label that defines the upper bound of a
// bucket of a histogram ("le" -> "less or equal").
const bucketLabel = "le"
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
var (
	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
	// errBucketLabelNotAllowed is the panic value used when a user-supplied
	// label name collides with the reserved "le" bucket label.
	errBucketLabelNotAllowed = fmt.Errorf(
		"%q is not allowed as label name in histograms", bucketLabel,
	)
)
// LinearBuckets returns 'count' bucket upper bounds, the first at 'start'
// and each subsequent one 'width' higher. The implicit final +Inf bucket
// is not counted and not included in the returned slice. The result is
// meant to be used for the Buckets field of HistogramOpts.
//
// LinearBuckets panics if 'count' is zero or negative.
func LinearBuckets(start, width float64, count int) []float64 {
	if count < 1 {
		panic("LinearBuckets needs a positive count")
	}
	buckets := make([]float64, 0, count)
	for next := start; len(buckets) < count; next += width {
		buckets = append(buckets, next)
	}
	return buckets
}
// ExponentialBuckets returns 'count' bucket upper bounds, the first at
// 'start' and each subsequent one 'factor' times its predecessor. The
// implicit final +Inf bucket is not counted and not included in the
// returned slice. The result is meant to be used for the Buckets field of
// HistogramOpts.
//
// ExponentialBuckets panics if 'count' is 0 or negative, if 'start' is 0
// or negative, or if 'factor' is less than or equal 1.
func ExponentialBuckets(start, factor float64, count int) []float64 {
	switch {
	case count < 1:
		panic("ExponentialBuckets needs a positive count")
	case start <= 0:
		panic("ExponentialBuckets needs a positive start value")
	case factor <= 1:
		panic("ExponentialBuckets needs a factor greater than 1")
	}
	buckets := make([]float64, 0, count)
	for next := start; len(buckets) < count; next *= factor {
		buckets = append(buckets, next)
	}
	return buckets
}
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type HistogramOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Histogram (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Histogram must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string
	// Help provides information about this Histogram.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string
	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
	ConstLabels Labels
	// Buckets defines the buckets into which observations are counted. Each
	// element in the slice is the upper inclusive bound of a bucket. The
	// values must be sorted in strictly increasing order. There is no need
	// to add a highest bucket with +Inf bound, it will be added
	// implicitly. The default value is DefBuckets.
	// (A nil or empty slice is replaced by DefBuckets in newHistogram.)
	Buckets []float64
}
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It | |||
// panics if the buckets in HistogramOpts are not in strictly increasing order. | |||
func NewHistogram(opts HistogramOpts) Histogram { | |||
return newHistogram( | |||
NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
nil, | |||
opts.ConstLabels, | |||
), | |||
opts, | |||
) | |||
} | |||
// newHistogram is the internal constructor shared by NewHistogram and
// HistogramVec. labelValues supplies the values for desc's variable labels;
// their count must match, or the function panics.
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
	if len(desc.variableLabels) != len(labelValues) {
		panic(errInconsistentCardinality)
	}
	// The "le" label is reserved for bucket bounds; it may appear neither
	// as a variable nor as a constant label.
	for _, n := range desc.variableLabels {
		if n == bucketLabel {
			panic(errBucketLabelNotAllowed)
		}
	}
	for _, lp := range desc.constLabelPairs {
		if lp.GetName() == bucketLabel {
			panic(errBucketLabelNotAllowed)
		}
	}
	if len(opts.Buckets) == 0 {
		opts.Buckets = DefBuckets
	}
	h := &histogram{
		desc:        desc,
		upperBounds: opts.Buckets,
		labelPairs:  makeLabelPairs(desc, labelValues),
		counts:      [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
	}
	// Validate that the bounds are strictly increasing, and strip an
	// explicit trailing +Inf bound (the +Inf bucket is always implicit).
	for i, upperBound := range h.upperBounds {
		if i < len(h.upperBounds)-1 {
			if upperBound >= h.upperBounds[i+1] {
				panic(fmt.Errorf(
					"histogram buckets must be in increasing order: %f >= %f",
					upperBound, h.upperBounds[i+1],
				))
			}
		} else {
			if math.IsInf(upperBound, +1) {
				// The +Inf bucket is implicit. Remove it here.
				h.upperBounds = h.upperBounds[:i]
			}
		}
	}
	// Finally we know the final length of h.upperBounds and can make counts
	// for both states:
	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
	h.init(h) // Init self-collection.
	return h
}
// histogramCounts holds one of the two count states (hot/cold) of a
// histogram. All fields are manipulated with atomic operations.
type histogramCounts struct {
	// sumBits contains the bits of the float64 representing the sum of all
	// observations. sumBits and count have to go first in the struct to
	// guarantee alignment for atomic operations.
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	sumBits uint64
	count   uint64
	// buckets counts observations per bucket, indexed in parallel with
	// histogram.upperBounds.
	buckets []uint64
}
type histogram struct {
	// countAndHotIdx is a complicated one. For lock-free yet atomic
	// observations, we need to save the total count of observations again,
	// combined with the index of the currently-hot counts struct, so that
	// we can perform the operation on both values atomically. The least
	// significant bit defines the hot counts struct. The remaining 63 bits
	// represent the total count of observations. This happens under the
	// assumption that the 63bit count will never overflow. Rationale: An
	// observations takes about 30ns. Let's assume it could happen in
	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
	// which is about 3000 years.
	//
	// This has to be first in the struct for 64bit alignment. See
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	countAndHotIdx uint64
	selfCollector
	desc     *Desc
	writeMtx sync.Mutex // Only used in the Write method.
	upperBounds []float64
	// Two counts, one is "hot" for lock-free observations, the other is
	// "cold" for writing out a dto.Metric. It has to be an array of
	// pointers to guarantee 64bit alignment of the histogramCounts, see
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
	counts [2]*histogramCounts
	hotIdx int // Index of currently-hot counts. Only used within Write.
	labelPairs []*dto.LabelPair
}
// Desc implements Metric.
func (h *histogram) Desc() *Desc {
	return h.desc
}
// Observe implements Histogram. It is safe for concurrent use; observations
// are recorded lock-free via atomic operations (see countAndHotIdx).
func (h *histogram) Observe(v float64) {
	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
	// slightly faster than the binary search. If we really care, we could
	// switch from one search strategy to the other depending on the number
	// of buckets.
	//
	// Microbenchmarks (BenchmarkHistogramNoLabels):
	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
	i := sort.SearchFloat64s(h.upperBounds, v)
	// We increment h.countAndHotIdx by 2 so that the counter in the upper
	// 63 bits gets incremented by 1. At the same time, we get the new value
	// back, which we can use to find the currently-hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 2)
	hotCounts := h.counts[n%2] // LSB of n selects the hot counts struct.
	// i == len(upperBounds) means the observation falls only into the
	// implicit +Inf bucket, which is represented by count alone.
	if i < len(h.upperBounds) {
		atomic.AddUint64(&hotCounts.buckets[i], 1)
	}
	// CAS loop: add v to the float64 sum that is stored as raw bits.
	for {
		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
			break
		}
	}
	// Increment count last as we take it as a signal that the observation
	// is complete.
	atomic.AddUint64(&hotCounts.count, 1)
}
// Write implements Metric. It swaps the hot and cold counts structs, waits
// for in-flight observations to drain into the now-cold struct, serializes
// that struct into out, and finally folds the cold counts back into the new
// hot struct so no observation is lost.
func (h *histogram) Write(out *dto.Metric) error {
	var (
		his                   = &dto.Histogram{}
		buckets               = make([]*dto.Bucket, len(h.upperBounds))
		hotCounts, coldCounts *histogramCounts
		count                 uint64
	)
	// For simplicity, we mutex the rest of this method. It is not in the
	// hot path, i.e. Observe is called much more often than Write. The
	// complication of making Write lock-free isn't worth it.
	h.writeMtx.Lock()
	defer h.writeMtx.Unlock()
	// This is a bit arcane, which is why the following spells out this if
	// clause in English:
	//
	// If the currently-hot counts struct is #0, we atomically increment
	// h.countAndHotIdx by 1 so that from now on Observe will use the counts
	// struct #1. Furthermore, the atomic increment gives us the new value,
	// which, in its most significant 63 bits, tells us the count of
	// observations done so far up to and including currently ongoing
	// observations still using the counts struct just changed from hot to
	// cold. To have a normal uint64 for the count, we bitshift by 1 and
	// save the result in count. We also set h.hotIdx to 1 for the next
	// Write call, and we will refer to counts #1 as hotCounts and to counts
	// #0 as coldCounts.
	//
	// If the currently-hot counts struct is #1, we do the corresponding
	// things the other way round. We have to _decrement_ h.countAndHotIdx
	// (which is a bit arcane in itself, as we have to express -1 with an
	// unsigned int...).
	if h.hotIdx == 0 {
		count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
		h.hotIdx = 1
		hotCounts = h.counts[1]
		coldCounts = h.counts[0]
	} else {
		count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
		h.hotIdx = 0
		hotCounts = h.counts[0]
		coldCounts = h.counts[1]
	}
	// Now we have to wait for the now-declared-cold counts to actually cool
	// down, i.e. wait for all observations still using it to finish. That's
	// the case once the count in the cold counts struct is the same as the
	// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
	for {
		if count == atomic.LoadUint64(&coldCounts.count) {
			break
		}
		runtime.Gosched() // Let observations get work done.
	}
	his.SampleCount = proto.Uint64(count)
	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
	// Bucket counts are stored per bucket but exposed cumulatively.
	var cumCount uint64
	for i, upperBound := range h.upperBounds {
		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
		buckets[i] = &dto.Bucket{
			CumulativeCount: proto.Uint64(cumCount),
			UpperBound:      proto.Float64(upperBound),
		}
	}
	his.Bucket = buckets
	out.Histogram = his
	out.Label = h.labelPairs
	// Finally add all the cold counts to the new hot counts and reset the cold counts.
	atomic.AddUint64(&hotCounts.count, count)
	atomic.StoreUint64(&coldCounts.count, 0)
	for {
		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
			atomic.StoreUint64(&coldCounts.sumBits, 0)
			break
		}
	}
	for i := range h.upperBounds {
		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
		atomic.StoreUint64(&coldCounts.buckets[i], 0)
	}
	return nil
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
	// metricVec provides the label-value -> metric bookkeeping shared by
	// all metric vector types.
	*metricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and | |||
// partitioned by the given label names. | |||
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { | |||
desc := NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
labelNames, | |||
opts.ConstLabels, | |||
) | |||
return &HistogramVec{ | |||
metricVec: newMetricVec(desc, func(lvs ...string) Metric { | |||
return newHistogram(desc, opts, lvs...) | |||
}), | |||
} | |||
} | |||
// GetMetricWithLabelValues returns the Histogram for the given slice of label | |||
// values (same order as the VariableLabels in Desc). If that combination of | |||
// label values is accessed for the first time, a new Histogram is created. | |||
// | |||
// It is possible to call this method without using the returned Histogram to only | |||
// create the new Histogram but leave it at its starting value, a Histogram without | |||
// any observations. | |||
// | |||
// Keeping the Histogram for later use is possible (and should be considered if | |||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and | |||
// Delete can be used to delete the Histogram from the HistogramVec. In that case, the | |||
// Histogram will still exist, but it will not be exported anymore, even if a | |||
// Histogram with the same label values is created later. See also the CounterVec | |||
// example. | |||
// | |||
// An error is returned if the number of label values is not the same as the | |||
// number of VariableLabels in Desc (minus any curried labels). | |||
// | |||
// Note that for more than one label value, this method is prone to mistakes | |||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as | |||
// an alternative to avoid that type of mistake. For higher label numbers, the | |||
// latter has a much more readable (albeit more verbose) syntax, but it comes | |||
// with a performance overhead (for creating and processing the Labels map). | |||
// See also the GaugeVec example. | |||
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { | |||
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) | |||
if metric != nil { | |||
return metric.(Observer), err | |||
} | |||
return nil, err | |||
} | |||
// GetMetricWith returns the Histogram for the given Labels map (the label names | |||
// must match those of the VariableLabels in Desc). If that label map is | |||
// accessed for the first time, a new Histogram is created. Implications of | |||
// creating a Histogram without using it and keeping the Histogram for later use | |||
// are the same as for GetMetricWithLabelValues. | |||
// | |||
// An error is returned if the number and names of the Labels are inconsistent | |||
// with those of the VariableLabels in Desc (minus any curried labels). | |||
// | |||
// This method is used for the same purpose as | |||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two | |||
// methods. | |||
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { | |||
metric, err := v.metricVec.getMetricWith(labels) | |||
if metric != nil { | |||
return metric.(Observer), err | |||
} | |||
return nil, err | |||
} | |||
// WithLabelValues works as GetMetricWithLabelValues, but panics where | |||
// GetMetricWithLabelValues would have returned an error. Not returning an | |||
// error allows shortcuts like | |||
// myVec.WithLabelValues("404", "GET").Observe(42.21) | |||
func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { | |||
h, err := v.GetMetricWithLabelValues(lvs...) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return h | |||
} | |||
// With works as GetMetricWith but panics where GetMetricWithLabels would have | |||
// returned an error. Not returning an error allows shortcuts like | |||
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) | |||
func (v *HistogramVec) With(labels Labels) Observer { | |||
h, err := v.GetMetricWith(labels) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return h | |||
} | |||
// CurryWith returns a vector curried with the provided labels, i.e. the | |||
// returned vector has those labels pre-set for all labeled operations performed | |||
// on it. The cardinality of the curried vector is reduced accordingly. The | |||
// order of the remaining labels stays the same (just with the curried labels | |||
// taken out of the sequence – which is relevant for the | |||
// (GetMetric)WithLabelValues methods). It is possible to curry a curried | |||
// vector, but only with labels not yet used for currying before. | |||
// | |||
// The metrics contained in the HistogramVec are shared between the curried and | |||
// uncurried vectors. They are just accessed differently. Curried and uncurried | |||
// vectors behave identically in terms of collection. Only one must be | |||
// registered with a given registry (usually the uncurried version). The Reset | |||
// method deletes all metrics, even if called on a curried vector. | |||
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { | |||
vec, err := v.curryWith(labels) | |||
if vec != nil { | |||
return &HistogramVec{vec}, err | |||
} | |||
return nil, err | |||
} | |||
// MustCurryWith works as CurryWith but panics where CurryWith would have | |||
// returned an error. | |||
func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { | |||
vec, err := v.CurryWith(labels) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return vec | |||
} | |||
// constHistogram is the immutable Metric returned by NewConstHistogram.
type constHistogram struct {
	desc       *Desc
	count      uint64
	sum        float64
	buckets    map[float64]uint64 // upper bound -> cumulative count, excluding +Inf
	labelPairs []*dto.LabelPair
}
// Desc implements Metric.
func (h *constHistogram) Desc() *Desc {
	return h.desc
}
func (h *constHistogram) Write(out *dto.Metric) error { | |||
his := &dto.Histogram{} | |||
buckets := make([]*dto.Bucket, 0, len(h.buckets)) | |||
his.SampleCount = proto.Uint64(h.count) | |||
his.SampleSum = proto.Float64(h.sum) | |||
for upperBound, count := range h.buckets { | |||
buckets = append(buckets, &dto.Bucket{ | |||
CumulativeCount: proto.Uint64(count), | |||
UpperBound: proto.Float64(upperBound), | |||
}) | |||
} | |||
if len(buckets) > 0 { | |||
sort.Sort(buckSort(buckets)) | |||
} | |||
his.Bucket = buckets | |||
out.Histogram = his | |||
out.Label = h.labelPairs | |||
return nil | |||
} | |||
// NewConstHistogram returns a metric representing a Prometheus histogram with
// fixed values for the count, sum, and bucket counts. As those parameters
// cannot be changed, the returned value does not implement the Histogram
// interface (but only the Metric interface). Users of this package will not
// have much use for it in regular operations. However, when implementing custom
// Collectors, it is useful as a throw-away metric that is generated on the fly
// to send it to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstHistogram(
	desc *Desc,
	count uint64,
	sum float64,
	buckets map[float64]uint64,
	labelValues ...string,
) (Metric, error) {
	if desc.err != nil {
		return nil, desc.err
	}
	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
		return nil, err
	}
	return &constHistogram{
		desc:       desc,
		count:      count,
		sum:        sum,
		buckets:    buckets,
		labelPairs: makeLabelPairs(desc, labelValues),
	}, nil
}
// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstHistogram would have returned an error.
func MustNewConstHistogram(
	desc *Desc,
	count uint64,
	sum float64,
	buckets map[float64]uint64,
	labelValues ...string,
) Metric {
	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}
// buckSort implements sort.Interface to order buckets by ascending upper
// bound for exposition.
type buckSort []*dto.Bucket
func (s buckSort) Len() int {
	return len(s)
}
func (s buckSort) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s buckSort) Less(i, j int) bool {
	return s[i].GetUpperBound() < s[j].GetUpperBound()
}
@@ -0,0 +1,505 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"bufio" | |||
"bytes" | |||
"compress/gzip" | |||
"fmt" | |||
"io" | |||
"net" | |||
"net/http" | |||
"strconv" | |||
"strings" | |||
"sync" | |||
"time" | |||
"github.com/prometheus/common/expfmt" | |||
) | |||
// TODO(beorn7): Remove this whole file. It is a partial mirror of
// promhttp/http.go (to avoid circular import chains) where everything HTTP
// related should live. The functions here are just for avoiding
// breakage. Everything is deprecated.
// HTTP header names used when writing and negotiating scrape responses.
const (
	contentTypeHeader     = "Content-Type"
	contentLengthHeader   = "Content-Length"
	contentEncodingHeader = "Content-Encoding"
	acceptEncodingHeader  = "Accept-Encoding"
)
// bufPool pools the bytes.Buffers used to assemble scrape responses,
// avoiding an allocation per scrape. Using the pool's New hook replaces
// the previous hand-rolled nil check in getBuf with the idiomatic
// sync.Pool pattern; behavior for callers is unchanged.
var bufPool = sync.Pool{
	New: func() interface{} { return &bytes.Buffer{} },
}

// getBuf returns an empty buffer from the pool (or a fresh one if the
// pool is empty).
func getBuf() *bytes.Buffer {
	return bufPool.Get().(*bytes.Buffer)
}

// giveBuf resets buf and returns it to the pool for reuse.
func giveBuf(buf *bytes.Buffer) {
	buf.Reset()
	bufPool.Put(buf)
}
// Handler returns an HTTP handler for the DefaultGatherer. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead.
func Handler() http.Handler {
	return InstrumentHandler("prometheus", UninstrumentedHandler())
}
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
// instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		mfs, err := DefaultGatherer.Gather()
		if err != nil {
			http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
			return
		}
		// Negotiate the exposition format from the request headers, then
		// encode all metric families into a pooled buffer, optionally
		// gzip-compressed.
		contentType := expfmt.Negotiate(req.Header)
		buf := getBuf()
		defer giveBuf(buf)
		writer, encoding := decorateWriter(req, buf)
		enc := expfmt.NewEncoder(writer, contentType)
		var lastErr error
		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				lastErr = err
				http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
				return
			}
		}
		// Close the gzip writer (if any) so compressed data is flushed
		// into buf before the length is computed.
		if closer, ok := writer.(io.Closer); ok {
			closer.Close()
		}
		// NOTE(review): lastErr can only become non-nil on a path that has
		// already written an error response and returned, so this branch
		// appears unreachable as written - confirm against upstream.
		if lastErr != nil && buf.Len() == 0 {
			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
			return
		}
		header := w.Header()
		header.Set(contentTypeHeader, string(contentType))
		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
		if encoding != "" {
			header.Set(contentEncodingHeader, encoding)
		}
		w.Write(buf.Bytes())
	})
}
// decorateWriter wraps a writer to handle gzip compression if requested. It | |||
// returns the decorated writer and the appropriate "Content-Encoding" header | |||
// (which is empty if no compression is enabled). | |||
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { | |||
header := request.Header.Get(acceptEncodingHeader) | |||
parts := strings.Split(header, ",") | |||
for _, part := range parts { | |||
part = strings.TrimSpace(part) | |||
if part == "gzip" || strings.HasPrefix(part, "gzip;") { | |||
return gzip.NewWriter(writer), "gzip" | |||
} | |||
} | |||
return writer, "" | |||
} | |||
// instLabels are the variable label names of the requests_total CounterVec
// created by InstrumentHandlerFuncWithOpts.
var instLabels = []string{"method", "code"}
// nower abstracts the clock behind an interface so an alternative time
// source can be substituted (e.g. for testing).
type nower interface {
	Now() time.Time
}
// nowFunc adapts a plain function to the nower interface.
type nowFunc func() time.Time
// Now implements nower by calling the wrapped function.
func (n nowFunc) Now() time.Time {
	return n()
}
// now is the clock used by this file; by default it delegates to time.Now.
var now nower = nowFunc(func() time.Time {
	return time.Now()
})
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
// registers four metric collectors (if not already done) and reports HTTP
// metrics to the (newly or already) registered collectors: http_requests_total
// (CounterVec), http_request_duration_microseconds (Summary),
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
// has a constant label named "handler" with the provided handlerName as
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
// package promhttp instead. The issues are the following: (1) It uses Summaries
// rather than Histograms. Summaries are not useful if aggregation across
// multiple instances is required. (2) It uses microseconds as unit, which is
// deprecated and should be replaced by seconds. (3) The size of the request is
// calculated in a separate goroutine. Since this calculator requires access to
// the request header, it creates a race with any writes to the header performed
// during request handling. httputil.ReverseProxy is a prominent example for a
// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
// https://github.com/prometheus/client_golang/issues/272.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
// InstrumentHandlerFunc wraps the given function for instrumentation. It
// otherwise works in the same way as InstrumentHandler (and shares the same
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
	return InstrumentHandlerFuncWithOpts(
		SummaryOpts{
			Subsystem:   "http",
			ConstLabels: Labels{"handler": handlerName},
			// Estimate the 50th, 90th, and 99th percentiles.
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		handlerFunc,
	)
}
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
// issues) but provides more flexibility (at the cost of a more complex call
// syntax). As InstrumentHandler, this function registers four metric
// collectors, but it uses the provided SummaryOpts to create them. However, the
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
// help string. The names of the variable labels of the http_requests_total
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
//
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
// behavior of InstrumentHandler:
//
//     prometheus.InstrumentHandlerWithOpts(
//         prometheus.SummaryOpts{
//              Subsystem:   "http",
//              ConstLabels: prometheus.Labels{"handler": handlerName},
//         },
//         handler,
//     )
//
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
// and all its fields are set to the equally named fields in the provided
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares | |||
// the same issues) but provides more flexibility (at the cost of a more complex | |||
// call syntax). See InstrumentHandlerWithOpts for details how the provided | |||
// SummaryOpts are used. | |||
// | |||
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons | |||
// as InstrumentHandler is. Use the tooling provided in package promhttp instead. | |||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { | |||
reqCnt := NewCounterVec( | |||
CounterOpts{ | |||
Namespace: opts.Namespace, | |||
Subsystem: opts.Subsystem, | |||
Name: "requests_total", | |||
Help: "Total number of HTTP requests made.", | |||
ConstLabels: opts.ConstLabels, | |||
}, | |||
instLabels, | |||
) | |||
if err := Register(reqCnt); err != nil { | |||
if are, ok := err.(AlreadyRegisteredError); ok { | |||
reqCnt = are.ExistingCollector.(*CounterVec) | |||
} else { | |||
panic(err) | |||
} | |||
} | |||
opts.Name = "request_duration_microseconds" | |||
opts.Help = "The HTTP request latencies in microseconds." | |||
reqDur := NewSummary(opts) | |||
if err := Register(reqDur); err != nil { | |||
if are, ok := err.(AlreadyRegisteredError); ok { | |||
reqDur = are.ExistingCollector.(Summary) | |||
} else { | |||
panic(err) | |||
} | |||
} | |||
opts.Name = "request_size_bytes" | |||
opts.Help = "The HTTP request sizes in bytes." | |||
reqSz := NewSummary(opts) | |||
if err := Register(reqSz); err != nil { | |||
if are, ok := err.(AlreadyRegisteredError); ok { | |||
reqSz = are.ExistingCollector.(Summary) | |||
} else { | |||
panic(err) | |||
} | |||
} | |||
opts.Name = "response_size_bytes" | |||
opts.Help = "The HTTP response sizes in bytes." | |||
resSz := NewSummary(opts) | |||
if err := Register(resSz); err != nil { | |||
if are, ok := err.(AlreadyRegisteredError); ok { | |||
resSz = are.ExistingCollector.(Summary) | |||
} else { | |||
panic(err) | |||
} | |||
} | |||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |||
now := time.Now() | |||
delegate := &responseWriterDelegator{ResponseWriter: w} | |||
out := computeApproximateRequestSize(r) | |||
_, cn := w.(http.CloseNotifier) | |||
_, fl := w.(http.Flusher) | |||
_, hj := w.(http.Hijacker) | |||
_, rf := w.(io.ReaderFrom) | |||
var rw http.ResponseWriter | |||
if cn && fl && hj && rf { | |||
rw = &fancyResponseWriterDelegator{delegate} | |||
} else { | |||
rw = delegate | |||
} | |||
handlerFunc(rw, r) | |||
elapsed := float64(time.Since(now)) / float64(time.Microsecond) | |||
method := sanitizeMethod(r.Method) | |||
code := sanitizeCode(delegate.status) | |||
reqCnt.WithLabelValues(method, code).Inc() | |||
reqDur.Observe(elapsed) | |||
resSz.Observe(float64(delegate.written)) | |||
reqSz.Observe(float64(<-out)) | |||
}) | |||
} | |||
// computeApproximateRequestSize estimates the size of r in bytes (URL, method,
// proto, headers, host, and declared body length) and delivers the single
// result on the returned buffered channel.
func computeApproximateRequestSize(r *http.Request) <-chan int {
	// Get URL length in current goroutine for avoiding a race condition.
	// HandlerFunc that runs in parallel may modify the URL.
	s := 0
	if r.URL != nil {
		s += len(r.URL.String())
	}

	// Buffered with capacity 1 so the goroutine can finish even if the
	// caller never receives the value.
	out := make(chan int, 1)

	go func() {
		// NOTE(review): these fields are read while the handler may be
		// running; assumed safe because handlers conventionally do not
		// mutate them (unlike r.URL, handled above) — confirm.
		s += len(r.Method)
		s += len(r.Proto)
		for name, values := range r.Header {
			s += len(name)
			for _, value := range values {
				s += len(value)
			}
		}
		s += len(r.Host)

		// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.

		// ContentLength of -1 means "unknown" and is not counted.
		if r.ContentLength != -1 {
			s += int(r.ContentLength)
		}
		out <- s
		close(out)
	}()
	return out
}
// responseWriterDelegator wraps an http.ResponseWriter and records the status
// code and the number of body bytes written, for use as metric label/sample
// values after the handler returns.
type responseWriterDelegator struct {
	http.ResponseWriter

	status      int   // status code passed to WriteHeader
	written     int64 // cumulative bytes written via Write
	wroteHeader bool  // guards the implicit 200 in Write
}
// WriteHeader records the status code and marks the header as written before
// delegating to the wrapped ResponseWriter.
func (r *responseWriterDelegator) WriteHeader(code int) {
	r.status = code
	r.wroteHeader = true
	r.ResponseWriter.WriteHeader(code)
}

// Write forwards b to the wrapped ResponseWriter and accumulates the byte
// count. An implicit 200 header is recorded first if none was written,
// mirroring net/http semantics so the status is always populated.
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
	if !r.wroteHeader {
		r.WriteHeader(http.StatusOK)
	}
	n, err := r.ResponseWriter.Write(b)
	r.written += int64(n)
	return n, err
}
// fancyResponseWriterDelegator additionally exposes the optional
// http.CloseNotifier, http.Flusher, http.Hijacker, and io.ReaderFrom
// interfaces, forwarding each to the underlying ResponseWriter. It is used
// only when that writer implements all four (see the type assertions in
// InstrumentHandlerFuncWithOpts).
type fancyResponseWriterDelegator struct {
	*responseWriterDelegator
}

func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
	return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

func (f *fancyResponseWriterDelegator) Flush() {
	f.ResponseWriter.(http.Flusher).Flush()
}

func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	return f.ResponseWriter.(http.Hijacker).Hijack()
}

// ReadFrom delegates sendfile-style copies while keeping the written-bytes
// count (and implicit 200 header) consistent with Write.
func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
	if !f.wroteHeader {
		f.WriteHeader(http.StatusOK)
	}
	n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
	f.written += n
	return n, err
}
// sanitizeMethod normalizes an HTTP method to the lowercase form used as the
// "method" label value. (The original switch enumerated the common verbs as a
// fast path; every branch, including the default, yields strings.ToLower of
// the input, so the mapping is simply lowercasing.)
func sanitizeMethod(m string) string {
	return strings.ToLower(m)
}
// sanitizedCodes pre-renders the decimal strings for the status codes seen
// most often in practice, so the common path avoids strconv.Itoa.
var sanitizedCodes = map[int]string{
	100: "100", 101: "101",
	200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206",
	300: "300", 301: "301", 302: "302", 304: "304", 305: "305", 307: "307",
	400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405",
	406: "406", 407: "407", 408: "408", 409: "409", 410: "410", 411: "411",
	412: "412", 413: "413", 414: "414", 415: "415", 416: "416", 417: "417",
	418: "418", 428: "428", 429: "429", 431: "431",
	500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511",
}

// sanitizeCode renders an HTTP status code as the decimal string used for the
// "code" label value. Unlisted codes fall back to strconv.Itoa.
func sanitizeCode(s int) string {
	if str, ok := sanitizedCodes[s]; ok {
		return str
	}
	return strconv.Itoa(s)
}
@@ -0,0 +1,85 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package internal | |||
import ( | |||
"sort" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// metricSorter is a sortable slice of *dto.Metric.
type metricSorter []*dto.Metric

// Len implements sort.Interface.
func (s metricSorter) Len() int {
	return len(s)
}

// Swap implements sort.Interface.
func (s metricSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less orders metrics by label values (positionally), falling back to label
// count for inconsistent metrics and to timestamp for otherwise equal ones.
func (s metricSorter) Less(i, j int) bool {
	if len(s[i].Label) != len(s[j].Label) {
		// This should not happen. The metrics are
		// inconsistent. However, we have to deal with the fact, as
		// people might use custom collectors or metric family injection
		// to create inconsistent metrics. So let's simply compare the
		// number of labels in this case. That will still yield
		// reproducible sorting.
		return len(s[i].Label) < len(s[j].Label)
	}
	// Compare label values pairwise; the first difference decides.
	for n, lp := range s[i].Label {
		vi := lp.GetValue()
		vj := s[j].Label[n].GetValue()
		if vi != vj {
			return vi < vj
		}
	}

	// We should never arrive here. Multiple metrics with the same
	// label set in the same scrape will lead to undefined ingestion
	// behavior. However, as above, we have to provide stable sorting
	// here, even for inconsistent metrics. So sort equal metrics
	// by their timestamp, with missing timestamps (implying "now")
	// coming last.
	if s[i].TimestampMs == nil {
		return false
	}
	if s[j].TimestampMs == nil {
		return true
	}
	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
// NormalizeMetricFamilies returns a MetricFamily slice with empty | |||
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within | |||
// the slice, with the contained Metrics sorted within each MetricFamily. | |||
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { | |||
for _, mf := range metricFamiliesByName { | |||
sort.Sort(metricSorter(mf.Metric)) | |||
} | |||
names := make([]string, 0, len(metricFamiliesByName)) | |||
for name, mf := range metricFamiliesByName { | |||
if len(mf.Metric) > 0 { | |||
names = append(names, name) | |||
} | |||
} | |||
sort.Strings(names) | |||
result := make([]*dto.MetricFamily, 0, len(names)) | |||
for _, name := range names { | |||
result = append(result, metricFamiliesByName[name]) | |||
} | |||
return result | |||
} |
@@ -0,0 +1,70 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"errors" | |||
"fmt" | |||
"strings" | |||
"unicode/utf8" | |||
"github.com/prometheus/common/model" | |||
) | |||
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string

// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"

// errInconsistentCardinality is returned by the validate* helpers below when
// the number of supplied label values does not match the expected count.
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { | |||
if len(labels) != expectedNumberOfValues { | |||
return errInconsistentCardinality | |||
} | |||
for name, val := range labels { | |||
if !utf8.ValidString(val) { | |||
return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) | |||
} | |||
} | |||
return nil | |||
} | |||
func validateLabelValues(vals []string, expectedNumberOfValues int) error { | |||
if len(vals) != expectedNumberOfValues { | |||
return errInconsistentCardinality | |||
} | |||
for _, val := range vals { | |||
if !utf8.ValidString(val) { | |||
return fmt.Errorf("label value %q is not valid UTF-8", val) | |||
} | |||
} | |||
return nil | |||
} | |||
// checkLabelName reports whether l is a syntactically valid label name that
// does not use the reserved "__" prefix.
func checkLabelName(l string) bool {
	return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
}
@@ -0,0 +1,174 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"strings" | |||
"time" | |||
"github.com/golang/protobuf/proto" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// separatorByte is 0xFF, a byte that cannot occur in valid UTF-8 sequences.
// It is used as a delimiter in hash computations elsewhere in this package
// (usage not visible in this chunk).
const separatorByte byte = 255

// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
// Histogram, Summary, and Untyped.
type Metric interface {
	// Desc returns the descriptor for the Metric. This method idempotently
	// returns the same descriptor throughout the lifetime of the
	// Metric. The returned descriptor is immutable by contract. A Metric
	// unable to describe itself must return an invalid descriptor (created
	// with NewInvalidDesc).
	Desc() *Desc
	// Write encodes the Metric into a "Metric" Protocol Buffer data
	// transmission object.
	//
	// Metric implementations must observe concurrency safety as reads of
	// this metric may occur at any time, and any blocking occurs at the
	// expense of total performance of rendering all registered
	// metrics. Ideally, Metric implementations should support concurrent
	// readers.
	//
	// While populating dto.Metric, it is the responsibility of the
	// implementation to ensure validity of the Metric protobuf (like valid
	// UTF-8 strings or syntactically valid metric and label names). It is
	// recommended to sort labels lexicographically. Callers of Write should
	// still make sure of sorting if they depend on it.
	Write(*dto.Metric) error
	// TODO(beorn7): The original rationale of passing in a pre-allocated
	// dto.Metric protobuf to save allocations has disappeared. The
	// signature of this method should be changed to "Write() (*dto.Metric,
	// error)".
}
// Opts bundles the options for creating most Metric types. Each metric
// implementation XXX has its own XXXOpts type, but in most cases, it is just be
// an alias of this type (which might change when the requirement arises.)
//
// It is mandatory to set Name to a non-empty string. All other fields are
// optional and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type Opts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Metric (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the metric must be a
	// valid Prometheus metric name. (Joining is done by BuildFQName.)
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this metric.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
	ConstLabels Labels
}
// BuildFQName joins the given three name components by "_". Empty name
// components are ignored. If the name parameter itself is empty, an empty
// string is returned, no matter what. Metric implementations included in this
// library use this function internally to generate the fully-qualified metric
// name from the name component in their Opts. Users of the library will only
// need this function if they implement their own Metric or instantiate a Desc
// (with NewDesc) directly.
func BuildFQName(namespace, subsystem, name string) string {
	if name == "" {
		return ""
	}
	// Keep the non-empty components in order, then join once.
	parts := make([]string, 0, 3)
	for _, component := range []string{namespace, subsystem, name} {
		if component != "" {
			parts = append(parts, component)
		}
	}
	return strings.Join(parts, "_")
}
// labelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers.
type labelPairSorter []*dto.LabelPair

// Len implements sort.Interface.
func (s labelPairSorter) Len() int {
	return len(s)
}

// Swap implements sort.Interface.
func (s labelPairSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less orders label pairs lexicographically by label name.
func (s labelPairSorter) Less(i, j int) bool {
	return s[i].GetName() < s[j].GetName()
}
// invalidMetric is a Metric whose Write method always fails with the stored
// error. Created via NewInvalidMetric.
type invalidMetric struct {
	desc *Desc
	err  error
}

// NewInvalidMetric returns a metric whose Write method always returns the
// provided error. It is useful if a Collector finds itself unable to collect
// a metric and wishes to report an error to the registry.
func NewInvalidMetric(desc *Desc, err error) Metric {
	return &invalidMetric{desc, err}
}

// Desc returns the descriptor the invalid metric was created with.
func (m *invalidMetric) Desc() *Desc { return m.desc }

// Write always returns the stored error without touching the protobuf.
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
// timestampedMetric wraps a Metric and stamps every written sample with t.
type timestampedMetric struct {
	Metric
	t time.Time
}

// Write delegates to the wrapped Metric, then overwrites the sample's
// timestamp with t converted to milliseconds since the Unix epoch
// (seconds*1000 plus the millisecond part of the nanoseconds).
func (m timestampedMetric) Write(pb *dto.Metric) error {
	e := m.Metric.Write(pb)
	pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
	return e
}

// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
// way that it has an explicit timestamp set to the provided Time. This is only
// useful in rare cases as the timestamp of a Prometheus metric should usually
// be set by the Prometheus server during scraping. Exceptions include mirroring
// metrics with given timestamps from other metric
// sources.
//
// NewMetricWithTimestamp works best with MustNewConstMetric,
// MustNewConstHistogram, and MustNewConstSummary, see example.
//
// Currently, the exposition formats used by Prometheus are limited to
// millisecond resolution. Thus, the provided time will be rounded down to the
// next full millisecond value.
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
	return timestampedMetric{Metric: m, t: t}
}
@@ -0,0 +1,52 @@ | |||
// Copyright 2017 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
// Observer is the interface that wraps the Observe method, which is used by
// Histogram and Summary to add observations.
type Observer interface {
	Observe(float64)
}

// The ObserverFunc type is an adapter to allow the use of ordinary
// functions as Observers. If f is a function with the appropriate
// signature, ObserverFunc(f) is an Observer that calls f.
//
// This adapter is usually used in connection with the Timer type, and there are
// two general use cases:
//
// The most common one is to use a Gauge as the Observer for a Timer.
// See the "Gauge" Timer example.
//
// The more advanced use case is to create a function that dynamically decides
// which Observer to use for observing the duration. See the "Complex" Timer
// example.
type ObserverFunc func(float64)

// Observe calls f(value). It implements Observer.
func (f ObserverFunc) Observe(value float64) {
	f(value)
}

// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
// It mirrors the lookup (GetMetricWith*), must-succeed (With*), and currying
// (CurryWith/MustCurryWith) accessors of those vector types, and embeds
// Collector so an ObserverVec can be registered directly.
type ObserverVec interface {
	GetMetricWith(Labels) (Observer, error)
	GetMetricWithLabelValues(lvs ...string) (Observer, error)
	With(Labels) Observer
	WithLabelValues(...string) Observer
	CurryWith(Labels) (ObserverVec, error)
	MustCurryWith(Labels) ObserverVec

	Collector
}
@@ -0,0 +1,204 @@ | |||
// Copyright 2015 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"errors" | |||
"os" | |||
"github.com/prometheus/procfs" | |||
) | |||
// processCollector exports process-level metrics (CPU time, memory, file
// descriptors, start time) for the process identified by pidFn. collectFn is
// selected at construction time depending on platform support.
type processCollector struct {
	collectFn       func(chan<- Metric) // actual collection strategy (procfs or error-reporting stub)
	pidFn           func() (int, error) // yields the PID to inspect on each collection
	reportErrors    bool                // if true, collection errors become invalid metrics
	cpuTotal        *Desc
	openFDs, maxFDs *Desc
	vsize, maxVsize *Desc
	rss             *Desc
	startTime       *Desc
}

// ProcessCollectorOpts defines the behavior of a process metrics collector
// created with NewProcessCollector.
type ProcessCollectorOpts struct {
	// PidFn returns the PID of the process the collector collects metrics
	// for. It is called upon each collection. By default, the PID of the
	// current process is used, as determined on construction time by
	// calling os.Getpid().
	PidFn func() (int, error)
	// If non-empty, each of the collected metrics is prefixed by the
	// provided string and an underscore ("_").
	Namespace string
	// If true, any error encountered during collection is reported as an
	// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
	// and the collected metrics will be incomplete. (Possibly, no metrics
	// will be collected at all.) While that's usually not desired, it is
	// appropriate for the common "mix-in" of process metrics, where process
	// metrics are nice to have, but failing to collect them should not
	// disrupt the collection of the remaining metrics.
	ReportErrors bool
}
// NewProcessCollector returns a collector which exports the current state of
// process metrics including CPU, memory and file descriptor usage as well as
// the process start time. The detailed behavior is defined by the provided
// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
// collector for the current process with an empty namespace string and no error
// reporting.
//
// Currently, the collector depends on a Linux-style proc filesystem and
// therefore only exports metrics for Linux.
//
// Note: An older version of this function had the following signature:
//
//     NewProcessCollector(pid int, namespace string) Collector
//
// Most commonly, it was called as
//
//     NewProcessCollector(os.Getpid(), "")
//
// The following call of the current version is equivalent to the above:
//
//     NewProcessCollector(ProcessCollectorOpts{})
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
	ns := ""
	if len(opts.Namespace) > 0 {
		ns = opts.Namespace + "_"
	}

	c := &processCollector{
		reportErrors: opts.ReportErrors,
		cpuTotal: NewDesc(
			ns+"process_cpu_seconds_total",
			"Total user and system CPU time spent in seconds.",
			nil, nil,
		),
		openFDs: NewDesc(
			ns+"process_open_fds",
			"Number of open file descriptors.",
			nil, nil,
		),
		maxFDs: NewDesc(
			ns+"process_max_fds",
			"Maximum number of open file descriptors.",
			nil, nil,
		),
		vsize: NewDesc(
			ns+"process_virtual_memory_bytes",
			"Virtual memory size in bytes.",
			nil, nil,
		),
		maxVsize: NewDesc(
			ns+"process_virtual_memory_max_bytes",
			"Maximum amount of virtual memory available in bytes.",
			nil, nil,
		),
		rss: NewDesc(
			ns+"process_resident_memory_bytes",
			"Resident memory size in bytes.",
			nil, nil,
		),
		startTime: NewDesc(
			ns+"process_start_time_seconds",
			"Start time of the process since unix epoch in seconds.",
			nil, nil,
		),
	}

	if opts.PidFn == nil {
		// Capture the current PID once; the closure returns the same
		// value on every collection.
		pid := os.Getpid()
		c.pidFn = func() (int, error) { return pid, nil }
	} else {
		c.pidFn = opts.PidFn
	}

	// Set up process metric collection if supported by the runtime.
	// Probing procfs once here decides the strategy for all collections.
	if _, err := procfs.NewStat(); err == nil {
		c.collectFn = c.processCollect
	} else {
		c.collectFn = func(ch chan<- Metric) {
			c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
		}
	}

	return c
}
// Describe returns all descriptions of the collector. | |||
func (c *processCollector) Describe(ch chan<- *Desc) { | |||
ch <- c.cpuTotal | |||
ch <- c.openFDs | |||
ch <- c.maxFDs | |||
ch <- c.vsize | |||
ch <- c.maxVsize | |||
ch <- c.rss | |||
ch <- c.startTime | |||
} | |||
// Collect returns the current state of all metrics of the collector.
func (c *processCollector) Collect(ch chan<- Metric) {
	c.collectFn(ch)
}

// processCollect reads the process statistics from procfs and emits one
// metric per descriptor. Failures of individual reads are reported via
// reportError and do not abort the remaining reads.
func (c *processCollector) processCollect(ch chan<- Metric) {
	pid, err := c.pidFn()
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}

	p, err := procfs.NewProc(pid)
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}

	if stat, err := p.NewStat(); err == nil {
		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
		if startTime, err := stat.StartTime(); err == nil {
			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
		} else {
			c.reportError(ch, c.startTime, err)
		}
	} else {
		c.reportError(ch, nil, err)
	}

	if fds, err := p.FileDescriptorsLen(); err == nil {
		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
	} else {
		c.reportError(ch, c.openFDs, err)
	}

	if limits, err := p.NewLimits(); err == nil {
		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
	} else {
		c.reportError(ch, nil, err)
	}
}

// reportError emits err as an invalid metric on ch if error reporting is
// enabled; otherwise it is a no-op. A nil desc is replaced by an invalid
// descriptor carrying the error.
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
	if !c.reportErrors {
		return
	}
	if desc == nil {
		desc = NewInvalidDesc(err)
	}
	ch <- NewInvalidMetric(desc, err)
}
@@ -0,0 +1,199 @@ | |||
// Copyright 2017 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package promhttp | |||
import ( | |||
"bufio" | |||
"io" | |||
"net" | |||
"net/http" | |||
) | |||
// Bit flags recording which optional http.ResponseWriter side interfaces the
// wrapped writer supports. Their combinations index into pickDelegator.
const (
	closeNotifier = 1 << iota
	flusher
	hijacker
	readerFrom
	pusher
)
// delegator is the common interface of the instrumenting ResponseWriter
// wrappers in this package: a ResponseWriter that also exposes the observed
// status code and the number of body bytes written.
type delegator interface {
	http.ResponseWriter

	Status() int
	Written() int64
}

// responseWriterDelegator wraps an http.ResponseWriter and records status code
// and body byte count. observeWriteHeader, if non-nil, is invoked with the
// status code when the header is written.
type responseWriterDelegator struct {
	http.ResponseWriter

	handler, method    string
	status             int
	written            int64
	wroteHeader        bool
	observeWriteHeader func(int)
}
// Status returns the recorded HTTP status code (zero until WriteHeader ran).
func (r *responseWriterDelegator) Status() int {
	return r.status
}

// Written returns the number of body bytes written so far.
func (r *responseWriterDelegator) Written() int64 {
	return r.written
}

// WriteHeader records the status code, delegates to the wrapped writer, and
// then notifies the optional observeWriteHeader callback.
func (r *responseWriterDelegator) WriteHeader(code int) {
	r.status = code
	r.wroteHeader = true
	r.ResponseWriter.WriteHeader(code)
	if r.observeWriteHeader != nil {
		r.observeWriteHeader(code)
	}
}

// Write forwards b to the wrapped writer and accumulates the byte count,
// recording an implicit 200 header first if none was written (mirroring
// net/http semantics).
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
	if !r.wroteHeader {
		r.WriteHeader(http.StatusOK)
	}
	n, err := r.ResponseWriter.Write(b)
	r.written += int64(n)
	return n, err
}
// Single-interface wrappers around responseWriterDelegator. They are composed
// (see the pickDelegator table) so the returned delegator exposes exactly the
// optional interfaces the underlying ResponseWriter supports.
type closeNotifierDelegator struct{ *responseWriterDelegator }
type flusherDelegator struct{ *responseWriterDelegator }
type hijackerDelegator struct{ *responseWriterDelegator }
type readerFromDelegator struct{ *responseWriterDelegator }

func (d closeNotifierDelegator) CloseNotify() <-chan bool {
	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

func (d flusherDelegator) Flush() {
	d.ResponseWriter.(http.Flusher).Flush()
}

func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	return d.ResponseWriter.(http.Hijacker).Hijack()
}

// ReadFrom keeps the written-bytes count (and implicit 200 header) consistent
// with Write for sendfile-style copies.
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
	if !d.wroteHeader {
		d.WriteHeader(http.StatusOK)
	}
	n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
	d.written += n
	return n, err
}
// pickDelegator maps a bitmask of supported optional interfaces (the sum of
// the closeNotifier/flusher/hijacker/readerFrom/pusher flags) to a
// constructor returning a delegator that exposes exactly those interfaces.
// Entries 16-31 (all combinations involving http.Pusher) are filled in by
// an additional init function compiled only on Go 1.8+.
var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)

// init populates the 16 Pusher-free combinations. Each anonymous struct
// embeds the base delegator plus one forwarding implementation per flag set
// in the index, so interface detection done once in newDelegator is
// preserved in the returned value's method set.
func init() {
	// TODO(beorn7): Code generation would help here.
	pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
		return d
	}
	pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
		return closeNotifierDelegator{d}
	}
	pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
		return flusherDelegator{d}
	}
	pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
		return struct {
			*responseWriterDelegator
			http.Flusher
			http.CloseNotifier
		}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
		return hijackerDelegator{d}
	}
	pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
		return struct {
			*responseWriterDelegator
			http.Hijacker
			http.CloseNotifier
		}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
		return struct {
			*responseWriterDelegator
			http.Hijacker
			http.Flusher
		}{d, hijackerDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
		return struct {
			*responseWriterDelegator
			http.Hijacker
			http.Flusher
			http.CloseNotifier
		}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
		return readerFromDelegator{d}
	}
	pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.CloseNotifier
		}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Flusher
		}{d, readerFromDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Flusher
			http.CloseNotifier
		}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Hijacker
		}{d, readerFromDelegator{d}, hijackerDelegator{d}}
	}
	pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Hijacker
			http.CloseNotifier
		}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Hijacker
			http.Flusher
		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Hijacker
			http.Flusher
			http.CloseNotifier
		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
}
@@ -0,0 +1,181 @@ | |||
// Copyright 2017 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// +build go1.8 | |||
package promhttp | |||
import ( | |||
"io" | |||
"net/http" | |||
) | |||
// pusherDelegator adds HTTP/2 server push support (http.Pusher, Go 1.8+) to
// responseWriterDelegator.
type pusherDelegator struct{ *responseWriterDelegator }

// Push forwards to the wrapped http.Pusher.
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
	return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
// init fills in the pickDelegator entries for all combinations that include
// http.Pusher (indices 16-31). It lives in this Go 1.8+-only file because
// http.Pusher does not exist on earlier Go versions.
func init() {
	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
		return pusherDelegator{d}
	}
	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Flusher
		}{d, pusherDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
		}{d, pusherDelegator{d}, hijackerDelegator{d}}
	}
	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
			http.CloseNotifier
		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
			http.Flusher
		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
		}{d, pusherDelegator{d}, readerFromDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Flusher
		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
			http.Flusher
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
}
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { | |||
d := &responseWriterDelegator{ | |||
ResponseWriter: w, | |||
observeWriteHeader: observeWriteHeaderFunc, | |||
} | |||
id := 0 | |||
if _, ok := w.(http.CloseNotifier); ok { | |||
id += closeNotifier | |||
} | |||
if _, ok := w.(http.Flusher); ok { | |||
id += flusher | |||
} | |||
if _, ok := w.(http.Hijacker); ok { | |||
id += hijacker | |||
} | |||
if _, ok := w.(io.ReaderFrom); ok { | |||
id += readerFrom | |||
} | |||
if _, ok := w.(http.Pusher); ok { | |||
id += pusher | |||
} | |||
return pickDelegator[id](d) | |||
} |
@@ -0,0 +1,44 @@ | |||
// Copyright 2017 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// +build !go1.8 | |||
package promhttp | |||
import ( | |||
"io" | |||
"net/http" | |||
) | |||
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { | |||
d := &responseWriterDelegator{ | |||
ResponseWriter: w, | |||
observeWriteHeader: observeWriteHeaderFunc, | |||
} | |||
id := 0 | |||
if _, ok := w.(http.CloseNotifier); ok { | |||
id += closeNotifier | |||
} | |||
if _, ok := w.(http.Flusher); ok { | |||
id += flusher | |||
} | |||
if _, ok := w.(http.Hijacker); ok { | |||
id += hijacker | |||
} | |||
if _, ok := w.(io.ReaderFrom); ok { | |||
id += readerFrom | |||
} | |||
return pickDelegator[id](d) | |||
} |
@@ -0,0 +1,311 @@ | |||
// Copyright 2016 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Package promhttp provides tooling around HTTP servers and clients. | |||
// | |||
// First, the package allows the creation of http.Handler instances to expose | |||
// Prometheus metrics via HTTP. promhttp.Handler acts on the | |||
// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a | |||
// custom registry or anything that implements the Gatherer interface. It also | |||
// allows the creation of handlers that act differently on errors or allow to | |||
// log errors. | |||
// | |||
// Second, the package provides tooling to instrument instances of http.Handler | |||
// via middleware. Middleware wrappers follow the naming scheme | |||
// InstrumentHandlerX, where X describes the intended use of the middleware. | |||
// See each function's doc comment for specific details. | |||
// | |||
// Finally, the package allows for an http.RoundTripper to be instrumented via | |||
// middleware. Middleware wrappers follow the naming scheme | |||
// InstrumentRoundTripperX, where X describes the intended use of the | |||
// middleware. See each function's doc comment for specific details. | |||
package promhttp | |||
import ( | |||
"bytes" | |||
"compress/gzip" | |||
"fmt" | |||
"io" | |||
"net/http" | |||
"strings" | |||
"sync" | |||
"time" | |||
"github.com/prometheus/common/expfmt" | |||
"github.com/prometheus/client_golang/prometheus" | |||
) | |||
const ( | |||
contentTypeHeader = "Content-Type" | |||
contentLengthHeader = "Content-Length" | |||
contentEncodingHeader = "Content-Encoding" | |||
acceptEncodingHeader = "Accept-Encoding" | |||
) | |||
// bufPool recycles the buffers used to assemble metric responses before
// they are written out, avoiding an allocation per scrape. The New hook
// makes Get always yield a usable *bytes.Buffer, so callers never see nil.
var bufPool = sync.Pool{
	New: func() interface{} { return &bytes.Buffer{} },
}

// getBuf returns an empty buffer from the pool. Buffers are reset in
// giveBuf before being pooled, so the returned buffer has zero length.
func getBuf() *bytes.Buffer {
	return bufPool.Get().(*bytes.Buffer)
}

// giveBuf resets buf and returns it to the pool for reuse.
func giveBuf(buf *bytes.Buffer) {
	buf.Reset()
	bufPool.Put(buf)
}
// Handler returns an http.Handler for the prometheus.DefaultGatherer, using | |||
// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has | |||
// no error logging, and it applies compression if requested by the client. | |||
// | |||
// The returned http.Handler is already instrumented using the | |||
// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you | |||
// create multiple http.Handlers by separate calls of the Handler function, the | |||
// metrics used for instrumentation will be shared between them, providing | |||
// global scrape counts. | |||
// | |||
// This function is meant to cover the bulk of basic use cases. If you are doing | |||
// anything that requires more customization (including using a non-default | |||
// Gatherer, different instrumentation, and non-default HandlerOpts), use the | |||
// HandlerFor function. See there for details. | |||
func Handler() http.Handler { | |||
return InstrumentMetricHandler( | |||
prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), | |||
) | |||
} | |||
// HandlerFor returns an uninstrumented http.Handler for the provided
// Gatherer. The behavior of the Handler is defined by the provided
// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
// instrumentation. Use the InstrumentMetricHandler function to apply the same
// kind of instrumentation as it is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
	// Optional semaphore limiting concurrent scrapes; nil means unlimited.
	var inFlightSem chan struct{}
	if opts.MaxRequestsInFlight > 0 {
		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
	}
	h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if inFlightSem != nil {
			// Non-blocking acquire: reject with 503 rather than queueing.
			select {
			case inFlightSem <- struct{}{}: // All good, carry on.
				defer func() { <-inFlightSem }()
			default:
				http.Error(w, fmt.Sprintf(
					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
				), http.StatusServiceUnavailable)
				return
			}
		}
		mfs, err := reg.Gather()
		if err != nil {
			if opts.ErrorLog != nil {
				opts.ErrorLog.Println("error gathering metrics:", err)
			}
			switch opts.ErrorHandling {
			case PanicOnError:
				panic(err)
			case ContinueOnError:
				// Best effort: only give up if nothing was gathered at all.
				if len(mfs) == 0 {
					http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
					return
				}
			case HTTPErrorOnError:
				http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
				return
			}
		}
		// Pick the exposition format from the Accept header and encode the
		// whole response into a pooled buffer before sending, so that
		// Content-Length can be set and mid-encoding errors still allow an
		// error response.
		contentType := expfmt.Negotiate(req.Header)
		buf := getBuf()
		defer giveBuf(buf)
		writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
		enc := expfmt.NewEncoder(writer, contentType)
		var lastErr error
		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				lastErr = err
				if opts.ErrorLog != nil {
					opts.ErrorLog.Println("error encoding metric family:", err)
				}
				switch opts.ErrorHandling {
				case PanicOnError:
					panic(err)
				case ContinueOnError:
					// Handled later.
				case HTTPErrorOnError:
					http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
					return
				}
			}
		}
		// Close flushes the gzip writer (if any) into buf.
		if closer, ok := writer.(io.Closer); ok {
			closer.Close()
		}
		if lastErr != nil && buf.Len() == 0 {
			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
			return
		}
		header := w.Header()
		header.Set(contentTypeHeader, string(contentType))
		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
		if encoding != "" {
			header.Set(contentEncodingHeader, encoding)
		}
		if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil {
			opts.ErrorLog.Println("error while sending encoded metrics:", err)
		}
		// TODO(beorn7): Consider streaming serving of metrics.
	})
	if opts.Timeout <= 0 {
		return h
	}
	// Timeout is enforced via http.TimeoutHandler; gathering keeps running
	// in the background even after the 503 is sent (see HandlerOpts.Timeout).
	return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
		"Exceeded configured timeout of %v.\n",
		opts.Timeout,
	))
}
// InstrumentMetricHandler is usually used with an http.Handler returned by the
// HandlerFor function. It instruments the provided http.Handler with two
// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
// scrapes partitioned by HTTP status code, and a gauge
// "promhttp_metric_handler_requests_in_flight" to track the number of
// simultaneous scrapes. This function idempotently registers collectors for
// both metrics with the provided Registerer. It panics if the registration
// fails. The provided metrics are useful to see how many scrapes hit the
// monitored target (which could be from different Prometheus servers or other
// scrapers), and how often they overlap (which would result in more than one
// scrape in flight at the same time). Note that the scrapes-in-flight gauge
// will contain the scrape by which it is exposed, while the scrape counter will
// only get incremented after the scrape is complete (as only then the status
// code is known). For tracking scrape durations, use the
// "scrape_duration_seconds" gauge created by the Prometheus server upon each
// scrape.
func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
	cnt := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "promhttp_metric_handler_requests_total",
			Help: "Total number of scrapes by HTTP status code.",
		},
		[]string{"code"},
	)
	// Initialize the most likely HTTP status codes.
	cnt.WithLabelValues("200")
	cnt.WithLabelValues("500")
	cnt.WithLabelValues("503")
	if err := reg.Register(cnt); err != nil {
		// Idempotency: if the counter is already registered (e.g. Handler
		// was called twice), reuse the existing collector instead of failing.
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			cnt = are.ExistingCollector.(*prometheus.CounterVec)
		} else {
			panic(err)
		}
	}
	gge := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "promhttp_metric_handler_requests_in_flight",
		Help: "Current number of scrapes being served.",
	})
	if err := reg.Register(gge); err != nil {
		// Same idempotent reuse for the in-flight gauge.
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			gge = are.ExistingCollector.(prometheus.Gauge)
		} else {
			panic(err)
		}
	}
	return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
}
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int

// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
	// Serve an HTTP status code 500 upon the first error
	// encountered. Report the error message in the body. This is the zero
	// value and therefore the default behavior of HandlerOpts.
	HTTPErrorOnError HandlerErrorHandling = iota
	// Ignore errors and try to serve as many metrics as possible. However,
	// if no metrics can be served, serve an HTTP status code 500 and the
	// last error message in the body. Only use this in deliberate "best
	// effort" metrics collection scenarios. It is recommended to at least
	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
	// errors completely.
	ContinueOnError
	// Panic upon the first error encountered (useful for "crash only" apps).
	PanicOnError
)

// Logger is the minimal interface HandlerOpts needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy to implement by custom loggers, if they don't do so already anyway.
type Logger interface {
	Println(v ...interface{})
}
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
	// ErrorLog specifies an optional logger for errors collecting and
	// serving metrics. If nil, errors are not logged at all.
	ErrorLog Logger
	// ErrorHandling defines how errors are handled. Note that errors are
	// logged regardless of the configured ErrorHandling provided ErrorLog
	// is not nil.
	ErrorHandling HandlerErrorHandling
	// If DisableCompression is true, the handler will never compress the
	// response, even if requested by the client.
	DisableCompression bool
	// The number of concurrent HTTP requests is limited to
	// MaxRequestsInFlight. Additional requests are responded to with 503
	// Service Unavailable and a suitable message in the body. If
	// MaxRequestsInFlight is 0 or negative, no limit is applied.
	MaxRequestsInFlight int
	// If handling a request takes longer than Timeout, it is responded to
	// with 503 ServiceUnavailable and a suitable Message. No timeout is
	// applied if Timeout is 0 or negative. Note that with the current
	// implementation, reaching the timeout simply ends the HTTP requests as
	// described above (and even that only if sending of the body hasn't
	// started yet), while the bulk work of gathering all the metrics keeps
	// running in the background (with the eventual result to be thrown
	// away). Until the implementation is improved, it is recommended to
	// implement a separate timeout in potentially slow Collectors.
	// (Enforced via http.TimeoutHandler in HandlerFor.)
	Timeout time.Duration
}
// decorateWriter wraps a writer to handle gzip compression if requested. It | |||
// returns the decorated writer and the appropriate "Content-Encoding" header | |||
// (which is empty if no compression is enabled). | |||
func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { | |||
if compressionDisabled { | |||
return writer, "" | |||
} | |||
header := request.Header.Get(acceptEncodingHeader) | |||
parts := strings.Split(header, ",") | |||
for _, part := range parts { | |||
part = strings.TrimSpace(part) | |||
if part == "gzip" || strings.HasPrefix(part, "gzip;") { | |||
return gzip.NewWriter(writer), "gzip" | |||
} | |||
} | |||
return writer, "" | |||
} |
@@ -0,0 +1,97 @@ | |||
// Copyright 2017 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package promhttp | |||
import ( | |||
"net/http" | |||
"time" | |||
"github.com/prometheus/client_golang/prometheus" | |||
) | |||
// The RoundTripperFunc type is an adapter to allow the use of ordinary
// functions as RoundTrippers. If f is a function with the appropriate
// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
type RoundTripperFunc func(req *http.Request) (*http.Response, error)

// RoundTrip implements the RoundTripper interface.
func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
	return rt(r)
}
// InstrumentRoundTripperInFlight is a middleware that wraps the provided
// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.RoundTripper.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
		gauge.Inc()
		// Dec is deferred so the gauge is decremented even if RoundTrip panics.
		defer gauge.Dec()
		return next.RoundTrip(r)
	})
}
// InstrumentRoundTripperCounter is a middleware that wraps the provided
// http.RoundTripper to observe the request result with the provided CounterVec.
// The CounterVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
// and/or HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
//
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
// is not incremented.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
	// checkLabels validates the label set once, up front, so misuse panics
	// at instrumentation time rather than on the first request.
	code, method := checkLabels(counter)
	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
		resp, err := next.RoundTrip(r)
		if err == nil {
			// Only successful round trips carry a status code to count.
			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
		}
		return resp, err
	})
}
// InstrumentRoundTripperDuration is a middleware that wraps the provided
// http.RoundTripper to observe the request duration with the provided
// ObserverVec. The ObserverVec must have zero, one, or two non-const
// non-curried labels. For those, the only allowed label names are "code" and
// "method". The function panics otherwise. The Observe method of the Observer
// in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
// reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
	// Validate the label set once, up front, so misuse panics early.
	code, method := checkLabels(obs)
	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
		start := time.Now()
		resp, err := next.RoundTrip(r)
		if err == nil {
			// Only successful round trips are observed; errors carry no
			// status code and would skew the duration distribution.
			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
		}
		return resp, err
	})
}
@@ -0,0 +1,144 @@ | |||
// Copyright 2017 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// +build go1.8 | |||
package promhttp | |||
import ( | |||
"context" | |||
"crypto/tls" | |||
"net/http" | |||
"net/http/httptrace" | |||
"time" | |||
) | |||
// InstrumentTrace is used to offer flexibility in instrumenting the available
// httptrace.ClientTrace hook functions. Each function is passed a float64
// representing the time in seconds since the start of the http request. A
// user may choose to use separately bucketed Histograms, or implement custom
// instance labels on a per function basis. Any nil hook is simply skipped.
type InstrumentTrace struct {
	GotConn func(float64)
	PutIdleConn func(float64)
	GotFirstResponseByte func(float64)
	Got100Continue func(float64)
	DNSStart func(float64)
	DNSDone func(float64)
	ConnectStart func(float64)
	ConnectDone func(float64)
	TLSHandshakeStart func(float64)
	TLSHandshakeDone func(float64)
	WroteHeaders func(float64)
	Wait100Continue func(float64)
	WroteRequest func(float64)
}
// InstrumentRoundTripperTrace is a middleware that wraps the provided | |||
// RoundTripper and reports times to hook functions provided in the | |||
// InstrumentTrace struct. Hook functions that are not present in the provided | |||
// InstrumentTrace struct are ignored. Times reported to the hook functions are | |||
// time since the start of the request. Only with Go1.9+, those times are | |||
// guaranteed to never be negative. (Earlier Go versions are not using a | |||
// monotonic clock.) Note that partitioning of Histograms is expensive and | |||
// should be used judiciously. | |||
// | |||
// For hook functions that receive an error as an argument, no observations are | |||
// made in the event of a non-nil error value. | |||
// | |||
// See the example for ExampleInstrumentRoundTripperDuration for example usage. | |||
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { | |||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { | |||
start := time.Now() | |||
trace := &httptrace.ClientTrace{ | |||
GotConn: func(_ httptrace.GotConnInfo) { | |||
if it.GotConn != nil { | |||
it.GotConn(time.Since(start).Seconds()) | |||
} | |||
}, | |||
PutIdleConn: func(err error) { | |||
if err != nil { | |||
return | |||
} | |||
if it.PutIdleConn != nil { | |||
it.PutIdleConn(time.Since(start).Seconds()) | |||
} | |||
}, | |||
DNSStart: func(_ httptrace.DNSStartInfo) { | |||
if it.DNSStart != nil { | |||
it.DNSStart(time.Since(start).Seconds()) | |||
} | |||
}, | |||
DNSDone: func(_ httptrace.DNSDoneInfo) { | |||
if it.DNSDone != nil { | |||
it.DNSDone(time.Since(start).Seconds()) | |||
} | |||
}, | |||
ConnectStart: func(_, _ string) { | |||
if it.ConnectStart != nil { | |||
it.ConnectStart(time.Since(start).Seconds()) | |||
} | |||
}, | |||
ConnectDone: func(_, _ string, err error) { | |||
if err != nil { | |||
return | |||
} | |||
if it.ConnectDone != nil { | |||
it.ConnectDone(time.Since(start).Seconds()) | |||
} | |||
}, | |||
GotFirstResponseByte: func() { | |||
if it.GotFirstResponseByte != nil { | |||
it.GotFirstResponseByte(time.Since(start).Seconds()) | |||
} | |||
}, | |||
Got100Continue: func() { | |||
if it.Got100Continue != nil { | |||
it.Got100Continue(time.Since(start).Seconds()) | |||
} | |||
}, | |||
TLSHandshakeStart: func() { | |||
if it.TLSHandshakeStart != nil { | |||
it.TLSHandshakeStart(time.Since(start).Seconds()) | |||
} | |||
}, | |||
TLSHandshakeDone: func(_ tls.ConnectionState, err error) { | |||
if err != nil { | |||
return | |||
} | |||
if it.TLSHandshakeDone != nil { | |||
it.TLSHandshakeDone(time.Since(start).Seconds()) | |||
} | |||
}, | |||
WroteHeaders: func() { | |||
if it.WroteHeaders != nil { | |||
it.WroteHeaders(time.Since(start).Seconds()) | |||
} | |||
}, | |||
Wait100Continue: func() { | |||
if it.Wait100Continue != nil { | |||
it.Wait100Continue(time.Since(start).Seconds()) | |||
} | |||
}, | |||
WroteRequest: func(_ httptrace.WroteRequestInfo) { | |||
if it.WroteRequest != nil { | |||
it.WroteRequest(time.Since(start).Seconds()) | |||
} | |||
}, | |||
} | |||
r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace)) | |||
return next.RoundTrip(r) | |||
}) | |||
} |
@@ -0,0 +1,447 @@ | |||
// Copyright 2017 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package promhttp | |||
import ( | |||
"errors" | |||
"net/http" | |||
"strconv" | |||
"strings" | |||
"time" | |||
dto "github.com/prometheus/client_model/go" | |||
"github.com/prometheus/client_golang/prometheus" | |||
) | |||
// magicString is used for the hacky label test in checkLabels: it is injected
// as a variable-label value and is assumed never to collide with a real const
// label value. Remove once fixed.
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
// InstrumentHandlerInFlight is a middleware that wraps the provided | |||
// http.Handler. It sets the provided prometheus.Gauge to the number of | |||
// requests currently handled by the wrapped http.Handler. | |||
// | |||
// See the example for InstrumentHandlerDuration for example usage. | |||
func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { | |||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |||
g.Inc() | |||
defer g.Dec() | |||
next.ServeHTTP(w, r) | |||
}) | |||
} | |||
// InstrumentHandlerDuration is a middleware that wraps the provided | |||
// http.Handler to observe the request duration with the provided ObserverVec. | |||
// The ObserverVec must have zero, one, or two non-const non-curried labels. For | |||
// those, the only allowed label names are "code" and "method". The function | |||
// panics otherwise. The Observe method of the Observer in the ObserverVec is | |||
// called with the request duration in seconds. Partitioning happens by HTTP | |||
// status code and/or HTTP method if the respective instance label names are | |||
// present in the ObserverVec. For unpartitioned observations, use an | |||
// ObserverVec with zero labels. Note that partitioning of Histograms is | |||
// expensive and should be used judiciously. | |||
// | |||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed. | |||
// | |||
// If the wrapped Handler panics, no values are reported. | |||
// | |||
// Note that this method is only guaranteed to never observe negative durations | |||
// if used with Go1.9+. | |||
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { | |||
code, method := checkLabels(obs) | |||
if code { | |||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |||
now := time.Now() | |||
d := newDelegator(w, nil) | |||
next.ServeHTTP(d, r) | |||
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) | |||
}) | |||
} | |||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |||
now := time.Now() | |||
next.ServeHTTP(w, r) | |||
obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) | |||
}) | |||
} | |||
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler | |||
// to observe the request result with the provided CounterVec. The CounterVec | |||
// must have zero, one, or two non-const non-curried labels. For those, the only | |||
// allowed label names are "code" and "method". The function panics | |||
// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or | |||
// HTTP method if the respective instance label names are present in the | |||
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. | |||
// | |||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed. | |||
// | |||
// If the wrapped Handler panics, the Counter is not incremented. | |||
// | |||
// See the example for InstrumentHandlerDuration for example usage. | |||
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { | |||
code, method := checkLabels(counter) | |||
if code { | |||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |||
d := newDelegator(w, nil) | |||
next.ServeHTTP(d, r) | |||
counter.With(labels(code, method, r.Method, d.Status())).Inc() | |||
}) | |||
} | |||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |||
next.ServeHTTP(w, r) | |||
counter.With(labels(code, method, r.Method, 0)).Inc() | |||
}) | |||
} | |||
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided | |||
// http.Handler to observe with the provided ObserverVec the request duration | |||
// until the response headers are written. The ObserverVec must have zero, one, | |||
// or two non-const non-curried labels. For those, the only allowed label names | |||
// are "code" and "method". The function panics otherwise. The Observe method of | |||
// the Observer in the ObserverVec is called with the request duration in | |||
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the | |||
// respective instance label names are present in the ObserverVec. For | |||
// unpartitioned observations, use an ObserverVec with zero labels. Note that | |||
// partitioning of Histograms is expensive and should be used judiciously. | |||
// | |||
// If the wrapped Handler panics before calling WriteHeader, no value is | |||
// reported. | |||
// | |||
// Note that this method is only guaranteed to never observe negative durations | |||
// if used with Go1.9+. | |||
// | |||
// See the example for InstrumentHandlerDuration for example usage. | |||
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { | |||
code, method := checkLabels(obs) | |||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |||
now := time.Now() | |||
d := newDelegator(w, func(status int) { | |||
obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) | |||
}) | |||
next.ServeHTTP(d, r) | |||
}) | |||
} | |||
// InstrumentHandlerRequestSize is a middleware that wraps the provided | |||
// http.Handler to observe the request size with the provided ObserverVec. The | |||
// ObserverVec must have zero, one, or two non-const non-curried labels. For | |||
// those, the only allowed label names are "code" and "method". The function | |||
// panics otherwise. The Observe method of the Observer in the ObserverVec is | |||
// called with the request size in bytes. Partitioning happens by HTTP status | |||
// code and/or HTTP method if the respective instance label names are present in | |||
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero | |||
// labels. Note that partitioning of Histograms is expensive and should be used | |||
// judiciously. | |||
// | |||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed. | |||
// | |||
// If the wrapped Handler panics, no values are reported. | |||
// | |||
// See the example for InstrumentHandlerDuration for example usage. | |||
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { | |||
code, method := checkLabels(obs) | |||
if code { | |||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |||
d := newDelegator(w, nil) | |||
next.ServeHTTP(d, r) | |||
size := computeApproximateRequestSize(r) | |||
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) | |||
}) | |||
} | |||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |||
next.ServeHTTP(w, r) | |||
size := computeApproximateRequestSize(r) | |||
obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) | |||
}) | |||
} | |||
// InstrumentHandlerResponseSize is a middleware that wraps the provided | |||
// http.Handler to observe the response size with the provided ObserverVec. The | |||
// ObserverVec must have zero, one, or two non-const non-curried labels. For | |||
// those, the only allowed label names are "code" and "method". The function | |||
// panics otherwise. The Observe method of the Observer in the ObserverVec is | |||
// called with the response size in bytes. Partitioning happens by HTTP status | |||
// code and/or HTTP method if the respective instance label names are present in | |||
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero | |||
// labels. Note that partitioning of Histograms is expensive and should be used | |||
// judiciously. | |||
// | |||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed. | |||
// | |||
// If the wrapped Handler panics, no values are reported. | |||
// | |||
// See the example for InstrumentHandlerDuration for example usage. | |||
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { | |||
code, method := checkLabels(obs) | |||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | |||
d := newDelegator(w, nil) | |||
next.ServeHTTP(d, r) | |||
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) | |||
}) | |||
} | |||
// checkLabels inspects the single Desc of the provided Collector and reports
// whether its metric vector is partitioned by a "code" and/or "method" label.
// It panics if the Collector yields zero or more than one Desc, or if any
// non-const, non-curried label other than "code" or "method" is present.
func checkLabels(c prometheus.Collector) (code bool, method bool) {
	// TODO(beorn7): Remove this hacky way to check for instance labels
	// once Descriptors can have their dimensionality queried.
	var (
		desc *prometheus.Desc
		m    prometheus.Metric
		pm   dto.Metric
		lvs  []string
	)

	// Get the Desc from the Collector. Describe is expected to send
	// exactly one Desc on the (buffered) channel.
	descc := make(chan *prometheus.Desc, 1)
	c.Describe(descc)

	select {
	case desc = <-descc:
	default:
		panic("no description provided by collector")
	}
	select {
	case <-descc:
		panic("more than one description provided by collector")
	default:
	}

	close(descc)

	// Create a ConstMetric with the Desc. Since we don't know how many
	// variable labels there are, try for as long as it needs.
	// (NewConstMetric fails while too few label values are supplied; each
	// failed attempt appends one more magicString value.)
	for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
		m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
	}

	// Write out the metric into a proto message and look at the labels.
	// If the value is not the magicString, it is a constLabel, which doesn't interest us.
	// If the label is curried, it doesn't interest us.
	// In all other cases, only "code" or "method" is allowed.
	if err := m.Write(&pm); err != nil {
		panic("error checking metric for labels")
	}
	for _, label := range pm.Label {
		name, value := label.GetName(), label.GetValue()
		if value != magicString || isLabelCurried(c, name) {
			continue
		}
		switch name {
		case "code":
			code = true
		case "method":
			method = true
		default:
			panic("metric partitioned with non-supported labels")
		}
	}
	return
}
func isLabelCurried(c prometheus.Collector, label string) bool { | |||
// This is even hackier than the label test above. | |||
// We essentially try to curry again and see if it works. | |||
// But for that, we need to type-convert to the two | |||
// types we use here, ObserverVec or *CounterVec. | |||
switch v := c.(type) { | |||
case *prometheus.CounterVec: | |||
if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { | |||
return false | |||
} | |||
case prometheus.ObserverVec: | |||
if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { | |||
return false | |||
} | |||
default: | |||
panic("unsupported metric vec type") | |||
} | |||
return true | |||
} | |||
// emptyLabels is a one-time allocation for non-partitioned metrics to avoid | |||
// unnecessary allocations on each request. | |||
var emptyLabels = prometheus.Labels{} | |||
func labels(code, method bool, reqMethod string, status int) prometheus.Labels { | |||
if !(code || method) { | |||
return emptyLabels | |||
} | |||
labels := prometheus.Labels{} | |||
if code { | |||
labels["code"] = sanitizeCode(status) | |||
} | |||
if method { | |||
labels["method"] = sanitizeMethod(reqMethod) | |||
} | |||
return labels | |||
} | |||
// computeApproximateRequestSize returns a rough byte count for the request:
// URL, method, protocol, header names and values, host, and — when known —
// the declared body length. Wire framing overhead is deliberately ignored.
func computeApproximateRequestSize(r *http.Request) int {
	size := len(r.Method) + len(r.Proto) + len(r.Host)
	if r.URL != nil {
		size += len(r.URL.String())
	}
	for name, values := range r.Header {
		size += len(name)
		for _, value := range values {
			size += len(value)
		}
	}
	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
	// A ContentLength of -1 means "unknown" and contributes nothing.
	if r.ContentLength != -1 {
		size += int(r.ContentLength)
	}
	return size
}
// sanitizeMethod lower-cases an HTTP method name for use as a label value.
// The switch handles the common spellings without the ToLower allocation.
func sanitizeMethod(m string) string {
	switch m {
	case "get", "GET":
		return "get"
	case "put", "PUT":
		return "put"
	case "head", "HEAD":
		return "head"
	case "post", "POST":
		return "post"
	case "delete", "DELETE":
		return "delete"
	case "connect", "CONNECT":
		return "connect"
	case "options", "OPTIONS":
		return "options"
	case "notify", "NOTIFY":
		return "notify"
	}
	// Anything else (mixed case, extension methods) falls through here.
	return strings.ToLower(m)
}
// sanitizeCode renders an HTTP status code as a label value. If the wrapped
// http.Handler has not set a status code, i.e. the value is currently 0,
// sanitizeCode returns "200", for consistency with behavior in the stdlib.
// Every other value is rendered in decimal, exactly as the original
// case-per-code switch did.
func sanitizeCode(s int) string {
	if s == 0 {
		return "200"
	}
	return strconv.Itoa(s)
}
@@ -0,0 +1,895 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"bytes" | |||
"fmt" | |||
"runtime" | |||
"sort" | |||
"strings" | |||
"sync" | |||
"unicode/utf8" | |||
"github.com/golang/protobuf/proto" | |||
dto "github.com/prometheus/client_model/go" | |||
"github.com/prometheus/client_golang/prometheus/internal" | |||
) | |||
const (
	// Capacity for the channel to collect metrics and descriptors.
	capMetricChan = 1000
	// Capacity for the buffered channel that Describe writes descriptors
	// to during Register/Unregister.
	capDescChan = 10
)
// DefaultRegisterer and DefaultGatherer are the implementations of the
// Registerer and Gatherer interface a number of convenience functions in this
// package act on. Initially, both variables point to the same Registry, which
// has a process collector (currently on Linux only, see NewProcessCollector)
// and a Go collector (see NewGoCollector, in particular the note about
// stop-the-world implication with Go versions older than 1.9) already
// registered. This approach to keep default instances as global state mirrors
// the approach of other packages in the Go standard library. Note that there
// are caveats. Change the variables with caution and only if you understand the
// consequences. Users who want to avoid global state altogether should not use
// the convenience functions and act on custom instances instead.
var (
	// defaultRegistry backs both exported defaults below.
	defaultRegistry = NewRegistry()
	// DefaultRegisterer is the Registerer the package-level Register,
	// MustRegister, and Unregister convenience functions act on.
	DefaultRegisterer Registerer = defaultRegistry
	// DefaultGatherer is the Gatherer counterpart of DefaultRegisterer.
	DefaultGatherer Gatherer = defaultRegistry
)
func init() { | |||
MustRegister(NewProcessCollector(ProcessCollectorOpts{})) | |||
MustRegister(NewGoCollector()) | |||
} | |||
// NewRegistry creates a new vanilla Registry without any Collectors | |||
// pre-registered. | |||
func NewRegistry() *Registry { | |||
return &Registry{ | |||
collectorsByID: map[uint64]Collector{}, | |||
descIDs: map[uint64]struct{}{}, | |||
dimHashesByName: map[string]uint64{}, | |||
} | |||
} | |||
// NewPedanticRegistry returns a registry that checks during collection if each | |||
// collected Metric is consistent with its reported Desc, and if the Desc has | |||
// actually been registered with the registry. Unchecked Collectors (those whose | |||
// Describe methed does not yield any descriptors) are excluded from the check. | |||
// | |||
// Usually, a Registry will be happy as long as the union of all collected | |||
// Metrics is consistent and valid even if some metrics are not consistent with | |||
// their own Desc or a Desc provided by their registered Collector. Well-behaved | |||
// Collectors and Metrics will only provide consistent Descs. This Registry is | |||
// useful to test the implementation of Collectors and Metrics. | |||
func NewPedanticRegistry() *Registry { | |||
r := NewRegistry() | |||
r.pedanticChecksEnabled = true | |||
return r | |||
} | |||
// Registerer is the interface for the part of a registry in charge of
// registering and unregistering. Users of custom registries should use
// Registerer as type for registration purposes (rather than the Registry type
// directly). In that way, they are free to use custom Registerer implementation
// (e.g. for testing purposes).
type Registerer interface {
	// Register registers a new Collector to be included in metrics
	// collection. It returns an error if the descriptors provided by the
	// Collector are invalid or if they — in combination with descriptors of
	// already registered Collectors — do not fulfill the consistency and
	// uniqueness criteria described in the documentation of metric.Desc.
	//
	// If the provided Collector is equal to a Collector already registered
	// (which includes the case of re-registering the same Collector), the
	// returned error is an instance of AlreadyRegisteredError, which
	// contains the previously registered Collector.
	//
	// A Collector whose Describe method does not yield any Desc is treated
	// as unchecked. Registration will always succeed. No check for
	// re-registering (see previous paragraph) is performed. Thus, the
	// caller is responsible for not double-registering the same unchecked
	// Collector, and for providing a Collector that will not cause
	// inconsistent metrics on collection. (This would lead to scrape
	// errors.)
	Register(Collector) error
	// MustRegister works like Register but registers any number of
	// Collectors and panics upon the first registration that causes an
	// error.
	MustRegister(...Collector)
	// Unregister unregisters the Collector that equals the Collector passed
	// in as an argument. (Two Collectors are considered equal if their
	// Describe method yields the same set of descriptors.) The function
	// returns whether a Collector was unregistered. Note that an unchecked
	// Collector cannot be unregistered (as its Describe method does not
	// yield any descriptor).
	//
	// Note that even after unregistering, it will not be possible to
	// register a new Collector that is inconsistent with the unregistered
	// Collector, e.g. a Collector collecting metrics with the same name but
	// a different help string. The rationale here is that the same registry
	// instance must only collect consistent metrics throughout its
	// lifetime.
	Unregister(Collector) bool
}
// Gatherer is the interface for the part of a registry in charge of gathering
// the collected metrics into a number of MetricFamilies. The Gatherer interface
// comes with the same general implication as described for the Registerer
// interface.
type Gatherer interface {
	// Gather calls the Collect method of the registered Collectors and then
	// gathers the collected metrics into a lexicographically sorted slice
	// of uniquely named MetricFamily protobufs. Gather ensures that the
	// returned slice is valid and self-consistent so that it can be used
	// for valid exposition. As an exception to the strict consistency
	// requirements described for metric.Desc, Gather will tolerate
	// different sets of label names for metrics of the same metric family.
	//
	// Even if an error occurs, Gather attempts to gather as many metrics as
	// possible. Hence, if a non-nil error is returned, the returned
	// MetricFamily slice could be nil (in case of a fatal error that
	// prevented any meaningful metric collection) or contain a number of
	// MetricFamily protobufs, some of which might be incomplete, and some
	// might be missing altogether. The returned error (which might be a
	// MultiError) explains the details. Note that this is mostly useful for
	// debugging purposes. If the gathered protobufs are to be used for
	// exposition in actual monitoring, it is almost always better to not
	// expose an incomplete result and instead disregard the returned
	// MetricFamily protobufs in case the returned error is non-nil.
	Gather() ([]*dto.MetricFamily, error)
}
// Register registers the provided Collector with the DefaultRegisterer.
//
// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
// details.
func Register(c Collector) error {
	return DefaultRegisterer.Register(c)
}
// MustRegister registers the provided Collectors with the DefaultRegisterer and
// panics if any error occurs.
//
// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
// there for more details.
func MustRegister(cs ...Collector) {
	DefaultRegisterer.MustRegister(cs...)
}
// Unregister removes the registration of the provided Collector from the
// DefaultRegisterer.
//
// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
// more details.
func Unregister(c Collector) bool {
	return DefaultRegisterer.Unregister(c)
}
// GathererFunc turns a function into a Gatherer.
type GathererFunc func() ([]*dto.MetricFamily, error)

// Gather implements Gatherer by calling gf itself.
func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
	return gf()
}
// AlreadyRegisteredError is returned by the Register method if the Collector to
// be registered has already been registered before, or a different Collector
// that collects the same metrics has been registered before. Registration fails
// in that case, but you can detect from the kind of error what has
// happened. The error contains fields for the existing Collector and the
// (rejected) new Collector that equals the existing one. This can be used to
// find out if an equal Collector has been registered before and switch over to
// using the old one, as demonstrated in the example.
type AlreadyRegisteredError struct {
	ExistingCollector, NewCollector Collector
}

// Error implements the error interface.
func (err AlreadyRegisteredError) Error() string {
	return "duplicate metrics collector registration attempted"
}
// MultiError is a slice of errors implementing the error interface. It is used
// by a Gatherer to report multiple errors during MetricFamily gathering.
type MultiError []error

// Error renders the contained errors as a single message: a count header
// followed by one "* ..." line per error. An empty MultiError renders as "".
func (errs MultiError) Error() string {
	if len(errs) == 0 {
		return ""
	}
	var b strings.Builder
	fmt.Fprintf(&b, "%d error(s) occurred:", len(errs))
	for _, err := range errs {
		fmt.Fprintf(&b, "\n* %s", err)
	}
	return b.String()
}
// Append appends the provided error if it is not nil. | |||
func (errs *MultiError) Append(err error) { | |||
if err != nil { | |||
*errs = append(*errs, err) | |||
} | |||
} | |||
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only | |||
// contained error as error if len(errs is 1). In all other cases, it returns | |||
// the MultiError directly. This is helpful for returning a MultiError in a way | |||
// that only uses the MultiError if needed. | |||
func (errs MultiError) MaybeUnwrap() error { | |||
switch len(errs) { | |||
case 0: | |||
return nil | |||
case 1: | |||
return errs[0] | |||
default: | |||
return errs | |||
} | |||
} | |||
// Registry registers Prometheus collectors, collects their metrics, and gathers
// them into MetricFamilies for exposition. It implements both Registerer and
// Gatherer. The zero value is not usable. Create instances with NewRegistry or
// NewPedanticRegistry.
type Registry struct {
	// mtx guards all fields below.
	mtx sync.RWMutex
	collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
	// descIDs is the set of Desc IDs of all registered (checked) Collectors.
	descIDs map[uint64]struct{}
	// dimHashesByName maps each fqName to the hash of its label-name/help
	// dimensions, used to reject inconsistent re-registrations.
	dimHashesByName map[string]uint64
	// uncheckedCollectors holds Collectors whose Describe yielded no Desc.
	uncheckedCollectors []Collector
	// pedanticChecksEnabled is set by NewPedanticRegistry.
	pedanticChecksEnabled bool
}
// Register implements Registerer. It validates the Collector's descriptors
// under the registry lock and only mutates registry state after all checks
// have passed.
func (r *Registry) Register(c Collector) error {
	var (
		descChan           = make(chan *Desc, capDescChan)
		newDescIDs         = map[uint64]struct{}{}
		newDimHashesByName = map[string]uint64{}
		collectorID        uint64 // Just a sum of all desc IDs.
		duplicateDescErr   error
	)
	// Describe runs in its own goroutine so a Collector that produces many
	// descriptors cannot deadlock on the bounded channel.
	go func() {
		c.Describe(descChan)
		close(descChan)
	}()
	r.mtx.Lock()
	defer func() {
		// Drain channel in case of premature return to not leak a goroutine.
		for range descChan {
		}
		r.mtx.Unlock()
	}()
	// Conduct various tests...
	for desc := range descChan {

		// Is the descriptor valid at all?
		if desc.err != nil {
			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
		}

		// Is the descID unique?
		// (In other words: Is the fqName + constLabel combination unique?)
		if _, exists := r.descIDs[desc.id]; exists {
			// Not fatal yet: a fully identical Collector is reported
			// as AlreadyRegisteredError below instead.
			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
		}

		// If it is not a duplicate desc in this collector, add it to
		// the collectorID.  (We allow duplicate descs within the same
		// collector, but their existence must be a no-op.)
		if _, exists := newDescIDs[desc.id]; !exists {
			newDescIDs[desc.id] = struct{}{}
			collectorID += desc.id
		}

		// Are all the label names and the help string consistent with
		// previous descriptors of the same name?
		// First check existing descriptors...
		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
			if dimHash != desc.dimHash {
				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
			}
		} else {
			// ...then check the new descriptors already seen.
			if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
				if dimHash != desc.dimHash {
					return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
				}
			} else {
				newDimHashesByName[desc.fqName] = desc.dimHash
			}
		}
	}
	// A Collector yielding no Desc at all is considered unchecked.
	if len(newDescIDs) == 0 {
		r.uncheckedCollectors = append(r.uncheckedCollectors, c)
		return nil
	}
	if existing, exists := r.collectorsByID[collectorID]; exists {
		return AlreadyRegisteredError{
			ExistingCollector: existing,
			NewCollector:      c,
		}
	}
	// If the collectorID is new, but at least one of the descs existed
	// before, we are in trouble.
	if duplicateDescErr != nil {
		return duplicateDescErr
	}

	// Only after all tests have passed, actually register.
	r.collectorsByID[collectorID] = c
	for hash := range newDescIDs {
		r.descIDs[hash] = struct{}{}
	}
	for name, dimHash := range newDimHashesByName {
		r.dimHashesByName[name] = dimHash
	}
	return nil
}
// Unregister implements Registerer. | |||
func (r *Registry) Unregister(c Collector) bool { | |||
var ( | |||
descChan = make(chan *Desc, capDescChan) | |||
descIDs = map[uint64]struct{}{} | |||
collectorID uint64 // Just a sum of the desc IDs. | |||
) | |||
go func() { | |||
c.Describe(descChan) | |||
close(descChan) | |||
}() | |||
for desc := range descChan { | |||
if _, exists := descIDs[desc.id]; !exists { | |||
collectorID += desc.id | |||
descIDs[desc.id] = struct{}{} | |||
} | |||
} | |||
r.mtx.RLock() | |||
if _, exists := r.collectorsByID[collectorID]; !exists { | |||
r.mtx.RUnlock() | |||
return false | |||
} | |||
r.mtx.RUnlock() | |||
r.mtx.Lock() | |||
defer r.mtx.Unlock() | |||
delete(r.collectorsByID, collectorID) | |||
for id := range descIDs { | |||
delete(r.descIDs, id) | |||
} | |||
// dimHashesByName is left untouched as those must be consistent | |||
// throughout the lifetime of a program. | |||
return true | |||
} | |||
// MustRegister implements Registerer. | |||
func (r *Registry) MustRegister(cs ...Collector) { | |||
for _, c := range cs { | |||
if err := r.Register(c); err != nil { | |||
panic(err) | |||
} | |||
} | |||
} | |||
// Gather implements Gatherer. It collects metrics from all registered
// (checked and unchecked) Collectors concurrently, growing the number of
// collect goroutines on demand up to one per Collector, and merges the
// results into sorted MetricFamilies. Consistency errors are accumulated
// in a MultiError rather than aborting the whole gather.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
	var (
		checkedMetricChan   = make(chan Metric, capMetricChan)
		uncheckedMetricChan = make(chan Metric, capMetricChan)
		metricHashes        = map[uint64]struct{}{}
		wg                  sync.WaitGroup
		errs                MultiError          // The collected errors to return in the end.
		registeredDescIDs   map[uint64]struct{} // Only used for pedantic checks
	)

	r.mtx.RLock()
	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
	checkedCollectors := make(chan Collector, len(r.collectorsByID))
	uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
	for _, collector := range r.collectorsByID {
		checkedCollectors <- collector
	}
	for _, collector := range r.uncheckedCollectors {
		uncheckedCollectors <- collector
	}
	// In case pedantic checks are enabled, we have to copy the map before
	// giving up the RLock.
	if r.pedanticChecksEnabled {
		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
		for id := range r.descIDs {
			registeredDescIDs[id] = struct{}{}
		}
	}
	r.mtx.RUnlock()

	wg.Add(goroutineBudget)

	// collectWorker keeps pulling Collectors off either queue until both
	// are empty, calling wg.Done once per collected Collector.
	collectWorker := func() {
		for {
			select {
			case collector := <-checkedCollectors:
				collector.Collect(checkedMetricChan)
			case collector := <-uncheckedCollectors:
				collector.Collect(uncheckedMetricChan)
			default:
				return
			}
			wg.Done()
		}
	}

	// Start the first worker now to make sure at least one is running.
	go collectWorker()
	goroutineBudget--

	// Close checkedMetricChan and uncheckedMetricChan once all collectors
	// are collected.
	go func() {
		wg.Wait()
		close(checkedMetricChan)
		close(uncheckedMetricChan)
	}()

	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
	defer func() {
		if checkedMetricChan != nil {
			for range checkedMetricChan {
			}
		}
		if uncheckedMetricChan != nil {
			for range uncheckedMetricChan {
			}
		}
	}()

	// Copy the channel references so we can nil them out later to remove
	// them from the select statements below.
	cmc := checkedMetricChan
	umc := uncheckedMetricChan

	for {
		select {
		case metric, ok := <-cmc:
			if !ok {
				cmc = nil
				break
			}
			errs.Append(processMetric(
				metric, metricFamiliesByName,
				metricHashes,
				registeredDescIDs,
			))
		case metric, ok := <-umc:
			if !ok {
				umc = nil
				break
			}
			// nil registeredDescIDs: unchecked metrics skip the
			// pedantic desc checks by design.
			errs.Append(processMetric(
				metric, metricFamiliesByName,
				metricHashes,
				nil,
			))
		default:
			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
				// All collectors are already being worked on or
				// we have already as many goroutines started as
				// there are collectors. Do the same as above,
				// just without the default.
				select {
				case metric, ok := <-cmc:
					if !ok {
						cmc = nil
						break
					}
					errs.Append(processMetric(
						metric, metricFamiliesByName,
						metricHashes,
						registeredDescIDs,
					))
				case metric, ok := <-umc:
					if !ok {
						umc = nil
						break
					}
					errs.Append(processMetric(
						metric, metricFamiliesByName,
						metricHashes,
						nil,
					))
				}
				break
			}
			// Start more workers.
			go collectWorker()
			goroutineBudget--
			runtime.Gosched()
		}
		// Once both checkedMetricChan and uncheckedMetricChan are closed
		// and drained, the contraption above will nil out cmc and umc,
		// and then we can leave the collect loop here.
		if cmc == nil && umc == nil {
			break
		}
	}
	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
// processMetric is an internal helper method only used by the Gather method.
// It writes the Metric into dto form, verifies it is consistent with its
// MetricFamily (help string, metric type, uniqueness of name+labels), and
// appends it to metricFamiliesByName. If registeredDescIDs is non-nil,
// additional pedantic checks against the registered Desc are performed.
func processMetric(
	metric Metric,
	metricFamiliesByName map[string]*dto.MetricFamily,
	metricHashes map[uint64]struct{},
	registeredDescIDs map[uint64]struct{},
) error {
	desc := metric.Desc()
	// Wrapped metrics collected by an unchecked Collector can have an
	// invalid Desc.
	if desc.err != nil {
		return desc.err
	}
	dtoMetric := &dto.Metric{}
	if err := metric.Write(dtoMetric); err != nil {
		return fmt.Errorf("error collecting metric %v: %s", desc, err)
	}
	metricFamily, ok := metricFamiliesByName[desc.fqName]
	if ok { // Existing name.
		if metricFamily.GetHelp() != desc.help {
			return fmt.Errorf(
				"collected metric %s %s has help %q but should have %q",
				desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
			)
		}
		// TODO(beorn7): Simplify switch once Desc has type.
		// The dto field matching the family's declared type must be set.
		switch metricFamily.GetType() {
		case dto.MetricType_COUNTER:
			if dtoMetric.Counter == nil {
				return fmt.Errorf(
					"collected metric %s %s should be a Counter",
					desc.fqName, dtoMetric,
				)
			}
		case dto.MetricType_GAUGE:
			if dtoMetric.Gauge == nil {
				return fmt.Errorf(
					"collected metric %s %s should be a Gauge",
					desc.fqName, dtoMetric,
				)
			}
		case dto.MetricType_SUMMARY:
			if dtoMetric.Summary == nil {
				return fmt.Errorf(
					"collected metric %s %s should be a Summary",
					desc.fqName, dtoMetric,
				)
			}
		case dto.MetricType_UNTYPED:
			if dtoMetric.Untyped == nil {
				return fmt.Errorf(
					"collected metric %s %s should be Untyped",
					desc.fqName, dtoMetric,
				)
			}
		case dto.MetricType_HISTOGRAM:
			if dtoMetric.Histogram == nil {
				return fmt.Errorf(
					"collected metric %s %s should be a Histogram",
					desc.fqName, dtoMetric,
				)
			}
		default:
			panic("encountered MetricFamily with invalid type")
		}
	} else { // New name.
		metricFamily = &dto.MetricFamily{}
		metricFamily.Name = proto.String(desc.fqName)
		metricFamily.Help = proto.String(desc.help)
		// TODO(beorn7): Simplify switch once Desc has type.
		// Infer the family type from whichever dto field is populated.
		switch {
		case dtoMetric.Gauge != nil:
			metricFamily.Type = dto.MetricType_GAUGE.Enum()
		case dtoMetric.Counter != nil:
			metricFamily.Type = dto.MetricType_COUNTER.Enum()
		case dtoMetric.Summary != nil:
			metricFamily.Type = dto.MetricType_SUMMARY.Enum()
		case dtoMetric.Untyped != nil:
			metricFamily.Type = dto.MetricType_UNTYPED.Enum()
		case dtoMetric.Histogram != nil:
			metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
		default:
			return fmt.Errorf("empty metric collected: %s", dtoMetric)
		}
		if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
			return err
		}
		metricFamiliesByName[desc.fqName] = metricFamily
	}
	if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
		return err
	}
	if registeredDescIDs != nil {
		// Is the desc registered at all?
		if _, exist := registeredDescIDs[desc.id]; !exist {
			return fmt.Errorf(
				"collected metric %s %s with unregistered descriptor %s",
				metricFamily.GetName(), dtoMetric, desc,
			)
		}
		if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
			return err
		}
	}
	metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
	return nil
}
// Gatherers is a slice of Gatherer instances that implements the Gatherer
// interface itself. Its Gather method calls Gather on all Gatherers in the
// slice in order and returns the merged results. Errors returned from the
// Gather calls are all returned in a flattened MultiError. Duplicate and
// inconsistent Metrics are skipped (first occurrence in slice order wins) and
// reported in the returned error.
//
// Gatherers can be used to merge the Gather results from multiple
// Registries. It also provides a way to directly inject existing MetricFamily
// protobufs into the gathering by creating a custom Gatherer with a Gather
// method that simply returns the existing MetricFamily protobufs. Note that no
// registration is involved (in contrast to Collector registration), so
// obviously registration-time checks cannot happen. Any inconsistencies between
// the gathered MetricFamilies are reported as errors by the Gather method, and
// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
// (e.g. syntactically invalid metric or label names) will go undetected.
type Gatherers []Gatherer

// Gather implements Gatherer. Errors from individual Gatherers are prefixed
// with the (1-based) index of the offending Gatherer and collected rather
// than aborting the merge.
func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
	var (
		metricFamiliesByName = map[string]*dto.MetricFamily{}
		metricHashes         = map[uint64]struct{}{}
		errs                 MultiError // The collected errors to return in the end.
	)

	for i, g := range gs {
		mfs, err := g.Gather()
		if err != nil {
			// Flatten a nested MultiError so the caller sees one level.
			if multiErr, ok := err.(MultiError); ok {
				for _, err := range multiErr {
					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
				}
			} else {
				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
			}
		}
		for _, mf := range mfs {
			existingMF, exists := metricFamiliesByName[mf.GetName()]
			if exists {
				// Same name must mean same help and same type.
				if existingMF.GetHelp() != mf.GetHelp() {
					errs = append(errs, fmt.Errorf(
						"gathered metric family %s has help %q but should have %q",
						mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
					))
					continue
				}
				if existingMF.GetType() != mf.GetType() {
					errs = append(errs, fmt.Errorf(
						"gathered metric family %s has type %s but should have %s",
						mf.GetName(), mf.GetType(), existingMF.GetType(),
					))
					continue
				}
			} else {
				existingMF = &dto.MetricFamily{}
				existingMF.Name = mf.Name
				existingMF.Help = mf.Help
				existingMF.Type = mf.Type
				if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
					errs = append(errs, err)
					continue
				}
				metricFamiliesByName[mf.GetName()] = existingMF
			}
			for _, m := range mf.Metric {
				if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
					errs = append(errs, err)
					continue
				}
				existingMF.Metric = append(existingMF.Metric, m)
			}
		}
	}
	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
// checkSuffixCollisions checks for collisions with the “magic” suffixes the | |||
// Prometheus text format and the internal metric representation of the | |||
// Prometheus server add while flattening Summaries and Histograms. | |||
func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { | |||
var ( | |||
newName = mf.GetName() | |||
newType = mf.GetType() | |||
newNameWithoutSuffix = "" | |||
) | |||
switch { | |||
case strings.HasSuffix(newName, "_count"): | |||
newNameWithoutSuffix = newName[:len(newName)-6] | |||
case strings.HasSuffix(newName, "_sum"): | |||
newNameWithoutSuffix = newName[:len(newName)-4] | |||
case strings.HasSuffix(newName, "_bucket"): | |||
newNameWithoutSuffix = newName[:len(newName)-7] | |||
} | |||
if newNameWithoutSuffix != "" { | |||
if existingMF, ok := mfs[newNameWithoutSuffix]; ok { | |||
switch existingMF.GetType() { | |||
case dto.MetricType_SUMMARY: | |||
if !strings.HasSuffix(newName, "_bucket") { | |||
return fmt.Errorf( | |||
"collected metric named %q collides with previously collected summary named %q", | |||
newName, newNameWithoutSuffix, | |||
) | |||
} | |||
case dto.MetricType_HISTOGRAM: | |||
return fmt.Errorf( | |||
"collected metric named %q collides with previously collected histogram named %q", | |||
newName, newNameWithoutSuffix, | |||
) | |||
} | |||
} | |||
} | |||
if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { | |||
if _, ok := mfs[newName+"_count"]; ok { | |||
return fmt.Errorf( | |||
"collected histogram or summary named %q collides with previously collected metric named %q", | |||
newName, newName+"_count", | |||
) | |||
} | |||
if _, ok := mfs[newName+"_sum"]; ok { | |||
return fmt.Errorf( | |||
"collected histogram or summary named %q collides with previously collected metric named %q", | |||
newName, newName+"_sum", | |||
) | |||
} | |||
} | |||
if newType == dto.MetricType_HISTOGRAM { | |||
if _, ok := mfs[newName+"_bucket"]; ok { | |||
return fmt.Errorf( | |||
"collected histogram named %q collides with previously collected metric named %q", | |||
newName, newName+"_bucket", | |||
) | |||
} | |||
} | |||
return nil | |||
} | |||
// checkMetricConsistency checks if the provided Metric is consistent with the
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
// name. If the resulting hash is already in the provided metricHashes, an error
// is returned. If not, it is added to metricHashes.
//
// Checks performed: the dto field matching the family type is set, label
// names are valid and unique, summaries carry no explicit "quantile" label,
// label values are valid UTF-8, and the name+label-values combination has
// not been collected before.
func checkMetricConsistency(
	metricFamily *dto.MetricFamily,
	dtoMetric *dto.Metric,
	metricHashes map[uint64]struct{},
) error {
	name := metricFamily.GetName()

	// Type consistency with metric family.
	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
		return fmt.Errorf(
			"collected metric %q { %s} is not a %s",
			name, dtoMetric, metricFamily.GetType(),
		)
	}

	// Duplicate detection below relies on the labels being sorted later;
	// here equal names are only caught when adjacent (sufficient once
	// sorted input is the common case).
	previousLabelName := ""
	for _, labelPair := range dtoMetric.GetLabel() {
		labelName := labelPair.GetName()
		if labelName == previousLabelName {
			return fmt.Errorf(
				"collected metric %q { %s} has two or more labels with the same name: %s",
				name, dtoMetric, labelName,
			)
		}
		if !checkLabelName(labelName) {
			return fmt.Errorf(
				"collected metric %q { %s} has a label with an invalid name: %s",
				name, dtoMetric, labelName,
			)
		}
		if dtoMetric.Summary != nil && labelName == quantileLabel {
			return fmt.Errorf(
				"collected metric %q { %s} must not have an explicit %q label",
				name, dtoMetric, quantileLabel,
			)
		}
		if !utf8.ValidString(labelPair.GetValue()) {
			return fmt.Errorf(
				"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
				name, dtoMetric, labelName, labelPair.GetValue())
		}
		previousLabelName = labelName
	}

	// Is the metric unique (i.e. no other metric with the same name and the same labels)?
	h := hashNew()
	h = hashAdd(h, name)
	h = hashAddByte(h, separatorByte)
	// Make sure label pairs are sorted. We depend on it for the consistency
	// check.
	sort.Sort(labelPairSorter(dtoMetric.Label))
	for _, lp := range dtoMetric.Label {
		h = hashAdd(h, lp.GetName())
		h = hashAddByte(h, separatorByte)
		h = hashAdd(h, lp.GetValue())
		h = hashAddByte(h, separatorByte)
	}
	if _, exists := metricHashes[h]; exists {
		return fmt.Errorf(
			"collected metric %q { %s} was collected before with the same name and label values",
			name, dtoMetric,
		)
	}
	metricHashes[h] = struct{}{}
	return nil
}
// checkDescConsistency verifies that the collected dtoMetric matches its
// Desc: the help string agrees with the MetricFamily, and the metric's label
// pairs correspond exactly (names, and values for const labels) to the
// Desc's const and variable labels.
func checkDescConsistency(
	metricFamily *dto.MetricFamily,
	dtoMetric *dto.Metric,
	desc *Desc,
) error {
	// Desc help consistency with metric family help.
	if metricFamily.GetHelp() != desc.help {
		return fmt.Errorf(
			"collected metric %s %s has help %q but should have %q",
			metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
		)
	}

	// Is the desc consistent with the content of the metric?
	lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
	lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
	for _, l := range desc.variableLabels {
		// Variable labels have a name but no pre-set value.
		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
			Name: proto.String(l),
		})
	}
	if len(lpsFromDesc) != len(dtoMetric.Label) {
		return fmt.Errorf(
			"labels in collected metric %s %s are inconsistent with descriptor %s",
			metricFamily.GetName(), dtoMetric, desc,
		)
	}
	sort.Sort(labelPairSorter(lpsFromDesc))
	for i, lpFromDesc := range lpsFromDesc {
		lpFromMetric := dtoMetric.Label[i]
		// Only const labels (Value != nil) have their values compared.
		if lpFromDesc.GetName() != lpFromMetric.GetName() ||
			lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
			return fmt.Errorf(
				"labels in collected metric %s %s are inconsistent with descriptor %s",
				metricFamily.GetName(), dtoMetric, desc,
			)
		}
	}
	return nil
}
@@ -0,0 +1,626 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"fmt" | |||
"math" | |||
"sort" | |||
"sync" | |||
"time" | |||
"github.com/beorn7/perks/quantile" | |||
"github.com/golang/protobuf/proto" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// quantileLabel is used for the label that defines the quantile in a
// summary. It is therefore reserved and must not appear as a const or
// variable label of a Summary (enforced in newSummary).
const quantileLabel = "quantile"
// A Summary captures individual observations from an event or sample stream and
// summarizes them in a manner similar to traditional summary statistics: 1. sum
// of observations, 2. observation count, 3. rank estimations.
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations. However, the default behavior will change in the
// upcoming v0.10 of the library. There will be no rank estimations at all by
// default. For a sane transition, it is recommended to set the desired rank
// estimations explicitly.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
// queries served across all instances of a service), consider the Histogram
// metric type. See the Prometheus documentation for more details.
//
// To create Summary instances, use NewSummary.
type Summary interface {
	Metric
	Collector

	// Observe adds a single observation to the summary.
	// The implementation in this package is safe for concurrent use.
	Observe(float64)
}
// DefObjectives are the default Summary quantile values.
//
// Deprecated: DefObjectives will not be used as the default objectives in
// v0.10 of the library. The default Summary will have no quantiles then.
var (
	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}

	// errQuantileLabelNotAllowed is panicked by newSummary when the
	// reserved "quantile" label appears among const or variable labels.
	errQuantileLabelNotAllowed = fmt.Errorf(
		"%q is not allowed as label name in summaries", quantileLabel,
	)
)
// Default values for SummaryOpts. They are applied in newSummary when the
// corresponding SummaryOpts field is left at its zero value.
const (
	// DefMaxAge is the default duration for which observations stay
	// relevant.
	DefMaxAge time.Duration = 10 * time.Minute
	// DefAgeBuckets is the default number of buckets used to calculate the
	// age of observations.
	DefAgeBuckets = 5
	// DefBufCap is the standard buffer size for collecting Summary observations.
	DefBufCap = 500
)
// SummaryOpts bundles the options for creating a Summary metric. It is
// mandatory to set Name to a non-empty string. While all other fields are
// optional and can safely be left at their zero value, it is recommended to set
// a help string and to explicitly set the Objectives field to the desired value
// as the default value will change in the upcoming v0.10 of the library.
type SummaryOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Summary (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Summary must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this Summary.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// Due to the way a Summary is represented in the Prometheus text format
	// and how it is handled by the Prometheus server internally, “quantile”
	// is an illegal label name. Construction of a Summary or SummaryVec
	// will panic if this label name is used in ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
	ConstLabels Labels

	// Objectives defines the quantile rank estimates with their respective
	// absolute error. If Objectives[q] = e, then the value reported for q
	// will be the φ-quantile value for some φ between q-e and q+e. The
	// default value is DefObjectives. It is used if Objectives is left at
	// its zero value (i.e. nil). To create a Summary without Objectives,
	// set it to an empty map (i.e. map[float64]float64{}).
	//
	// Deprecated: Note that the current value of DefObjectives is
	// deprecated. It will be replaced by an empty map in v0.10 of the
	// library. Please explicitly set Objectives to the desired value.
	Objectives map[float64]float64

	// MaxAge defines the duration for which an observation stays relevant
	// for the summary. Must be positive. The default value is DefMaxAge.
	MaxAge time.Duration

	// AgeBuckets is the number of buckets used to exclude observations that
	// are older than MaxAge from the summary. A higher number has a
	// resource penalty, so only increase it if the higher resolution is
	// really required. For very high observation rates, you might want to
	// reduce the number of age buckets. With only one age bucket, you will
	// effectively see a complete reset of the summary each time MaxAge has
	// passed. The default value is DefAgeBuckets.
	AgeBuckets uint32

	// BufCap defines the default sample stream buffer size.  The default
	// value of DefBufCap should suffice for most uses. If there is a need
	// to increase the value, a multiple of 500 is recommended (because that
	// is the internal buffer size of the underlying package
	// "github.com/beorn7/perks/quantile").
	BufCap uint32
}
// Problem with the sliding-window decay algorithm: the Merge method of
// perks/quantile does not actually work as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
// summaries in the first place. To avoid using Merge, we are currently adding
// observations to _each_ age bucket, i.e. the effort to add a sample is
// essentially multiplied by the number of age buckets. When rotating age
// buckets, we empty the previous head stream. On scrape time, we simply take
// the quantiles from the head stream (no merging required). Result: More effort
// on observation time, less effort on scrape time, which is exactly the
// opposite of what we try to accomplish, but at least the results are correct.
//
// The quite elegant previous contraption to merge the age buckets efficiently
// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
// can't be used anymore.
// NewSummary creates a new Summary based on the provided SummaryOpts. | |||
func NewSummary(opts SummaryOpts) Summary { | |||
return newSummary( | |||
NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
nil, | |||
opts.ConstLabels, | |||
), | |||
opts, | |||
) | |||
} | |||
// newSummary constructs the summary implementation shared by NewSummary and
// SummaryVec. It validates label usage (the "quantile" label is reserved),
// applies the SummaryOpts defaults, creates one quantile stream per age
// bucket, and initializes self-collection. It panics on invalid options.
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
	if len(desc.variableLabels) != len(labelValues) {
		panic(errInconsistentCardinality)
	}

	// The reserved "quantile" label must not appear as a variable or
	// const label.
	for _, n := range desc.variableLabels {
		if n == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}
	for _, lp := range desc.constLabelPairs {
		if lp.GetName() == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}

	// Apply defaults for any zero-valued options.
	if opts.Objectives == nil {
		opts.Objectives = DefObjectives
	}

	if opts.MaxAge < 0 {
		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
	}
	if opts.MaxAge == 0 {
		opts.MaxAge = DefMaxAge
	}

	if opts.AgeBuckets == 0 {
		opts.AgeBuckets = DefAgeBuckets
	}

	if opts.BufCap == 0 {
		opts.BufCap = DefBufCap
	}

	s := &summary{
		desc: desc,

		objectives:       opts.Objectives,
		sortedObjectives: make([]float64, 0, len(opts.Objectives)),

		labelPairs: makeLabelPairs(desc, labelValues),

		hotBuf:         make([]float64, 0, opts.BufCap),
		coldBuf:        make([]float64, 0, opts.BufCap),
		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
	}
	s.headStreamExpTime = time.Now().Add(s.streamDuration)
	s.hotBufExpTime = s.headStreamExpTime

	// One stream per age bucket; the head stream is the one queried on
	// scrape (see the package comment on the decay algorithm above).
	for i := uint32(0); i < opts.AgeBuckets; i++ {
		s.streams = append(s.streams, s.newStream())
	}
	s.headStream = s.streams[0]

	for qu := range s.objectives {
		s.sortedObjectives = append(s.sortedObjectives, qu)
	}
	sort.Float64s(s.sortedObjectives)

	s.init(s) // Init self-collection.
	return s
}
// summary is the internal implementation of the Summary interface. It keeps
// a double-buffered observation queue (hotBuf/coldBuf) in front of a ring of
// per-age-bucket quantile streams.
type summary struct {
	selfCollector

	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
	mtx    sync.Mutex // Protects every other moving part.
	// Lock bufMtx before mtx if both are needed.

	desc *Desc

	objectives       map[float64]float64
	sortedObjectives []float64 // Objective ranks in ascending order.

	labelPairs []*dto.LabelPair

	// Running totals over all observations ever flushed.
	sum float64
	cnt uint64

	hotBuf, coldBuf []float64

	streams                          []*quantile.Stream // One stream per age bucket.
	streamDuration                   time.Duration      // MaxAge / AgeBuckets.
	headStream                       *quantile.Stream   // Stream queried on scrape.
	headStreamIdx                    int
	headStreamExpTime, hotBufExpTime time.Time
}
// Desc returns the summary's descriptor (part of the Metric interface).
func (s *summary) Desc() *Desc {
	return s.desc
}
// Observe implements Summary. It appends v to the hot buffer under bufMtx
// and kicks off an asynchronous flush when the buffer's time window has
// expired or the buffer is full.
func (s *summary) Observe(v float64) {
	s.bufMtx.Lock()
	defer s.bufMtx.Unlock()

	now := time.Now()
	if now.After(s.hotBufExpTime) {
		s.asyncFlush(now)
	}
	s.hotBuf = append(s.hotBuf, v)
	if len(s.hotBuf) == cap(s.hotBuf) {
		s.asyncFlush(now)
	}
}
// Write implements Metric. It flushes pending observations synchronously and
// then snapshots count, sum, and the objective quantiles from the head
// stream into out. Quantiles are reported as NaN while the head stream is
// empty.
func (s *summary) Write(out *dto.Metric) error {
	sum := &dto.Summary{}
	qs := make([]*dto.Quantile, 0, len(s.objectives))

	s.bufMtx.Lock()
	s.mtx.Lock()
	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
	s.swapBufs(time.Now())
	s.bufMtx.Unlock()

	// Flush synchronously here (unlike Observe) so the snapshot below
	// includes everything observed so far.
	s.flushColdBuf()
	sum.SampleCount = proto.Uint64(s.cnt)
	sum.SampleSum = proto.Float64(s.sum)

	for _, rank := range s.sortedObjectives {
		var q float64
		if s.headStream.Count() == 0 {
			q = math.NaN()
		} else {
			q = s.headStream.Query(rank)
		}
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(q),
		})
	}

	s.mtx.Unlock()

	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs
	return nil
}
// newStream creates a quantile stream targeted at the summary's objectives.
func (s *summary) newStream() *quantile.Stream {
	return quantile.NewTargeted(s.objectives)
}
// asyncFlush swaps the buffers and flushes the (now cold) buffer into the
// streams in a background goroutine. asyncFlush needs bufMtx locked.
func (s *summary) asyncFlush(now time.Time) {
	s.mtx.Lock()
	s.swapBufs(now)

	// Unblock the original goroutine that was responsible for the mutation
	// that triggered the compaction. But hold onto the global non-buffer
	// state mutex until the operation finishes.
	// NOTE: mtx is intentionally unlocked by a different goroutine than the
	// one that locked it; sync.Mutex explicitly permits this.
	go func() {
		s.flushColdBuf()
		s.mtx.Unlock()
	}()
}
// maybeRotateStreams advances headStream through the ring of streams until
// the head stream's expiry time catches up with the hot buffer's expiry
// time, resetting each stream it retires. Each rotation ages out the oldest
// streamDuration worth of observations. Needs mtx AND bufMtx locked.
func (s *summary) maybeRotateStreams() {
	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
		// Retire the current head: clear it and move to the next slot.
		s.headStream.Reset()
		s.headStreamIdx++
		if s.headStreamIdx >= len(s.streams) {
			s.headStreamIdx = 0
		}
		s.headStream = s.streams[s.headStreamIdx]
		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
	}
}
// flushColdBuf feeds every buffered observation into all quantile streams,
// updates the running count and sum, empties the cold buffer (keeping its
// capacity), and finally rotates the streams if due. Needs mtx locked.
func (s *summary) flushColdBuf() {
	for _, v := range s.coldBuf {
		// Every stream in the ring receives each observation so that any
		// of them can serve as head later.
		for _, stream := range s.streams {
			stream.Insert(v)
		}
		s.cnt++
		s.sum += v
	}
	// Truncate rather than reallocate: the backing array is reused.
	s.coldBuf = s.coldBuf[0:0]
	s.maybeRotateStreams()
}
// swapBufs exchanges the hot and cold buffers and advances hotBufExpTime in
// streamDuration steps until it lies in the future relative to now. Needs
// mtx AND bufMtx locked, coldBuf must be empty.
func (s *summary) swapBufs(now time.Time) {
	if len(s.coldBuf) != 0 {
		// Invariant violation: the previous flush did not complete.
		panic("coldBuf is not empty")
	}
	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
	// hotBuf is now empty and gets new expiration set.
	// Stepping (rather than jumping) keeps expiry times aligned to the
	// original streamDuration grid.
	for now.After(s.hotBufExpTime) {
		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
	}
}
type quantSort []*dto.Quantile | |||
func (s quantSort) Len() int { | |||
return len(s) | |||
} | |||
func (s quantSort) Swap(i, j int) { | |||
s[i], s[j] = s[j], s[i] | |||
} | |||
func (s quantSort) Less(i, j int) bool { | |||
return s[i].GetQuantile() < s[j].GetQuantile() | |||
} | |||
// SummaryVec is a Collector that bundles a set of Summaries that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
	// metricVec provides the label-to-metric bookkeeping shared by all
	// vector metric types; SummaryVec only adds Summary-typed accessors.
	*metricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and | |||
// partitioned by the given label names. | |||
// | |||
// Due to the way a Summary is represented in the Prometheus text format and how | |||
// it is handled by the Prometheus server internally, “quantile” is an illegal | |||
// label name. NewSummaryVec will panic if this label name is used. | |||
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { | |||
for _, ln := range labelNames { | |||
if ln == quantileLabel { | |||
panic(errQuantileLabelNotAllowed) | |||
} | |||
} | |||
desc := NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
labelNames, | |||
opts.ConstLabels, | |||
) | |||
return &SummaryVec{ | |||
metricVec: newMetricVec(desc, func(lvs ...string) Metric { | |||
return newSummary(desc, opts, lvs...) | |||
}), | |||
} | |||
} | |||
// GetMetricWithLabelValues returns the Summary for the given slice of label | |||
// values (same order as the VariableLabels in Desc). If that combination of | |||
// label values is accessed for the first time, a new Summary is created. | |||
// | |||
// It is possible to call this method without using the returned Summary to only | |||
// create the new Summary but leave it at its starting value, a Summary without | |||
// any observations. | |||
// | |||
// Keeping the Summary for later use is possible (and should be considered if | |||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and | |||
// Delete can be used to delete the Summary from the SummaryVec. In that case, | |||
// the Summary will still exist, but it will not be exported anymore, even if a | |||
// Summary with the same label values is created later. See also the CounterVec | |||
// example. | |||
// | |||
// An error is returned if the number of label values is not the same as the | |||
// number of VariableLabels in Desc (minus any curried labels). | |||
// | |||
// Note that for more than one label value, this method is prone to mistakes | |||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as | |||
// an alternative to avoid that type of mistake. For higher label numbers, the | |||
// latter has a much more readable (albeit more verbose) syntax, but it comes | |||
// with a performance overhead (for creating and processing the Labels map). | |||
// See also the GaugeVec example. | |||
func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { | |||
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) | |||
if metric != nil { | |||
return metric.(Observer), err | |||
} | |||
return nil, err | |||
} | |||
// GetMetricWith returns the Summary for the given Labels map (the label names | |||
// must match those of the VariableLabels in Desc). If that label map is | |||
// accessed for the first time, a new Summary is created. Implications of | |||
// creating a Summary without using it and keeping the Summary for later use are | |||
// the same as for GetMetricWithLabelValues. | |||
// | |||
// An error is returned if the number and names of the Labels are inconsistent | |||
// with those of the VariableLabels in Desc (minus any curried labels). | |||
// | |||
// This method is used for the same purpose as | |||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two | |||
// methods. | |||
func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { | |||
metric, err := v.metricVec.getMetricWith(labels) | |||
if metric != nil { | |||
return metric.(Observer), err | |||
} | |||
return nil, err | |||
} | |||
// WithLabelValues works as GetMetricWithLabelValues, but panics where | |||
// GetMetricWithLabelValues would have returned an error. Not returning an | |||
// error allows shortcuts like | |||
// myVec.WithLabelValues("404", "GET").Observe(42.21) | |||
func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { | |||
s, err := v.GetMetricWithLabelValues(lvs...) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return s | |||
} | |||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have | |||
// returned an error. Not returning an error allows shortcuts like | |||
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) | |||
func (v *SummaryVec) With(labels Labels) Observer { | |||
s, err := v.GetMetricWith(labels) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return s | |||
} | |||
// CurryWith returns a vector curried with the provided labels, i.e. the | |||
// returned vector has those labels pre-set for all labeled operations performed | |||
// on it. The cardinality of the curried vector is reduced accordingly. The | |||
// order of the remaining labels stays the same (just with the curried labels | |||
// taken out of the sequence – which is relevant for the | |||
// (GetMetric)WithLabelValues methods). It is possible to curry a curried | |||
// vector, but only with labels not yet used for currying before. | |||
// | |||
// The metrics contained in the SummaryVec are shared between the curried and | |||
// uncurried vectors. They are just accessed differently. Curried and uncurried | |||
// vectors behave identically in terms of collection. Only one must be | |||
// registered with a given registry (usually the uncurried version). The Reset | |||
// method deletes all metrics, even if called on a curried vector. | |||
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { | |||
vec, err := v.curryWith(labels) | |||
if vec != nil { | |||
return &SummaryVec{vec}, err | |||
} | |||
return nil, err | |||
} | |||
// MustCurryWith works as CurryWith but panics where CurryWith would have | |||
// returned an error. | |||
func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { | |||
vec, err := v.CurryWith(labels) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return vec | |||
} | |||
// constSummary is an immutable Summary-shaped metric with pre-computed count,
// sum, and quantiles. It backs NewConstSummary and implements only Metric.
type constSummary struct {
	desc  *Desc
	count uint64
	sum   float64
	// quantiles maps ranks (e.g. 0.5, 0.99) to their fixed values.
	quantiles  map[float64]float64
	labelPairs []*dto.LabelPair
}
func (s *constSummary) Desc() *Desc { | |||
return s.desc | |||
} | |||
func (s *constSummary) Write(out *dto.Metric) error { | |||
sum := &dto.Summary{} | |||
qs := make([]*dto.Quantile, 0, len(s.quantiles)) | |||
sum.SampleCount = proto.Uint64(s.count) | |||
sum.SampleSum = proto.Float64(s.sum) | |||
for rank, q := range s.quantiles { | |||
qs = append(qs, &dto.Quantile{ | |||
Quantile: proto.Float64(rank), | |||
Value: proto.Float64(q), | |||
}) | |||
} | |||
if len(qs) > 0 { | |||
sort.Sort(quantSort(qs)) | |||
} | |||
sum.Quantile = qs | |||
out.Summary = sum | |||
out.Label = s.labelPairs | |||
return nil | |||
} | |||
// NewConstSummary returns a metric representing a Prometheus summary with fixed | |||
// values for the count, sum, and quantiles. As those parameters cannot be | |||
// changed, the returned value does not implement the Summary interface (but | |||
// only the Metric interface). Users of this package will not have much use for | |||
// it in regular operations. However, when implementing custom Collectors, it is | |||
// useful as a throw-away metric that is generated on the fly to send it to | |||
// Prometheus in the Collect method. | |||
// | |||
// quantiles maps ranks to quantile values. For example, a median latency of | |||
// 0.23s and a 99th percentile latency of 0.56s would be expressed as: | |||
// map[float64]float64{0.5: 0.23, 0.99: 0.56} | |||
// | |||
// NewConstSummary returns an error if the length of labelValues is not | |||
// consistent with the variable labels in Desc or if Desc is invalid. | |||
func NewConstSummary( | |||
desc *Desc, | |||
count uint64, | |||
sum float64, | |||
quantiles map[float64]float64, | |||
labelValues ...string, | |||
) (Metric, error) { | |||
if desc.err != nil { | |||
return nil, desc.err | |||
} | |||
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { | |||
return nil, err | |||
} | |||
return &constSummary{ | |||
desc: desc, | |||
count: count, | |||
sum: sum, | |||
quantiles: quantiles, | |||
labelPairs: makeLabelPairs(desc, labelValues), | |||
}, nil | |||
} | |||
// MustNewConstSummary is a version of NewConstSummary that panics where | |||
// NewConstMetric would have returned an error. | |||
func MustNewConstSummary( | |||
desc *Desc, | |||
count uint64, | |||
sum float64, | |||
quantiles map[float64]float64, | |||
labelValues ...string, | |||
) Metric { | |||
m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return m | |||
} |
@@ -0,0 +1,51 @@ | |||
// Copyright 2016 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import "time" | |||
// Timer is a helper type to time functions. Use NewTimer to create new
// instances.
type Timer struct {
	// begin is captured at construction time by NewTimer.
	begin time.Time
	// observer receives the elapsed seconds; may be nil, in which case
	// ObserveDuration is a no-op.
	observer Observer
}
// NewTimer creates a new Timer. The provided Observer is used to observe a | |||
// duration in seconds. Timer is usually used to time a function call in the | |||
// following way: | |||
// func TimeMe() { | |||
// timer := NewTimer(myHistogram) | |||
// defer timer.ObserveDuration() | |||
// // Do actual work. | |||
// } | |||
func NewTimer(o Observer) *Timer { | |||
return &Timer{ | |||
begin: time.Now(), | |||
observer: o, | |||
} | |||
} | |||
// ObserveDuration records the duration passed since the Timer was created with | |||
// NewTimer. It calls the Observe method of the Observer provided during | |||
// construction with the duration in seconds as an argument. ObserveDuration is | |||
// usually called with a defer statement. | |||
// | |||
// Note that this method is only guaranteed to never observe negative durations | |||
// if used with Go1.9+. | |||
func (t *Timer) ObserveDuration() { | |||
if t.observer != nil { | |||
t.observer.Observe(time.Since(t.begin).Seconds()) | |||
} | |||
} |
@@ -0,0 +1,42 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
// UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts
// UntypedFunc works like GaugeFunc but the collected metric is of type
// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
// type.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
	// An UntypedFunc is both a single metric and its own collector.
	Metric
	Collector
}
// NewUntypedFunc creates a new UntypedFunc based on the provided | |||
// UntypedOpts. The value reported is determined by calling the given function | |||
// from within the Write method. Take into account that metric collection may | |||
// happen concurrently. If that results in concurrent calls to Write, like in | |||
// the case where an UntypedFunc is directly registered with Prometheus, the | |||
// provided function must be concurrency-safe. | |||
func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { | |||
return newValueFunc(NewDesc( | |||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), | |||
opts.Help, | |||
nil, | |||
opts.ConstLabels, | |||
), UntypedValue, function) | |||
} |
@@ -0,0 +1,162 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"fmt" | |||
"sort" | |||
"github.com/golang/protobuf/proto" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int

// Possible values for the ValueType enum.
const (
	// The zero value is deliberately unused so that an uninitialized
	// ValueType is detectably invalid (see populateMetric's default case).
	_ ValueType = iota
	CounterValue
	GaugeValue
	UntypedValue
)
// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
// library to back the implementations of CounterFunc, GaugeFunc, and
// UntypedFunc.
type valueFunc struct {
	selfCollector
	desc    *Desc
	valType ValueType
	// function is invoked on every Write; it must be concurrency-safe if
	// the metric can be collected concurrently.
	function func() float64
	// labelPairs is pre-computed once in newValueFunc (const labels only).
	labelPairs []*dto.LabelPair
}
// newValueFunc returns a newly allocated valueFunc with the given Desc and | |||
// ValueType. The value reported is determined by calling the given function | |||
// from within the Write method. Take into account that metric collection may | |||
// happen concurrently. If that results in concurrent calls to Write, like in | |||
// the case where a valueFunc is directly registered with Prometheus, the | |||
// provided function must be concurrency-safe. | |||
func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { | |||
result := &valueFunc{ | |||
desc: desc, | |||
valType: valueType, | |||
function: function, | |||
labelPairs: makeLabelPairs(desc, nil), | |||
} | |||
result.init(result) | |||
return result | |||
} | |||
func (v *valueFunc) Desc() *Desc { | |||
return v.desc | |||
} | |||
func (v *valueFunc) Write(out *dto.Metric) error { | |||
return populateMetric(v.valType, v.function(), v.labelPairs, out) | |||
} | |||
// NewConstMetric returns a metric with one fixed value that cannot be | |||
// changed. Users of this package will not have much use for it in regular | |||
// operations. However, when implementing custom Collectors, it is useful as a | |||
// throw-away metric that is generated on the fly to send it to Prometheus in | |||
// the Collect method. NewConstMetric returns an error if the length of | |||
// labelValues is not consistent with the variable labels in Desc or if Desc is | |||
// invalid. | |||
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { | |||
if desc.err != nil { | |||
return nil, desc.err | |||
} | |||
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { | |||
return nil, err | |||
} | |||
return &constMetric{ | |||
desc: desc, | |||
valType: valueType, | |||
val: value, | |||
labelPairs: makeLabelPairs(desc, labelValues), | |||
}, nil | |||
} | |||
// MustNewConstMetric is a version of NewConstMetric that panics where | |||
// NewConstMetric would have returned an error. | |||
func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { | |||
m, err := NewConstMetric(desc, valueType, value, labelValues...) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return m | |||
} | |||
// constMetric is an immutable simple-valued metric (counter, gauge, or
// untyped, per valType). It backs NewConstMetric.
type constMetric struct {
	desc    *Desc
	valType ValueType
	val     float64
	labelPairs []*dto.LabelPair
}
func (m *constMetric) Desc() *Desc { | |||
return m.desc | |||
} | |||
func (m *constMetric) Write(out *dto.Metric) error { | |||
return populateMetric(m.valType, m.val, m.labelPairs, out) | |||
} | |||
func populateMetric( | |||
t ValueType, | |||
v float64, | |||
labelPairs []*dto.LabelPair, | |||
m *dto.Metric, | |||
) error { | |||
m.Label = labelPairs | |||
switch t { | |||
case CounterValue: | |||
m.Counter = &dto.Counter{Value: proto.Float64(v)} | |||
case GaugeValue: | |||
m.Gauge = &dto.Gauge{Value: proto.Float64(v)} | |||
case UntypedValue: | |||
m.Untyped = &dto.Untyped{Value: proto.Float64(v)} | |||
default: | |||
return fmt.Errorf("encountered unknown type %v", t) | |||
} | |||
return nil | |||
} | |||
func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { | |||
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) | |||
if totalLen == 0 { | |||
// Super fast path. | |||
return nil | |||
} | |||
if len(desc.variableLabels) == 0 { | |||
// Moderately fast path. | |||
return desc.constLabelPairs | |||
} | |||
labelPairs := make([]*dto.LabelPair, 0, totalLen) | |||
for i, n := range desc.variableLabels { | |||
labelPairs = append(labelPairs, &dto.LabelPair{ | |||
Name: proto.String(n), | |||
Value: proto.String(labelValues[i]), | |||
}) | |||
} | |||
labelPairs = append(labelPairs, desc.constLabelPairs...) | |||
sort.Sort(labelPairSorter(labelPairs)) | |||
return labelPairs | |||
} |
@@ -0,0 +1,472 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"fmt" | |||
"sync" | |||
"github.com/prometheus/common/model" | |||
) | |||
// metricVec is a Collector to bundle metrics of the same name that differ in
// their label values. metricVec is not used directly (and therefore
// unexported). It is used as a building block for implementations of vectors of
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
// It also handles label currying.
type metricVec struct {
	// metricMap holds the actual metrics and is shared between a vector
	// and all vectors curried from it.
	*metricMap

	// curry holds the pre-set label values of this (possibly curried)
	// view, sorted by label index.
	curry []curriedLabelValue

	// hashAdd and hashAddByte can be replaced for testing collision handling.
	hashAdd     func(h uint64, s string) uint64
	hashAddByte func(h uint64, b byte) uint64
}
// newMetricVec returns an initialized metricVec. | |||
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { | |||
return &metricVec{ | |||
metricMap: &metricMap{ | |||
metrics: map[uint64][]metricWithLabelValues{}, | |||
desc: desc, | |||
newMetric: newMetric, | |||
}, | |||
hashAdd: hashAdd, | |||
hashAddByte: hashAddByte, | |||
} | |||
} | |||
// DeleteLabelValues removes the metric where the variable labels are the same | |||
// as those passed in as labels (same order as the VariableLabels in Desc). It | |||
// returns true if a metric was deleted. | |||
// | |||
// It is not an error if the number of label values is not the same as the | |||
// number of VariableLabels in Desc. However, such inconsistent label count can | |||
// never match an actual metric, so the method will always return false in that | |||
// case. | |||
// | |||
// Note that for more than one label value, this method is prone to mistakes | |||
// caused by an incorrect order of arguments. Consider Delete(Labels) as an | |||
// alternative to avoid that type of mistake. For higher label numbers, the | |||
// latter has a much more readable (albeit more verbose) syntax, but it comes | |||
// with a performance overhead (for creating and processing the Labels map). | |||
// See also the CounterVec example. | |||
func (m *metricVec) DeleteLabelValues(lvs ...string) bool { | |||
h, err := m.hashLabelValues(lvs) | |||
if err != nil { | |||
return false | |||
} | |||
return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) | |||
} | |||
// Delete deletes the metric where the variable labels are the same as those | |||
// passed in as labels. It returns true if a metric was deleted. | |||
// | |||
// It is not an error if the number and names of the Labels are inconsistent | |||
// with those of the VariableLabels in Desc. However, such inconsistent Labels | |||
// can never match an actual metric, so the method will always return false in | |||
// that case. | |||
// | |||
// This method is used for the same purpose as DeleteLabelValues(...string). See | |||
// there for pros and cons of the two methods. | |||
func (m *metricVec) Delete(labels Labels) bool { | |||
h, err := m.hashLabels(labels) | |||
if err != nil { | |||
return false | |||
} | |||
return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) | |||
} | |||
// curryWith returns a copy of this vector whose curry list additionally
// pre-sets the given labels. The returned vector shares the underlying
// metricMap with the receiver. It fails if a label is already curried or if
// labels contains names not declared as variable labels on the Desc.
func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
	var (
		newCurry []curriedLabelValue
		oldCurry = m.curry
		iCurry   int // walks oldCurry in lockstep with the label indices
	)
	// Merge oldCurry and the new labels into newCurry, keeping entries
	// ordered by variable-label index.
	for i, label := range m.desc.variableLabels {
		val, ok := labels[label]
		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
			// This label is already curried; re-currying it is an error.
			if ok {
				return nil, fmt.Errorf("label name %q is already curried", label)
			}
			newCurry = append(newCurry, oldCurry[iCurry])
			iCurry++
		} else {
			if !ok {
				continue // Label stays uncurried.
			}
			newCurry = append(newCurry, curriedLabelValue{i, val})
		}
	}
	// Every entry of labels must have been consumed; any surplus means a
	// label name that is not a variable label of this vector.
	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
		return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
	}

	return &metricVec{
		metricMap:   m.metricMap,
		curry:       newCurry,
		hashAdd:     m.hashAdd,
		hashAddByte: m.hashAddByte,
	}, nil
}
func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { | |||
h, err := m.hashLabelValues(lvs) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil | |||
} | |||
func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { | |||
h, err := m.hashLabels(labels) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil | |||
} | |||
// hashLabelValues computes the hash identifying the metric addressed by the
// given uncurried label values. Curried values are interleaved at their
// declared label positions so that curried and uncurried access hash
// identically. It fails if vals does not cover exactly the uncurried labels.
func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
	if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
		return 0, err
	}

	var (
		h             = hashNew()
		curry         = m.curry
		iVals, iCurry int // independent cursors into vals and curry
	)
	for i := 0; i < len(m.desc.variableLabels); i++ {
		// Pick the curried value when this index is curried, otherwise
		// consume the next caller-supplied value.
		if iCurry < len(curry) && curry[iCurry].index == i {
			h = m.hashAdd(h, curry[iCurry].value)
			iCurry++
		} else {
			h = m.hashAdd(h, vals[iVals])
			iVals++
		}
		// Separator prevents ambiguity between adjacent values.
		h = m.hashAddByte(h, model.SeparatorByte)
	}
	return h, nil
}
// hashLabels computes the hash identifying the metric addressed by the given
// label map, interleaving curried values at their declared positions (see
// hashLabelValues). It fails if a label is both curried and supplied, or if
// an uncurried label is missing from the map.
func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
	if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
		return 0, err
	}

	var (
		h      = hashNew()
		curry  = m.curry
		iCurry int // cursor into curry, advanced at curried indices
	)
	for i, label := range m.desc.variableLabels {
		val, ok := labels[label]
		if iCurry < len(curry) && curry[iCurry].index == i {
			if ok {
				return 0, fmt.Errorf("label name %q is already curried", label)
			}
			h = m.hashAdd(h, curry[iCurry].value)
			iCurry++
		} else {
			if !ok {
				return 0, fmt.Errorf("label name %q missing in label map", label)
			}
			h = m.hashAdd(h, val)
		}
		// Separator prevents ambiguity between adjacent values.
		h = m.hashAddByte(h, model.SeparatorByte)
	}
	return h, nil
}
// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
	// values holds the full (curried values inlined) label values.
	values []string
	metric Metric
}

// curriedLabelValue sets the curried value for a label at the given index.
type curriedLabelValue struct {
	// index is the position of the label in Desc.variableLabels.
	index int
	value string
}

// metricMap is a helper for metricVec and shared between differently curried
// metricVecs.
type metricMap struct {
	mtx sync.RWMutex // Protects metrics.
	// metrics maps a label-value hash to all metrics sharing that hash;
	// the slice form handles hash collisions.
	metrics map[uint64][]metricWithLabelValues
	desc    *Desc
	// newMetric creates a metric for a label combination on first access.
	newMetric func(labelValues ...string) Metric
}
// Describe implements Collector. It will send exactly one Desc to the provided | |||
// channel. | |||
func (m *metricMap) Describe(ch chan<- *Desc) { | |||
ch <- m.desc | |||
} | |||
// Collect implements Collector. | |||
func (m *metricMap) Collect(ch chan<- Metric) { | |||
m.mtx.RLock() | |||
defer m.mtx.RUnlock() | |||
for _, metrics := range m.metrics { | |||
for _, metric := range metrics { | |||
ch <- metric.metric | |||
} | |||
} | |||
} | |||
// Reset deletes all metrics in this vector. | |||
func (m *metricMap) Reset() { | |||
m.mtx.Lock() | |||
defer m.mtx.Unlock() | |||
for h := range m.metrics { | |||
delete(m.metrics, h) | |||
} | |||
} | |||
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If | |||
// there are multiple matches in the bucket, use lvs to select a metric and | |||
// remove only that metric. | |||
func (m *metricMap) deleteByHashWithLabelValues( | |||
h uint64, lvs []string, curry []curriedLabelValue, | |||
) bool { | |||
m.mtx.Lock() | |||
defer m.mtx.Unlock() | |||
metrics, ok := m.metrics[h] | |||
if !ok { | |||
return false | |||
} | |||
i := findMetricWithLabelValues(metrics, lvs, curry) | |||
if i >= len(metrics) { | |||
return false | |||
} | |||
if len(metrics) > 1 { | |||
m.metrics[h] = append(metrics[:i], metrics[i+1:]...) | |||
} else { | |||
delete(m.metrics, h) | |||
} | |||
return true | |||
} | |||
// deleteByHashWithLabels removes the metric from the hash bucket h. If there | |||
// are multiple matches in the bucket, use lvs to select a metric and remove | |||
// only that metric. | |||
func (m *metricMap) deleteByHashWithLabels( | |||
h uint64, labels Labels, curry []curriedLabelValue, | |||
) bool { | |||
m.mtx.Lock() | |||
defer m.mtx.Unlock() | |||
metrics, ok := m.metrics[h] | |||
if !ok { | |||
return false | |||
} | |||
i := findMetricWithLabels(m.desc, metrics, labels, curry) | |||
if i >= len(metrics) { | |||
return false | |||
} | |||
if len(metrics) > 1 { | |||
m.metrics[h] = append(metrics[:i], metrics[i+1:]...) | |||
} else { | |||
delete(m.metrics, h) | |||
} | |||
return true | |||
} | |||
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *metricMap) getOrCreateMetricWithLabelValues(
	hash uint64, lvs []string, curry []curriedLabelValue,
) Metric {
	// Fast path: optimistic lookup under the read lock.
	m.mtx.RLock()
	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
	m.mtx.RUnlock()
	if ok {
		return metric
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()
	// Double-check under the write lock: another goroutine may have
	// created the metric between the RUnlock and the Lock above.
	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
	if !ok {
		// Store the fully inlined label values (curried values merged in)
		// so later collision checks compare complete value sets.
		inlinedLVs := inlineLabelValues(lvs, curry)
		metric = m.newMetric(inlinedLVs...)
		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
	}
	return metric
}
// getOrCreateMetricWithLabels retrieves the metric by hash and label map or
// creates it and returns the new one.
//
// This function holds the mutex.
func (m *metricMap) getOrCreateMetricWithLabels(
	hash uint64, labels Labels, curry []curriedLabelValue,
) Metric {
	// Fast path: optimistic lookup under the read lock.
	m.mtx.RLock()
	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
	m.mtx.RUnlock()
	if ok {
		return metric
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()
	// Double-check under the write lock: another goroutine may have
	// created the metric between the RUnlock and the Lock above.
	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
	if !ok {
		// Flatten the label map (plus curried values) into the ordered
		// value slice used for collision disambiguation.
		lvs := extractLabelValues(m.desc, labels, curry)
		metric = m.newMetric(lvs...)
		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
	}
	return metric
}
// getMetricWithHashAndLabelValues gets a metric while handling possible | |||
// collisions in the hash space. Must be called while holding the read mutex. | |||
func (m *metricMap) getMetricWithHashAndLabelValues( | |||
h uint64, lvs []string, curry []curriedLabelValue, | |||
) (Metric, bool) { | |||
metrics, ok := m.metrics[h] | |||
if ok { | |||
if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { | |||
return metrics[i].metric, true | |||
} | |||
} | |||
return nil, false | |||
} | |||
// getMetricWithHashAndLabels gets a metric while handling possible collisions in | |||
// the hash space. Must be called while holding read mutex. | |||
func (m *metricMap) getMetricWithHashAndLabels( | |||
h uint64, labels Labels, curry []curriedLabelValue, | |||
) (Metric, bool) { | |||
metrics, ok := m.metrics[h] | |||
if ok { | |||
if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { | |||
return metrics[i].metric, true | |||
} | |||
} | |||
return nil, false | |||
} | |||
// findMetricWithLabelValues returns the index of the matching metric or | |||
// len(metrics) if not found. | |||
func findMetricWithLabelValues( | |||
metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, | |||
) int { | |||
for i, metric := range metrics { | |||
if matchLabelValues(metric.values, lvs, curry) { | |||
return i | |||
} | |||
} | |||
return len(metrics) | |||
} | |||
// findMetricWithLabels returns the index of the matching metric or len(metrics) | |||
// if not found. | |||
func findMetricWithLabels( | |||
desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, | |||
) int { | |||
for i, metric := range metrics { | |||
if matchLabels(desc, metric.values, labels, curry) { | |||
return i | |||
} | |||
} | |||
return len(metrics) | |||
} | |||
func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { | |||
if len(values) != len(lvs)+len(curry) { | |||
return false | |||
} | |||
var iLVs, iCurry int | |||
for i, v := range values { | |||
if iCurry < len(curry) && curry[iCurry].index == i { | |||
if v != curry[iCurry].value { | |||
return false | |||
} | |||
iCurry++ | |||
continue | |||
} | |||
if v != lvs[iLVs] { | |||
return false | |||
} | |||
iLVs++ | |||
} | |||
return true | |||
} | |||
func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { | |||
if len(values) != len(labels)+len(curry) { | |||
return false | |||
} | |||
iCurry := 0 | |||
for i, k := range desc.variableLabels { | |||
if iCurry < len(curry) && curry[iCurry].index == i { | |||
if values[i] != curry[iCurry].value { | |||
return false | |||
} | |||
iCurry++ | |||
continue | |||
} | |||
if values[i] != labels[k] { | |||
return false | |||
} | |||
} | |||
return true | |||
} | |||
func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { | |||
labelValues := make([]string, len(labels)+len(curry)) | |||
iCurry := 0 | |||
for i, k := range desc.variableLabels { | |||
if iCurry < len(curry) && curry[iCurry].index == i { | |||
labelValues[i] = curry[iCurry].value | |||
iCurry++ | |||
continue | |||
} | |||
labelValues[i] = labels[k] | |||
} | |||
return labelValues | |||
} | |||
func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { | |||
labelValues := make([]string, len(lvs)+len(curry)) | |||
var iCurry, iLVs int | |||
for i := range labelValues { | |||
if iCurry < len(curry) && curry[iCurry].index == i { | |||
labelValues[i] = curry[iCurry].value | |||
iCurry++ | |||
continue | |||
} | |||
labelValues[i] = lvs[iLVs] | |||
iLVs++ | |||
} | |||
return labelValues | |||
} |
@@ -0,0 +1,179 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package prometheus | |||
import ( | |||
"fmt" | |||
"sort" | |||
"github.com/golang/protobuf/proto" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// WrapRegistererWith returns a Registerer wrapping the provided | |||
// Registerer. Collectors registered with the returned Registerer will be | |||
// registered with the wrapped Registerer in a modified way. The modified | |||
// Collector adds the provided Labels to all Metrics it collects (as | |||
// ConstLabels). The Metrics collected by the unmodified Collector must not | |||
// duplicate any of those labels. | |||
// | |||
// WrapRegistererWith provides a way to add fixed labels to a subset of | |||
// Collectors. It should not be used to add fixed labels to all metrics exposed. | |||
// | |||
// The Collector example demonstrates a use of WrapRegistererWith. | |||
func WrapRegistererWith(labels Labels, reg Registerer) Registerer { | |||
return &wrappingRegisterer{ | |||
wrappedRegisterer: reg, | |||
labels: labels, | |||
} | |||
} | |||
// WrapRegistererWithPrefix returns a Registerer wrapping the provided | |||
// Registerer. Collectors registered with the returned Registerer will be | |||
// registered with the wrapped Registerer in a modified way. The modified | |||
// Collector adds the provided prefix to the name of all Metrics it collects. | |||
// | |||
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of | |||
// a sub-system. To make this work, register metrics of the sub-system with the | |||
// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful | |||
// to use the same prefix for all metrics exposed. In particular, do not prefix | |||
// metric names that are standardized across applications, as that would break | |||
// horizontal monitoring, for example the metrics provided by the Go collector | |||
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In | |||
// fact, those metrics are already prefixed with “go_” or “process_”, | |||
// respectively.) | |||
func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { | |||
return &wrappingRegisterer{ | |||
wrappedRegisterer: reg, | |||
prefix: prefix, | |||
} | |||
} | |||
// wrappingRegisterer delegates to wrappedRegisterer, wrapping every
// registered Collector so that the prefix and/or labels are applied to all
// metrics it produces. Either field may be empty/nil.
type wrappingRegisterer struct {
	wrappedRegisterer Registerer
	prefix            string
	labels            Labels
}
func (r *wrappingRegisterer) Register(c Collector) error { | |||
return r.wrappedRegisterer.Register(&wrappingCollector{ | |||
wrappedCollector: c, | |||
prefix: r.prefix, | |||
labels: r.labels, | |||
}) | |||
} | |||
func (r *wrappingRegisterer) MustRegister(cs ...Collector) { | |||
for _, c := range cs { | |||
if err := r.Register(c); err != nil { | |||
panic(err) | |||
} | |||
} | |||
} | |||
func (r *wrappingRegisterer) Unregister(c Collector) bool { | |||
return r.wrappedRegisterer.Unregister(&wrappingCollector{ | |||
wrappedCollector: c, | |||
prefix: r.prefix, | |||
labels: r.labels, | |||
}) | |||
} | |||
// wrappingCollector delegates to wrappedCollector, rewriting every collected
// Metric and described Desc with the given prefix and labels.
type wrappingCollector struct {
	wrappedCollector Collector
	prefix           string
	labels           Labels
}
func (c *wrappingCollector) Collect(ch chan<- Metric) { | |||
wrappedCh := make(chan Metric) | |||
go func() { | |||
c.wrappedCollector.Collect(wrappedCh) | |||
close(wrappedCh) | |||
}() | |||
for m := range wrappedCh { | |||
ch <- &wrappingMetric{ | |||
wrappedMetric: m, | |||
prefix: c.prefix, | |||
labels: c.labels, | |||
} | |||
} | |||
} | |||
func (c *wrappingCollector) Describe(ch chan<- *Desc) { | |||
wrappedCh := make(chan *Desc) | |||
go func() { | |||
c.wrappedCollector.Describe(wrappedCh) | |||
close(wrappedCh) | |||
}() | |||
for desc := range wrappedCh { | |||
ch <- wrapDesc(desc, c.prefix, c.labels) | |||
} | |||
} | |||
// wrappingMetric delegates to wrappedMetric, applying the prefix to its Desc
// and appending the labels to its written label pairs.
type wrappingMetric struct {
	wrappedMetric Metric
	prefix        string
	labels        Labels
}
func (m *wrappingMetric) Desc() *Desc { | |||
return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) | |||
} | |||
func (m *wrappingMetric) Write(out *dto.Metric) error { | |||
if err := m.wrappedMetric.Write(out); err != nil { | |||
return err | |||
} | |||
if len(m.labels) == 0 { | |||
// No wrapping labels. | |||
return nil | |||
} | |||
for ln, lv := range m.labels { | |||
out.Label = append(out.Label, &dto.LabelPair{ | |||
Name: proto.String(ln), | |||
Value: proto.String(lv), | |||
}) | |||
} | |||
sort.Sort(labelPairSorter(out.Label)) | |||
return nil | |||
} | |||
// wrapDesc returns a copy of desc with prefix prepended to the
// fully-qualified name and labels merged into the constant labels. If one of
// the wrapping labels collides with an existing constant label, the returned
// Desc carries an error describing the collision instead of a usable
// descriptor.
func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
	constLabels := Labels{}
	for _, lp := range desc.constLabelPairs {
		constLabels[*lp.Name] = *lp.Value
	}
	for ln, lv := range labels {
		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
			// Keep the original descriptor's data but mark it broken so
			// registration surfaces the problem.
			return &Desc{
				fqName:          desc.fqName,
				help:            desc.help,
				variableLabels:  desc.variableLabels,
				constLabelPairs: desc.constLabelPairs,
				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
			}
		}
		constLabels[ln] = lv
	}
	// NewDesc will do remaining validations.
	newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
	// Propagate errors if there was any. This will override any error
	// created by NewDesc above, i.e. earlier errors get precedence.
	if desc.err != nil {
		newDesc.err = desc.err
	}
	return newDesc
}
@@ -0,0 +1,201 @@ | |||
Apache License | |||
Version 2.0, January 2004 | |||
http://www.apache.org/licenses/ | |||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
1. Definitions. | |||
"License" shall mean the terms and conditions for use, reproduction, | |||
and distribution as defined by Sections 1 through 9 of this document. | |||
"Licensor" shall mean the copyright owner or entity authorized by | |||
the copyright owner that is granting the License. | |||
"Legal Entity" shall mean the union of the acting entity and all | |||
other entities that control, are controlled by, or are under common | |||
control with that entity. For the purposes of this definition, | |||
"control" means (i) the power, direct or indirect, to cause the | |||
direction or management of such entity, whether by contract or | |||
otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
outstanding shares, or (iii) beneficial ownership of such entity. | |||
"You" (or "Your") shall mean an individual or Legal Entity | |||
exercising permissions granted by this License. | |||
"Source" form shall mean the preferred form for making modifications, | |||
including but not limited to software source code, documentation | |||
source, and configuration files. | |||
"Object" form shall mean any form resulting from mechanical | |||
transformation or translation of a Source form, including but | |||
not limited to compiled object code, generated documentation, | |||
and conversions to other media types. | |||
"Work" shall mean the work of authorship, whether in Source or | |||
Object form, made available under the License, as indicated by a | |||
copyright notice that is included in or attached to the work | |||
(an example is provided in the Appendix below). | |||
"Derivative Works" shall mean any work, whether in Source or Object | |||
form, that is based on (or derived from) the Work and for which the | |||
editorial revisions, annotations, elaborations, or other modifications | |||
represent, as a whole, an original work of authorship. For the purposes | |||
of this License, Derivative Works shall not include works that remain | |||
separable from, or merely link (or bind by name) to the interfaces of, | |||
the Work and Derivative Works thereof. | |||
"Contribution" shall mean any work of authorship, including | |||
the original version of the Work and any modifications or additions | |||
to that Work or Derivative Works thereof, that is intentionally | |||
submitted to Licensor for inclusion in the Work by the copyright owner | |||
or by an individual or Legal Entity authorized to submit on behalf of | |||
the copyright owner. For the purposes of this definition, "submitted" | |||
means any form of electronic, verbal, or written communication sent | |||
to the Licensor or its representatives, including but not limited to | |||
communication on electronic mailing lists, source code control systems, | |||
and issue tracking systems that are managed by, or on behalf of, the | |||
Licensor for the purpose of discussing and improving the Work, but | |||
excluding communication that is conspicuously marked or otherwise | |||
designated in writing by the copyright owner as "Not a Contribution." | |||
"Contributor" shall mean Licensor and any individual or Legal Entity | |||
on behalf of whom a Contribution has been received by Licensor and | |||
subsequently incorporated within the Work. | |||
2. Grant of Copyright License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
copyright license to reproduce, prepare Derivative Works of, | |||
publicly display, publicly perform, sublicense, and distribute the | |||
Work and such Derivative Works in Source or Object form. | |||
3. Grant of Patent License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
(except as stated in this section) patent license to make, have made, | |||
use, offer to sell, sell, import, and otherwise transfer the Work, | |||
where such license applies only to those patent claims licensable | |||
by such Contributor that are necessarily infringed by their | |||
Contribution(s) alone or by combination of their Contribution(s) | |||
with the Work to which such Contribution(s) was submitted. If You | |||
institute patent litigation against any entity (including a | |||
cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
or a Contribution incorporated within the Work constitutes direct | |||
or contributory patent infringement, then any patent licenses | |||
granted to You under this License for that Work shall terminate | |||
as of the date such litigation is filed. | |||
4. Redistribution. You may reproduce and distribute copies of the | |||
Work or Derivative Works thereof in any medium, with or without | |||
modifications, and in Source or Object form, provided that You | |||
meet the following conditions: | |||
(a) You must give any other recipients of the Work or | |||
Derivative Works a copy of this License; and | |||
(b) You must cause any modified files to carry prominent notices | |||
stating that You changed the files; and | |||
(c) You must retain, in the Source form of any Derivative Works | |||
that You distribute, all copyright, patent, trademark, and | |||
attribution notices from the Source form of the Work, | |||
excluding those notices that do not pertain to any part of | |||
the Derivative Works; and | |||
(d) If the Work includes a "NOTICE" text file as part of its | |||
distribution, then any Derivative Works that You distribute must | |||
include a readable copy of the attribution notices contained | |||
within such NOTICE file, excluding those notices that do not | |||
pertain to any part of the Derivative Works, in at least one | |||
of the following places: within a NOTICE text file distributed | |||
as part of the Derivative Works; within the Source form or | |||
documentation, if provided along with the Derivative Works; or, | |||
within a display generated by the Derivative Works, if and | |||
wherever such third-party notices normally appear. The contents | |||
of the NOTICE file are for informational purposes only and | |||
do not modify the License. You may add Your own attribution | |||
notices within Derivative Works that You distribute, alongside | |||
or as an addendum to the NOTICE text from the Work, provided | |||
that such additional attribution notices cannot be construed | |||
as modifying the License. | |||
You may add Your own copyright statement to Your modifications and | |||
may provide additional or different license terms and conditions | |||
for use, reproduction, or distribution of Your modifications, or | |||
for any such Derivative Works as a whole, provided Your use, | |||
reproduction, and distribution of the Work otherwise complies with | |||
the conditions stated in this License. | |||
5. Submission of Contributions. Unless You explicitly state otherwise, | |||
any Contribution intentionally submitted for inclusion in the Work | |||
by You to the Licensor shall be under the terms and conditions of | |||
this License, without any additional terms or conditions. | |||
Notwithstanding the above, nothing herein shall supersede or modify | |||
the terms of any separate license agreement you may have executed | |||
with Licensor regarding such Contributions. | |||
6. Trademarks. This License does not grant permission to use the trade | |||
names, trademarks, service marks, or product names of the Licensor, | |||
except as required for reasonable and customary use in describing the | |||
origin of the Work and reproducing the content of the NOTICE file. | |||
7. Disclaimer of Warranty. Unless required by applicable law or | |||
agreed to in writing, Licensor provides the Work (and each | |||
Contributor provides its Contributions) on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
implied, including, without limitation, any warranties or conditions | |||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
PARTICULAR PURPOSE. You are solely responsible for determining the | |||
appropriateness of using or redistributing the Work and assume any | |||
risks associated with Your exercise of permissions under this License. | |||
8. Limitation of Liability. In no event and under no legal theory, | |||
whether in tort (including negligence), contract, or otherwise, | |||
unless required by applicable law (such as deliberate and grossly | |||
negligent acts) or agreed to in writing, shall any Contributor be | |||
liable to You for damages, including any direct, indirect, special, | |||
incidental, or consequential damages of any character arising as a | |||
result of this License or out of the use or inability to use the | |||
Work (including but not limited to damages for loss of goodwill, | |||
work stoppage, computer failure or malfunction, or any and all | |||
other commercial damages or losses), even if such Contributor | |||
has been advised of the possibility of such damages. | |||
9. Accepting Warranty or Additional Liability. While redistributing | |||
the Work or Derivative Works thereof, You may choose to offer, | |||
and charge a fee for, acceptance of support, warranty, indemnity, | |||
or other liability obligations and/or rights consistent with this | |||
License. However, in accepting such obligations, You may act only | |||
on Your own behalf and on Your sole responsibility, not on behalf | |||
of any other Contributor, and only if You agree to indemnify, | |||
defend, and hold each Contributor harmless for any liability | |||
incurred by, or claims asserted against, such Contributor by reason | |||
of your accepting any such warranty or additional liability. | |||
END OF TERMS AND CONDITIONS | |||
APPENDIX: How to apply the Apache License to your work. | |||
To apply the Apache License to your work, attach the following | |||
boilerplate notice, with the fields enclosed by brackets "[]" | |||
replaced with your own identifying information. (Don't include | |||
the brackets!) The text should be enclosed in the appropriate | |||
comment syntax for the file format. We also recommend that a | |||
file or class name and description of purpose be included on the | |||
same "printed page" as the copyright notice for easier | |||
identification within third-party archives. | |||
Copyright [yyyy] [name of copyright owner] | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. |
@@ -0,0 +1,5 @@ | |||
Data model artifacts for Prometheus. | |||
Copyright 2012-2015 The Prometheus Authors | |||
This product includes software developed at | |||
SoundCloud Ltd. (http://soundcloud.com/). |
@@ -0,0 +1,629 @@ | |||
// Code generated by protoc-gen-go. DO NOT EDIT. | |||
// source: metrics.proto | |||
package io_prometheus_client // import "github.com/prometheus/client_model/go" | |||
import proto "github.com/golang/protobuf/proto" | |||
import fmt "fmt" | |||
import math "math" | |||
// Reference imports to suppress errors if they are not otherwise used. | |||
var _ = proto.Marshal | |||
var _ = fmt.Errorf | |||
var _ = math.Inf | |||
// This is a compile-time assertion to ensure that this generated file | |||
// is compatible with the proto package it is being compiled against. | |||
// A compilation error at this line likely means your copy of the | |||
// proto package needs to be updated. | |||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package | |||
// MetricType identifies the flavour of a metric in the Prometheus exposition
// data model. NOTE(review): this file is generated by protoc-gen-go from
// metrics.proto — do not edit by hand; regenerate instead.
type MetricType int32

const (
	MetricType_COUNTER MetricType = 0
	MetricType_GAUGE MetricType = 1
	MetricType_SUMMARY MetricType = 2
	MetricType_UNTYPED MetricType = 3
	MetricType_HISTOGRAM MetricType = 4
)

// MetricType_name maps enum values to their proto names.
var MetricType_name = map[int32]string{
	0: "COUNTER",
	1: "GAUGE",
	2: "SUMMARY",
	3: "UNTYPED",
	4: "HISTOGRAM",
}

// MetricType_value maps proto names back to enum values.
var MetricType_value = map[string]int32{
	"COUNTER": 0,
	"GAUGE": 1,
	"SUMMARY": 2,
	"UNTYPED": 3,
	"HISTOGRAM": 4,
}

// Enum returns a pointer to a copy of x.
func (x MetricType) Enum() *MetricType {
	p := new(MetricType)
	*p = x
	return p
}

// String returns the proto name of the enum value.
func (x MetricType) String() string {
	return proto.EnumName(MetricType_name, int32(x))
}

// UnmarshalJSON decodes a JSON-encoded enum value.
func (x *MetricType) UnmarshalJSON(data []byte) error {
	value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
	if err != nil {
		return err
	}
	*x = MetricType(value)
	return nil
}

func (MetricType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
}
// LabelPair is a single name/value label attached to a metric, as defined in
// metrics.proto. Generated by protoc-gen-go — do not edit by hand.
type LabelPair struct {
	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard protobuf plumbing generated for every message type.
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) {
	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
}
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LabelPair.Unmarshal(m, b)
}
func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
}
func (dst *LabelPair) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LabelPair.Merge(dst, src)
}
func (m *LabelPair) XXX_Size() int {
	return xxx_messageInfo_LabelPair.Size(m)
}
func (m *LabelPair) XXX_DiscardUnknown() {
	xxx_messageInfo_LabelPair.DiscardUnknown(m)
}

var xxx_messageInfo_LabelPair proto.InternalMessageInfo

// GetName returns the label name, or "" if unset.
func (m *LabelPair) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}

// GetValue returns the label value, or "" if unset.
func (m *LabelPair) GetValue() string {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return ""
}
// Gauge is the sample of a gauge metric, as defined in metrics.proto.
// Generated by protoc-gen-go — do not edit by hand.
type Gauge struct {
	Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard protobuf plumbing generated for every message type.
func (m *Gauge) Reset() { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage() {}
func (*Gauge) Descriptor() ([]byte, []int) {
	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
}
func (m *Gauge) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Gauge.Unmarshal(m, b)
}
func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
}
func (dst *Gauge) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Gauge.Merge(dst, src)
}
func (m *Gauge) XXX_Size() int {
	return xxx_messageInfo_Gauge.Size(m)
}
func (m *Gauge) XXX_DiscardUnknown() {
	xxx_messageInfo_Gauge.DiscardUnknown(m)
}

var xxx_messageInfo_Gauge proto.InternalMessageInfo

// GetValue returns the gauge value, or 0 if unset.
func (m *Gauge) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}
// Counter is the sample of a counter metric, as defined in metrics.proto.
// Generated by protoc-gen-go — do not edit by hand.
type Counter struct {
	Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard protobuf plumbing generated for every message type.
func (m *Counter) Reset() { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage() {}
func (*Counter) Descriptor() ([]byte, []int) {
	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
}
func (m *Counter) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Counter.Unmarshal(m, b)
}
func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
}
func (dst *Counter) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Counter.Merge(dst, src)
}
func (m *Counter) XXX_Size() int {
	return xxx_messageInfo_Counter.Size(m)
}
func (m *Counter) XXX_DiscardUnknown() {
	xxx_messageInfo_Counter.DiscardUnknown(m)
}

var xxx_messageInfo_Counter proto.InternalMessageInfo

// GetValue returns the counter value, or 0 if unset.
func (m *Counter) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}
// Quantile is a single quantile/value pair of a summary, as defined in
// metrics.proto. Generated by protoc-gen-go — do not edit by hand.
type Quantile struct {
	Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
	Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard protobuf plumbing generated for every message type.
func (m *Quantile) Reset() { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage() {}
func (*Quantile) Descriptor() ([]byte, []int) {
	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
}
func (m *Quantile) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Quantile.Unmarshal(m, b)
}
func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
}
func (dst *Quantile) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Quantile.Merge(dst, src)
}
func (m *Quantile) XXX_Size() int {
	return xxx_messageInfo_Quantile.Size(m)
}
func (m *Quantile) XXX_DiscardUnknown() {
	xxx_messageInfo_Quantile.DiscardUnknown(m)
}

var xxx_messageInfo_Quantile proto.InternalMessageInfo

// GetQuantile returns the quantile rank (e.g. 0.99), or 0 if unset.
func (m *Quantile) GetQuantile() float64 {
	if m != nil && m.Quantile != nil {
		return *m.Quantile
	}
	return 0
}

// GetValue returns the observed value at that quantile, or 0 if unset.
func (m *Quantile) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}
// Summary is the sample of a summary metric (count, sum, and quantiles), as
// defined in metrics.proto. Generated by protoc-gen-go — do not edit by hand.
type Summary struct {
	SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
	SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
	Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard protobuf plumbing generated for every message type.
func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
func (*Summary) Descriptor() ([]byte, []int) {
	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
}
func (m *Summary) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Summary.Unmarshal(m, b)
}
func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
}
func (dst *Summary) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Summary.Merge(dst, src)
}
func (m *Summary) XXX_Size() int {
	return xxx_messageInfo_Summary.Size(m)
}
func (m *Summary) XXX_DiscardUnknown() {
	xxx_messageInfo_Summary.DiscardUnknown(m)
}

var xxx_messageInfo_Summary proto.InternalMessageInfo

// GetSampleCount returns the number of observations, or 0 if unset.
func (m *Summary) GetSampleCount() uint64 {
	if m != nil && m.SampleCount != nil {
		return *m.SampleCount
	}
	return 0
}

// GetSampleSum returns the sum of all observations, or 0 if unset.
func (m *Summary) GetSampleSum() float64 {
	if m != nil && m.SampleSum != nil {
		return *m.SampleSum
	}
	return 0
}

// GetQuantile returns the quantile/value pairs, or nil if unset.
func (m *Summary) GetQuantile() []*Quantile {
	if m != nil {
		return m.Quantile
	}
	return nil
}
// Untyped is the sample of an untyped metric, as defined in metrics.proto.
// Generated by protoc-gen-go — do not edit by hand.
type Untyped struct {
	Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard protobuf plumbing generated for every message type.
func (m *Untyped) Reset() { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage() {}
func (*Untyped) Descriptor() ([]byte, []int) {
	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
}
func (m *Untyped) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Untyped.Unmarshal(m, b)
}
func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
}
func (dst *Untyped) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Untyped.Merge(dst, src)
}
func (m *Untyped) XXX_Size() int {
	return xxx_messageInfo_Untyped.Size(m)
}
func (m *Untyped) XXX_DiscardUnknown() {
	xxx_messageInfo_Untyped.DiscardUnknown(m)
}

var xxx_messageInfo_Untyped proto.InternalMessageInfo

// GetValue returns the metric value, or 0 if unset.
func (m *Untyped) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}
// Histogram is the sample of a histogram metric (count, sum, and cumulative
// buckets), as defined in metrics.proto. Generated by protoc-gen-go — do not
// edit by hand.
type Histogram struct {
	SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
	SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
	Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard protobuf plumbing generated for every message type.
func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
func (*Histogram) Descriptor() ([]byte, []int) {
	return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
}
func (m *Histogram) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Histogram.Unmarshal(m, b)
}
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
}
func (dst *Histogram) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Histogram.Merge(dst, src)
}
func (m *Histogram) XXX_Size() int {
	return xxx_messageInfo_Histogram.Size(m)
}
func (m *Histogram) XXX_DiscardUnknown() {
	xxx_messageInfo_Histogram.DiscardUnknown(m)
}

var xxx_messageInfo_Histogram proto.InternalMessageInfo

// GetSampleCount returns the number of observations, or 0 if unset.
func (m *Histogram) GetSampleCount() uint64 {
	if m != nil && m.SampleCount != nil {
		return *m.SampleCount
	}
	return 0
}

// GetSampleSum returns the sum of all observations, or 0 if unset.
func (m *Histogram) GetSampleSum() float64 {
	if m != nil && m.SampleSum != nil {
		return *m.SampleSum
	}
	return 0
}

// GetBucket returns the histogram buckets, or nil if unset.
func (m *Histogram) GetBucket() []*Bucket {
	if m != nil {
		return m.Bucket
	}
	return nil
}
type Bucket struct { | |||
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` | |||
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` | |||
XXX_NoUnkeyedLiteral struct{} `json:"-"` | |||
XXX_unrecognized []byte `json:"-"` | |||
XXX_sizecache int32 `json:"-"` | |||
} | |||
func (m *Bucket) Reset() { *m = Bucket{} } | |||
func (m *Bucket) String() string { return proto.CompactTextString(m) } | |||
func (*Bucket) ProtoMessage() {} | |||
func (*Bucket) Descriptor() ([]byte, []int) { | |||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} | |||
} | |||
func (m *Bucket) XXX_Unmarshal(b []byte) error { | |||
return xxx_messageInfo_Bucket.Unmarshal(m, b) | |||
} | |||
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |||
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) | |||
} | |||
func (dst *Bucket) XXX_Merge(src proto.Message) { | |||
xxx_messageInfo_Bucket.Merge(dst, src) | |||
} | |||
func (m *Bucket) XXX_Size() int { | |||
return xxx_messageInfo_Bucket.Size(m) | |||
} | |||
func (m *Bucket) XXX_DiscardUnknown() { | |||
xxx_messageInfo_Bucket.DiscardUnknown(m) | |||
} | |||
var xxx_messageInfo_Bucket proto.InternalMessageInfo | |||
func (m *Bucket) GetCumulativeCount() uint64 { | |||
if m != nil && m.CumulativeCount != nil { | |||
return *m.CumulativeCount | |||
} | |||
return 0 | |||
} | |||
func (m *Bucket) GetUpperBound() float64 { | |||
if m != nil && m.UpperBound != nil { | |||
return *m.UpperBound | |||
} | |||
return 0 | |||
} | |||
type Metric struct { | |||
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` | |||
Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` | |||
Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` | |||
Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` | |||
Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` | |||
Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` | |||
TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` | |||
XXX_NoUnkeyedLiteral struct{} `json:"-"` | |||
XXX_unrecognized []byte `json:"-"` | |||
XXX_sizecache int32 `json:"-"` | |||
} | |||
func (m *Metric) Reset() { *m = Metric{} } | |||
func (m *Metric) String() string { return proto.CompactTextString(m) } | |||
func (*Metric) ProtoMessage() {} | |||
func (*Metric) Descriptor() ([]byte, []int) { | |||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} | |||
} | |||
func (m *Metric) XXX_Unmarshal(b []byte) error { | |||
return xxx_messageInfo_Metric.Unmarshal(m, b) | |||
} | |||
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |||
return xxx_messageInfo_Metric.Marshal(b, m, deterministic) | |||
} | |||
func (dst *Metric) XXX_Merge(src proto.Message) { | |||
xxx_messageInfo_Metric.Merge(dst, src) | |||
} | |||
func (m *Metric) XXX_Size() int { | |||
return xxx_messageInfo_Metric.Size(m) | |||
} | |||
func (m *Metric) XXX_DiscardUnknown() { | |||
xxx_messageInfo_Metric.DiscardUnknown(m) | |||
} | |||
var xxx_messageInfo_Metric proto.InternalMessageInfo | |||
func (m *Metric) GetLabel() []*LabelPair { | |||
if m != nil { | |||
return m.Label | |||
} | |||
return nil | |||
} | |||
func (m *Metric) GetGauge() *Gauge { | |||
if m != nil { | |||
return m.Gauge | |||
} | |||
return nil | |||
} | |||
func (m *Metric) GetCounter() *Counter { | |||
if m != nil { | |||
return m.Counter | |||
} | |||
return nil | |||
} | |||
func (m *Metric) GetSummary() *Summary { | |||
if m != nil { | |||
return m.Summary | |||
} | |||
return nil | |||
} | |||
func (m *Metric) GetUntyped() *Untyped { | |||
if m != nil { | |||
return m.Untyped | |||
} | |||
return nil | |||
} | |||
func (m *Metric) GetHistogram() *Histogram { | |||
if m != nil { | |||
return m.Histogram | |||
} | |||
return nil | |||
} | |||
func (m *Metric) GetTimestampMs() int64 { | |||
if m != nil && m.TimestampMs != nil { | |||
return *m.TimestampMs | |||
} | |||
return 0 | |||
} | |||
type MetricFamily struct { | |||
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` | |||
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` | |||
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` | |||
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` | |||
XXX_NoUnkeyedLiteral struct{} `json:"-"` | |||
XXX_unrecognized []byte `json:"-"` | |||
XXX_sizecache int32 `json:"-"` | |||
} | |||
func (m *MetricFamily) Reset() { *m = MetricFamily{} } | |||
func (m *MetricFamily) String() string { return proto.CompactTextString(m) } | |||
func (*MetricFamily) ProtoMessage() {} | |||
func (*MetricFamily) Descriptor() ([]byte, []int) { | |||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} | |||
} | |||
func (m *MetricFamily) XXX_Unmarshal(b []byte) error { | |||
return xxx_messageInfo_MetricFamily.Unmarshal(m, b) | |||
} | |||
func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |||
return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) | |||
} | |||
func (dst *MetricFamily) XXX_Merge(src proto.Message) { | |||
xxx_messageInfo_MetricFamily.Merge(dst, src) | |||
} | |||
func (m *MetricFamily) XXX_Size() int { | |||
return xxx_messageInfo_MetricFamily.Size(m) | |||
} | |||
func (m *MetricFamily) XXX_DiscardUnknown() { | |||
xxx_messageInfo_MetricFamily.DiscardUnknown(m) | |||
} | |||
var xxx_messageInfo_MetricFamily proto.InternalMessageInfo | |||
func (m *MetricFamily) GetName() string { | |||
if m != nil && m.Name != nil { | |||
return *m.Name | |||
} | |||
return "" | |||
} | |||
func (m *MetricFamily) GetHelp() string { | |||
if m != nil && m.Help != nil { | |||
return *m.Help | |||
} | |||
return "" | |||
} | |||
func (m *MetricFamily) GetType() MetricType { | |||
if m != nil && m.Type != nil { | |||
return *m.Type | |||
} | |||
return MetricType_COUNTER | |||
} | |||
func (m *MetricFamily) GetMetric() []*Metric { | |||
if m != nil { | |||
return m.Metric | |||
} | |||
return nil | |||
} | |||
func init() { | |||
proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") | |||
proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") | |||
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") | |||
proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") | |||
proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") | |||
proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") | |||
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") | |||
proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") | |||
proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") | |||
proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") | |||
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) | |||
} | |||
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } | |||
var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ | |||
// 591 bytes of a gzipped FileDescriptorProto | |||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, | |||
0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, | |||
0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, | |||
0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, | |||
0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, | |||
0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, | |||
0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, | |||
0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, | |||
0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, | |||
0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, | |||
0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, | |||
0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, | |||
0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, | |||
0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, | |||
0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, | |||
0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, | |||
0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, | |||
0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, | |||
0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, | |||
0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, | |||
0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, | |||
0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, | |||
0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, | |||
0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, | |||
0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, | |||
0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, | |||
0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, | |||
0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, | |||
0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, | |||
0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, | |||
0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, | |||
0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, | |||
0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, | |||
0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, | |||
0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, | |||
0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, | |||
0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, | |||
} |
@@ -0,0 +1,201 @@ | |||
Apache License | |||
Version 2.0, January 2004 | |||
http://www.apache.org/licenses/ | |||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
1. Definitions. | |||
"License" shall mean the terms and conditions for use, reproduction, | |||
and distribution as defined by Sections 1 through 9 of this document. | |||
"Licensor" shall mean the copyright owner or entity authorized by | |||
the copyright owner that is granting the License. | |||
"Legal Entity" shall mean the union of the acting entity and all | |||
other entities that control, are controlled by, or are under common | |||
control with that entity. For the purposes of this definition, | |||
"control" means (i) the power, direct or indirect, to cause the | |||
direction or management of such entity, whether by contract or | |||
otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
outstanding shares, or (iii) beneficial ownership of such entity. | |||
"You" (or "Your") shall mean an individual or Legal Entity | |||
exercising permissions granted by this License. | |||
"Source" form shall mean the preferred form for making modifications, | |||
including but not limited to software source code, documentation | |||
source, and configuration files. | |||
"Object" form shall mean any form resulting from mechanical | |||
transformation or translation of a Source form, including but | |||
not limited to compiled object code, generated documentation, | |||
and conversions to other media types. | |||
"Work" shall mean the work of authorship, whether in Source or | |||
Object form, made available under the License, as indicated by a | |||
copyright notice that is included in or attached to the work | |||
(an example is provided in the Appendix below). | |||
"Derivative Works" shall mean any work, whether in Source or Object | |||
form, that is based on (or derived from) the Work and for which the | |||
editorial revisions, annotations, elaborations, or other modifications | |||
represent, as a whole, an original work of authorship. For the purposes | |||
of this License, Derivative Works shall not include works that remain | |||
separable from, or merely link (or bind by name) to the interfaces of, | |||
the Work and Derivative Works thereof. | |||
"Contribution" shall mean any work of authorship, including | |||
the original version of the Work and any modifications or additions | |||
to that Work or Derivative Works thereof, that is intentionally | |||
submitted to Licensor for inclusion in the Work by the copyright owner | |||
or by an individual or Legal Entity authorized to submit on behalf of | |||
the copyright owner. For the purposes of this definition, "submitted" | |||
means any form of electronic, verbal, or written communication sent | |||
to the Licensor or its representatives, including but not limited to | |||
communication on electronic mailing lists, source code control systems, | |||
and issue tracking systems that are managed by, or on behalf of, the | |||
Licensor for the purpose of discussing and improving the Work, but | |||
excluding communication that is conspicuously marked or otherwise | |||
designated in writing by the copyright owner as "Not a Contribution." | |||
"Contributor" shall mean Licensor and any individual or Legal Entity | |||
on behalf of whom a Contribution has been received by Licensor and | |||
subsequently incorporated within the Work. | |||
2. Grant of Copyright License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
copyright license to reproduce, prepare Derivative Works of, | |||
publicly display, publicly perform, sublicense, and distribute the | |||
Work and such Derivative Works in Source or Object form. | |||
3. Grant of Patent License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
(except as stated in this section) patent license to make, have made, | |||
use, offer to sell, sell, import, and otherwise transfer the Work, | |||
where such license applies only to those patent claims licensable | |||
by such Contributor that are necessarily infringed by their | |||
Contribution(s) alone or by combination of their Contribution(s) | |||
with the Work to which such Contribution(s) was submitted. If You | |||
institute patent litigation against any entity (including a | |||
cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
or a Contribution incorporated within the Work constitutes direct | |||
or contributory patent infringement, then any patent licenses | |||
granted to You under this License for that Work shall terminate | |||
as of the date such litigation is filed. | |||
4. Redistribution. You may reproduce and distribute copies of the | |||
Work or Derivative Works thereof in any medium, with or without | |||
modifications, and in Source or Object form, provided that You | |||
meet the following conditions: | |||
(a) You must give any other recipients of the Work or | |||
Derivative Works a copy of this License; and | |||
(b) You must cause any modified files to carry prominent notices | |||
stating that You changed the files; and | |||
(c) You must retain, in the Source form of any Derivative Works | |||
that You distribute, all copyright, patent, trademark, and | |||
attribution notices from the Source form of the Work, | |||
excluding those notices that do not pertain to any part of | |||
the Derivative Works; and | |||
(d) If the Work includes a "NOTICE" text file as part of its | |||
distribution, then any Derivative Works that You distribute must | |||
include a readable copy of the attribution notices contained | |||
within such NOTICE file, excluding those notices that do not | |||
pertain to any part of the Derivative Works, in at least one | |||
of the following places: within a NOTICE text file distributed | |||
as part of the Derivative Works; within the Source form or | |||
documentation, if provided along with the Derivative Works; or, | |||
within a display generated by the Derivative Works, if and | |||
wherever such third-party notices normally appear. The contents | |||
of the NOTICE file are for informational purposes only and | |||
do not modify the License. You may add Your own attribution | |||
notices within Derivative Works that You distribute, alongside | |||
or as an addendum to the NOTICE text from the Work, provided | |||
that such additional attribution notices cannot be construed | |||
as modifying the License. | |||
You may add Your own copyright statement to Your modifications and | |||
may provide additional or different license terms and conditions | |||
for use, reproduction, or distribution of Your modifications, or | |||
for any such Derivative Works as a whole, provided Your use, | |||
reproduction, and distribution of the Work otherwise complies with | |||
the conditions stated in this License. | |||
5. Submission of Contributions. Unless You explicitly state otherwise, | |||
any Contribution intentionally submitted for inclusion in the Work | |||
by You to the Licensor shall be under the terms and conditions of | |||
this License, without any additional terms or conditions. | |||
Notwithstanding the above, nothing herein shall supersede or modify | |||
the terms of any separate license agreement you may have executed | |||
with Licensor regarding such Contributions. | |||
6. Trademarks. This License does not grant permission to use the trade | |||
names, trademarks, service marks, or product names of the Licensor, | |||
except as required for reasonable and customary use in describing the | |||
origin of the Work and reproducing the content of the NOTICE file. | |||
7. Disclaimer of Warranty. Unless required by applicable law or | |||
agreed to in writing, Licensor provides the Work (and each | |||
Contributor provides its Contributions) on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
implied, including, without limitation, any warranties or conditions | |||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
PARTICULAR PURPOSE. You are solely responsible for determining the | |||
appropriateness of using or redistributing the Work and assume any | |||
risks associated with Your exercise of permissions under this License. | |||
8. Limitation of Liability. In no event and under no legal theory, | |||
whether in tort (including negligence), contract, or otherwise, | |||
unless required by applicable law (such as deliberate and grossly | |||
negligent acts) or agreed to in writing, shall any Contributor be | |||
liable to You for damages, including any direct, indirect, special, | |||
incidental, or consequential damages of any character arising as a | |||
result of this License or out of the use or inability to use the | |||
Work (including but not limited to damages for loss of goodwill, | |||
work stoppage, computer failure or malfunction, or any and all | |||
other commercial damages or losses), even if such Contributor | |||
has been advised of the possibility of such damages. | |||
9. Accepting Warranty or Additional Liability. While redistributing | |||
the Work or Derivative Works thereof, You may choose to offer, | |||
and charge a fee for, acceptance of support, warranty, indemnity, | |||
or other liability obligations and/or rights consistent with this | |||
License. However, in accepting such obligations, You may act only | |||
on Your own behalf and on Your sole responsibility, not on behalf | |||
of any other Contributor, and only if You agree to indemnify, | |||
defend, and hold each Contributor harmless for any liability | |||
incurred by, or claims asserted against, such Contributor by reason | |||
of your accepting any such warranty or additional liability. | |||
END OF TERMS AND CONDITIONS | |||
APPENDIX: How to apply the Apache License to your work. | |||
To apply the Apache License to your work, attach the following | |||
boilerplate notice, with the fields enclosed by brackets "[]" | |||
replaced with your own identifying information. (Don't include | |||
the brackets!) The text should be enclosed in the appropriate | |||
comment syntax for the file format. We also recommend that a | |||
file or class name and description of purpose be included on the | |||
same "printed page" as the copyright notice for easier | |||
identification within third-party archives. | |||
Copyright [yyyy] [name of copyright owner] | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. |
@@ -0,0 +1,201 @@ | |||
Apache License | |||
Version 2.0, January 2004 | |||
http://www.apache.org/licenses/ | |||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
1. Definitions. | |||
"License" shall mean the terms and conditions for use, reproduction, | |||
and distribution as defined by Sections 1 through 9 of this document. | |||
"Licensor" shall mean the copyright owner or entity authorized by | |||
the copyright owner that is granting the License. | |||
"Legal Entity" shall mean the union of the acting entity and all | |||
other entities that control, are controlled by, or are under common | |||
control with that entity. For the purposes of this definition, | |||
"control" means (i) the power, direct or indirect, to cause the | |||
direction or management of such entity, whether by contract or | |||
otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
outstanding shares, or (iii) beneficial ownership of such entity. | |||
"You" (or "Your") shall mean an individual or Legal Entity | |||
exercising permissions granted by this License. | |||
"Source" form shall mean the preferred form for making modifications, | |||
including but not limited to software source code, documentation | |||
source, and configuration files. | |||
"Object" form shall mean any form resulting from mechanical | |||
transformation or translation of a Source form, including but | |||
not limited to compiled object code, generated documentation, | |||
and conversions to other media types. | |||
"Work" shall mean the work of authorship, whether in Source or | |||
Object form, made available under the License, as indicated by a | |||
copyright notice that is included in or attached to the work | |||
(an example is provided in the Appendix below). | |||
"Derivative Works" shall mean any work, whether in Source or Object | |||
form, that is based on (or derived from) the Work and for which the | |||
editorial revisions, annotations, elaborations, or other modifications | |||
represent, as a whole, an original work of authorship. For the purposes | |||
of this License, Derivative Works shall not include works that remain | |||
separable from, or merely link (or bind by name) to the interfaces of, | |||
the Work and Derivative Works thereof. | |||
"Contribution" shall mean any work of authorship, including | |||
the original version of the Work and any modifications or additions | |||
to that Work or Derivative Works thereof, that is intentionally | |||
submitted to Licensor for inclusion in the Work by the copyright owner | |||
or by an individual or Legal Entity authorized to submit on behalf of | |||
the copyright owner. For the purposes of this definition, "submitted" | |||
means any form of electronic, verbal, or written communication sent | |||
to the Licensor or its representatives, including but not limited to | |||
communication on electronic mailing lists, source code control systems, | |||
and issue tracking systems that are managed by, or on behalf of, the | |||
Licensor for the purpose of discussing and improving the Work, but | |||
excluding communication that is conspicuously marked or otherwise | |||
designated in writing by the copyright owner as "Not a Contribution." | |||
"Contributor" shall mean Licensor and any individual or Legal Entity | |||
on behalf of whom a Contribution has been received by Licensor and | |||
subsequently incorporated within the Work. | |||
2. Grant of Copyright License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
copyright license to reproduce, prepare Derivative Works of, | |||
publicly display, publicly perform, sublicense, and distribute the | |||
Work and such Derivative Works in Source or Object form. | |||
3. Grant of Patent License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
(except as stated in this section) patent license to make, have made, | |||
use, offer to sell, sell, import, and otherwise transfer the Work, | |||
where such license applies only to those patent claims licensable | |||
by such Contributor that are necessarily infringed by their | |||
Contribution(s) alone or by combination of their Contribution(s) | |||
with the Work to which such Contribution(s) was submitted. If You | |||
institute patent litigation against any entity (including a | |||
cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
or a Contribution incorporated within the Work constitutes direct | |||
or contributory patent infringement, then any patent licenses | |||
granted to You under this License for that Work shall terminate | |||
as of the date such litigation is filed. | |||
4. Redistribution. You may reproduce and distribute copies of the | |||
Work or Derivative Works thereof in any medium, with or without | |||
modifications, and in Source or Object form, provided that You | |||
meet the following conditions: | |||
(a) You must give any other recipients of the Work or | |||
Derivative Works a copy of this License; and | |||
(b) You must cause any modified files to carry prominent notices | |||
stating that You changed the files; and | |||
(c) You must retain, in the Source form of any Derivative Works | |||
that You distribute, all copyright, patent, trademark, and | |||
attribution notices from the Source form of the Work, | |||
excluding those notices that do not pertain to any part of | |||
the Derivative Works; and | |||
(d) If the Work includes a "NOTICE" text file as part of its | |||
distribution, then any Derivative Works that You distribute must | |||
include a readable copy of the attribution notices contained | |||
within such NOTICE file, excluding those notices that do not | |||
pertain to any part of the Derivative Works, in at least one | |||
of the following places: within a NOTICE text file distributed | |||
as part of the Derivative Works; within the Source form or | |||
documentation, if provided along with the Derivative Works; or, | |||
within a display generated by the Derivative Works, if and | |||
wherever such third-party notices normally appear. The contents | |||
of the NOTICE file are for informational purposes only and | |||
do not modify the License. You may add Your own attribution | |||
notices within Derivative Works that You distribute, alongside | |||
or as an addendum to the NOTICE text from the Work, provided | |||
that such additional attribution notices cannot be construed | |||
as modifying the License. | |||
You may add Your own copyright statement to Your modifications and | |||
may provide additional or different license terms and conditions | |||
for use, reproduction, or distribution of Your modifications, or | |||
for any such Derivative Works as a whole, provided Your use, | |||
reproduction, and distribution of the Work otherwise complies with | |||
the conditions stated in this License. | |||
5. Submission of Contributions. Unless You explicitly state otherwise, | |||
any Contribution intentionally submitted for inclusion in the Work | |||
by You to the Licensor shall be under the terms and conditions of | |||
this License, without any additional terms or conditions. | |||
Notwithstanding the above, nothing herein shall supersede or modify | |||
the terms of any separate license agreement you may have executed | |||
with Licensor regarding such Contributions. | |||
6. Trademarks. This License does not grant permission to use the trade | |||
names, trademarks, service marks, or product names of the Licensor, | |||
except as required for reasonable and customary use in describing the | |||
origin of the Work and reproducing the content of the NOTICE file. | |||
7. Disclaimer of Warranty. Unless required by applicable law or | |||
agreed to in writing, Licensor provides the Work (and each | |||
Contributor provides its Contributions) on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
implied, including, without limitation, any warranties or conditions | |||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
PARTICULAR PURPOSE. You are solely responsible for determining the | |||
appropriateness of using or redistributing the Work and assume any | |||
risks associated with Your exercise of permissions under this License. | |||
8. Limitation of Liability. In no event and under no legal theory, | |||
whether in tort (including negligence), contract, or otherwise, | |||
unless required by applicable law (such as deliberate and grossly | |||
negligent acts) or agreed to in writing, shall any Contributor be | |||
liable to You for damages, including any direct, indirect, special, | |||
incidental, or consequential damages of any character arising as a | |||
result of this License or out of the use or inability to use the | |||
Work (including but not limited to damages for loss of goodwill, | |||
work stoppage, computer failure or malfunction, or any and all | |||
other commercial damages or losses), even if such Contributor | |||
has been advised of the possibility of such damages. | |||
9. Accepting Warranty or Additional Liability. While redistributing | |||
the Work or Derivative Works thereof, You may choose to offer, | |||
and charge a fee for, acceptance of support, warranty, indemnity, | |||
or other liability obligations and/or rights consistent with this | |||
License. However, in accepting such obligations, You may act only | |||
on Your own behalf and on Your sole responsibility, not on behalf | |||
of any other Contributor, and only if You agree to indemnify, | |||
defend, and hold each Contributor harmless for any liability | |||
incurred by, or claims asserted against, such Contributor by reason | |||
of your accepting any such warranty or additional liability. | |||
END OF TERMS AND CONDITIONS | |||
APPENDIX: How to apply the Apache License to your work. | |||
To apply the Apache License to your work, attach the following | |||
boilerplate notice, with the fields enclosed by brackets "[]" | |||
replaced with your own identifying information. (Don't include | |||
the brackets!) The text should be enclosed in the appropriate | |||
comment syntax for the file format. We also recommend that a | |||
file or class name and description of purpose be included on the | |||
same "printed page" as the copyright notice for easier | |||
identification within third-party archives. | |||
Copyright [yyyy] [name of copyright owner] | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. |
@@ -0,0 +1,5 @@ | |||
Common libraries shared by Prometheus Go components. | |||
Copyright 2015 The Prometheus Authors | |||
This product includes software developed at | |||
SoundCloud Ltd. (http://soundcloud.com/). |
@@ -0,0 +1,429 @@ | |||
// Copyright 2015 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package expfmt | |||
import ( | |||
"fmt" | |||
"io" | |||
"math" | |||
"mime" | |||
"net/http" | |||
dto "github.com/prometheus/client_model/go" | |||
"github.com/matttproud/golang_protobuf_extensions/pbutil" | |||
"github.com/prometheus/common/model" | |||
) | |||
// Decoder types decode an input stream into metric families.
type Decoder interface {
	// Decode reads the next MetricFamily from the underlying input into the
	// given message. When the input is exhausted it returns an error
	// (io.EOF for the text decoder; see the concrete implementations).
	Decode(*dto.MetricFamily) error
}
// DecodeOptions contains options used by the Decoder and in sample extraction.
type DecodeOptions struct {
	// Timestamp is added to each value from the stream that has no explicit timestamp set.
	// It is consulted by the extract* helpers via extractSamples.
	Timestamp model.Time
}
// ResponseFormat extracts the correct format from a HTTP response header. | |||
// If no matching format can be found FormatUnknown is returned. | |||
func ResponseFormat(h http.Header) Format { | |||
ct := h.Get(hdrContentType) | |||
mediatype, params, err := mime.ParseMediaType(ct) | |||
if err != nil { | |||
return FmtUnknown | |||
} | |||
const textType = "text/plain" | |||
switch mediatype { | |||
case ProtoType: | |||
if p, ok := params["proto"]; ok && p != ProtoProtocol { | |||
return FmtUnknown | |||
} | |||
if e, ok := params["encoding"]; ok && e != "delimited" { | |||
return FmtUnknown | |||
} | |||
return FmtProtoDelim | |||
case textType: | |||
if v, ok := params["version"]; ok && v != TextVersion { | |||
return FmtUnknown | |||
} | |||
return FmtText | |||
} | |||
return FmtUnknown | |||
} | |||
// NewDecoder returns a new decoder based on the given input format. | |||
// If the input format does not imply otherwise, a text format decoder is returned. | |||
func NewDecoder(r io.Reader, format Format) Decoder { | |||
switch format { | |||
case FmtProtoDelim: | |||
return &protoDecoder{r: r} | |||
} | |||
return &textDecoder{r: r} | |||
} | |||
// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
	r io.Reader
}

// Decode implements the Decoder interface. It reads one length-delimited
// MetricFamily message from the stream and then validates the metric name as
// well as every label name and label value, returning an error describing the
// first invalid item found.
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
	_, err := pbutil.ReadDelimited(d.r, v)
	if err != nil {
		return err
	}
	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
		return fmt.Errorf("invalid metric name %q", v.GetName())
	}
	for _, m := range v.GetMetric() {
		// Nil metrics and nil label pairs are tolerated and skipped.
		if m == nil {
			continue
		}
		for _, l := range m.GetLabel() {
			if l == nil {
				continue
			}
			if !model.LabelValue(l.GetValue()).IsValid() {
				return fmt.Errorf("invalid label value %q", l.GetValue())
			}
			if !model.LabelName(l.GetName()).IsValid() {
				return fmt.Errorf("invalid label name %q", l.GetName())
			}
		}
	}
	return nil
}
// textDecoder implements the Decoder interface for the text protocol. | |||
type textDecoder struct { | |||
r io.Reader | |||
p TextParser | |||
fams []*dto.MetricFamily | |||
} | |||
// Decode implements the Decoder interface. | |||
func (d *textDecoder) Decode(v *dto.MetricFamily) error { | |||
// TODO(fabxc): Wrap this as a line reader to make streaming safer. | |||
if len(d.fams) == 0 { | |||
// No cached metric families, read everything and parse metrics. | |||
fams, err := d.p.TextToMetricFamilies(d.r) | |||
if err != nil { | |||
return err | |||
} | |||
if len(fams) == 0 { | |||
return io.EOF | |||
} | |||
d.fams = make([]*dto.MetricFamily, 0, len(fams)) | |||
for _, f := range fams { | |||
d.fams = append(d.fams, f) | |||
} | |||
} | |||
*v = *d.fams[0] | |||
d.fams = d.fams[1:] | |||
return nil | |||
} | |||
// SampleDecoder wraps a Decoder to extract samples from the metric families
// decoded by the wrapped Decoder.
type SampleDecoder struct {
	Dec  Decoder
	Opts *DecodeOptions
	// f is reused across Decode calls so a fresh MetricFamily is not
	// allocated for every invocation.
	f dto.MetricFamily
}

// Decode calls the Decode method of the wrapped Decoder and then extracts the
// samples from the decoded MetricFamily into the provided model.Vector. The
// vector is overwritten, not appended to.
func (sd *SampleDecoder) Decode(s *model.Vector) error {
	err := sd.Dec.Decode(&sd.f)
	if err != nil {
		return err
	}
	*s, err = extractSamples(&sd.f, sd.Opts)
	return err
}
// ExtractSamples builds a slice of samples from the provided metric | |||
// families. If an error occurrs during sample extraction, it continues to | |||
// extract from the remaining metric families. The returned error is the last | |||
// error that has occurred. | |||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { | |||
var ( | |||
all model.Vector | |||
lastErr error | |||
) | |||
for _, f := range fams { | |||
some, err := extractSamples(f, o) | |||
if err != nil { | |||
lastErr = err | |||
continue | |||
} | |||
all = append(all, some...) | |||
} | |||
return all, lastErr | |||
} | |||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { | |||
switch f.GetType() { | |||
case dto.MetricType_COUNTER: | |||
return extractCounter(o, f), nil | |||
case dto.MetricType_GAUGE: | |||
return extractGauge(o, f), nil | |||
case dto.MetricType_SUMMARY: | |||
return extractSummary(o, f), nil | |||
case dto.MetricType_UNTYPED: | |||
return extractUntyped(o, f), nil | |||
case dto.MetricType_HISTOGRAM: | |||
return extractHistogram(o, f), nil | |||
} | |||
return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) | |||
} | |||
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { | |||
samples := make(model.Vector, 0, len(f.Metric)) | |||
for _, m := range f.Metric { | |||
if m.Counter == nil { | |||
continue | |||
} | |||
lset := make(model.LabelSet, len(m.Label)+1) | |||
for _, p := range m.Label { | |||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | |||
} | |||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) | |||
smpl := &model.Sample{ | |||
Metric: model.Metric(lset), | |||
Value: model.SampleValue(m.Counter.GetValue()), | |||
} | |||
if m.TimestampMs != nil { | |||
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) | |||
} else { | |||
smpl.Timestamp = o.Timestamp | |||
} | |||
samples = append(samples, smpl) | |||
} | |||
return samples | |||
} | |||
func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { | |||
samples := make(model.Vector, 0, len(f.Metric)) | |||
for _, m := range f.Metric { | |||
if m.Gauge == nil { | |||
continue | |||
} | |||
lset := make(model.LabelSet, len(m.Label)+1) | |||
for _, p := range m.Label { | |||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | |||
} | |||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) | |||
smpl := &model.Sample{ | |||
Metric: model.Metric(lset), | |||
Value: model.SampleValue(m.Gauge.GetValue()), | |||
} | |||
if m.TimestampMs != nil { | |||
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) | |||
} else { | |||
smpl.Timestamp = o.Timestamp | |||
} | |||
samples = append(samples, smpl) | |||
} | |||
return samples | |||
} | |||
func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { | |||
samples := make(model.Vector, 0, len(f.Metric)) | |||
for _, m := range f.Metric { | |||
if m.Untyped == nil { | |||
continue | |||
} | |||
lset := make(model.LabelSet, len(m.Label)+1) | |||
for _, p := range m.Label { | |||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) | |||
} | |||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) | |||
smpl := &model.Sample{ | |||
Metric: model.Metric(lset), | |||
Value: model.SampleValue(m.Untyped.GetValue()), | |||
} | |||
if m.TimestampMs != nil { | |||
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) | |||
} else { | |||
smpl.Timestamp = o.Timestamp | |||
} | |||
samples = append(samples, smpl) | |||
} | |||
return samples | |||
} | |||
// extractSummary converts every summary metric in f into samples: one sample
// per quantile (carrying the quantile in the quantile label), plus a
// synthesized <name>_sum and <name>_count sample per metric.
func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
	samples := make(model.Vector, 0, len(f.Metric))
	for _, m := range f.Metric {
		if m.Summary == nil {
			continue
		}
		// An explicit per-metric timestamp (milliseconds) overrides the
		// default from the options.
		timestamp := o.Timestamp
		if m.TimestampMs != nil {
			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
		}
		for _, q := range m.Summary.Quantile {
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			// BUG(matt): Update other names to "quantile".
			lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     model.SampleValue(q.GetValue()),
				Timestamp: timestamp,
			})
		}
		// Synthesized <name>_sum sample.
		lset := make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
		samples = append(samples, &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Summary.GetSampleSum()),
			Timestamp: timestamp,
		})
		// Synthesized <name>_count sample.
		lset = make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
		samples = append(samples, &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Summary.GetSampleCount()),
			Timestamp: timestamp,
		})
	}
	return samples
}
// extractHistogram converts every histogram metric in f into samples: one
// <name>_bucket sample per bucket (with the upper bound in the bucket label),
// a synthesized +Inf bucket if none was present in the input, plus
// <name>_sum and <name>_count samples.
func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
	samples := make(model.Vector, 0, len(f.Metric))
	for _, m := range f.Metric {
		if m.Histogram == nil {
			continue
		}
		// An explicit per-metric timestamp (milliseconds) overrides the
		// default from the options.
		timestamp := o.Timestamp
		if m.TimestampMs != nil {
			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
		}
		infSeen := false
		for _, q := range m.Histogram.Bucket {
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
			if math.IsInf(q.GetUpperBound(), +1) {
				infSeen = true
			}
			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     model.SampleValue(q.GetCumulativeCount()),
				Timestamp: timestamp,
			})
		}
		// Synthesized <name>_sum sample.
		lset := make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
		samples = append(samples, &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Histogram.GetSampleSum()),
			Timestamp: timestamp,
		})
		// Synthesized <name>_count sample, kept in a variable because its
		// value doubles as the +Inf bucket's cumulative count below.
		lset = make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
		count := &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Histogram.GetSampleCount()),
			Timestamp: timestamp,
		}
		samples = append(samples, count)

		if !infSeen {
			// Append an infinity bucket sample.
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     count.Value,
				Timestamp: timestamp,
			})
		}
	}
	return samples
}
@@ -0,0 +1,88 @@ | |||
// Copyright 2015 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package expfmt | |||
import ( | |||
"fmt" | |||
"io" | |||
"net/http" | |||
"github.com/golang/protobuf/proto" | |||
"github.com/matttproud/golang_protobuf_extensions/pbutil" | |||
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// Encoder types encode metric families into an underlying wire protocol.
type Encoder interface {
	// Encode writes a single MetricFamily in the encoder's wire format.
	Encode(*dto.MetricFamily) error
}
// encoder adapts a plain function to the Encoder interface.
type encoder func(*dto.MetricFamily) error

// Encode implements the Encoder interface by calling the function itself.
func (e encoder) Encode(v *dto.MetricFamily) error {
	return e(v)
}
// Negotiate returns the Content-Type based on the given Accept header.
// If no appropriate accepted type is found, FmtText is returned.
//
// NOTE(review): clauses are examined in the order returned by
// goautoneg.ParseAccept — presumably sorted by client preference; confirm
// against that package before relying on precedence.
func Negotiate(h http.Header) Format {
	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
		// Check for protocol buffer: both the media type and the proto
		// parameter must match; the encoding parameter then selects the
		// concrete protobuf representation.
		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
			switch ac.Params["encoding"] {
			case "delimited":
				return FmtProtoDelim
			case "text":
				return FmtProtoText
			case "compact-text":
				return FmtProtoCompact
			}
		}
		// Check for text format. An absent version parameter is accepted.
		ver := ac.Params["version"]
		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
			return FmtText
		}
	}
	return FmtText
}
// NewEncoder returns a new encoder based on content type negotiation.
//
// NOTE(review): an unknown format panics rather than returning an error, so
// callers should only pass formats produced by Negotiate or the Fmt*
// constants of this package.
func NewEncoder(w io.Writer, format Format) Encoder {
	switch format {
	case FmtProtoDelim:
		// Length-delimited protobuf messages.
		return encoder(func(v *dto.MetricFamily) error {
			_, err := pbutil.WriteDelimited(w, v)
			return err
		})
	case FmtProtoCompact:
		// Compact (single-line) protobuf text representation.
		return encoder(func(v *dto.MetricFamily) error {
			_, err := fmt.Fprintln(w, v.String())
			return err
		})
	case FmtProtoText:
		// Multi-line protobuf text representation.
		return encoder(func(v *dto.MetricFamily) error {
			_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
			return err
		})
	case FmtText:
		// Prometheus text exposition format.
		return encoder(func(v *dto.MetricFamily) error {
			_, err := MetricFamilyToText(w, v)
			return err
		})
	}
	panic("expfmt.NewEncoder: unknown format")
}
@@ -0,0 +1,38 @@ | |||
// Copyright 2015 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Package expfmt contains tools for reading and writing Prometheus metrics. | |||
package expfmt | |||
// Format specifies the HTTP content type of the different wire protocols.
// The concrete values are declared as the Fmt* constants of this package.
type Format string
// Constants to assemble the Content-Type values for the different wire protocols.
const (
	// TextVersion is the version of the text exposition format.
	TextVersion = "0.0.4"

	ProtoType     = `application/vnd.google.protobuf`
	ProtoProtocol = `io.prometheus.client.MetricFamily`
	// ProtoFmt is the Content-Type prefix shared by all protobuf formats;
	// an " encoding=..." suffix is appended to it for the concrete values.
	ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"

	// The Content-Type values for the different wire protocols.
	FmtUnknown      Format = `<unknown>`
	FmtText         Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
	FmtProtoText    Format = ProtoFmt + ` encoding=text`
	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
)
// HTTP header names used when negotiating and announcing formats.
const (
	hdrContentType = "Content-Type"
	hdrAccept      = "Accept"
)
@@ -0,0 +1,36 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Build only when actually fuzzing | |||
// +build gofuzz | |||
package expfmt | |||
import "bytes" | |||
// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: | |||
// | |||
// go-fuzz-build github.com/prometheus/common/expfmt | |||
// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz | |||
// | |||
// Further input samples should go in the folder fuzz/corpus. | |||
func Fuzz(in []byte) int { | |||
parser := TextParser{} | |||
_, err := parser.TextToMetricFamilies(bytes.NewReader(in)) | |||
if err != nil { | |||
return 0 | |||
} | |||
return 1 | |||
} |
@@ -0,0 +1,468 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package expfmt | |||
import ( | |||
"bytes" | |||
"fmt" | |||
"io" | |||
"math" | |||
"strconv" | |||
"strings" | |||
"sync" | |||
"github.com/prometheus/common/model" | |||
dto "github.com/prometheus/client_model/go" | |||
) | |||
// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
// implements it. Using these methods avoids per-call byte-slice conversions
// for single bytes, runes, and strings.
type enhancedWriter interface {
	io.Writer
	WriteRune(r rune) (n int, err error)
	WriteString(s string) (n int, err error)
	WriteByte(c byte) error
}
const (
	// initialBufSize is the starting capacity of the pooled output buffers.
	initialBufSize = 512
	// initialNumBufSize is the starting capacity of the pooled scratch
	// buffers used for number formatting.
	initialNumBufSize = 24
)
var (
	// bufPool holds *bytes.Buffer values used by MetricFamilyToText when
	// the destination writer does not implement enhancedWriter itself.
	bufPool = sync.Pool{
		New: func() interface{} {
			return bytes.NewBuffer(make([]byte, 0, initialBufSize))
		},
	}
	// numBufPool holds *[]byte scratch slices for the strconv.Append*
	// calls in writeFloat and writeInt.
	numBufPool = sync.Pool{
		New: func() interface{} {
			b := make([]byte, 0, initialNumBufSize)
			return &b
		},
	}
)
// MetricFamilyToText converts a MetricFamily proto message into text format and
// writes the resulting lines to 'out'. It returns the number of bytes written
// and any error encountered. The output will have the same order as the input,
// no further sorting is performed. Furthermore, this function assumes the input
// is already sanitized and does not perform any sanity checks. If the input
// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
	// Fail-fast checks.
	if len(in.Metric) == 0 {
		return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
	}
	name := in.GetName()
	if name == "" {
		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
	}

	// Try the interface upgrade. If it doesn't work, we'll use a
	// bytes.Buffer from the sync.Pool and write out its content to out in a
	// single go in the end.
	w, ok := out.(enhancedWriter)
	if !ok {
		b := bufPool.Get().(*bytes.Buffer)
		b.Reset()
		w = b
		// The deferred flush replaces the named return values: 'written'
		// becomes the byte count of the single out.Write, and a flush
		// error is only reported if no earlier error occurred.
		defer func() {
			bWritten, bErr := out.Write(b.Bytes())
			written = bWritten
			if err == nil {
				err = bErr
			}
			bufPool.Put(b)
		}()
	}

	var n int

	// Comments, first HELP, then TYPE.
	if in.Help != nil {
		n, err = w.WriteString("# HELP ")
		written += n
		if err != nil {
			return
		}
		n, err = w.WriteString(name)
		written += n
		if err != nil {
			return
		}
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return
		}
		n, err = writeEscapedString(w, *in.Help, false)
		written += n
		if err != nil {
			return
		}
		err = w.WriteByte('\n')
		written++
		if err != nil {
			return
		}
	}
	n, err = w.WriteString("# TYPE ")
	written += n
	if err != nil {
		return
	}
	n, err = w.WriteString(name)
	written += n
	if err != nil {
		return
	}
	metricType := in.GetType()
	switch metricType {
	case dto.MetricType_COUNTER:
		n, err = w.WriteString(" counter\n")
	case dto.MetricType_GAUGE:
		n, err = w.WriteString(" gauge\n")
	case dto.MetricType_SUMMARY:
		n, err = w.WriteString(" summary\n")
	case dto.MetricType_UNTYPED:
		n, err = w.WriteString(" untyped\n")
	case dto.MetricType_HISTOGRAM:
		n, err = w.WriteString(" histogram\n")
	default:
		return written, fmt.Errorf("unknown metric type %s", metricType.String())
	}
	written += n
	if err != nil {
		return
	}

	// Finally the samples, one line for each.
	for _, metric := range in.Metric {
		switch metricType {
		case dto.MetricType_COUNTER:
			if metric.Counter == nil {
				return written, fmt.Errorf(
					"expected counter in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				w, name, "", metric, "", 0,
				metric.Counter.GetValue(),
			)
		case dto.MetricType_GAUGE:
			if metric.Gauge == nil {
				return written, fmt.Errorf(
					"expected gauge in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				w, name, "", metric, "", 0,
				metric.Gauge.GetValue(),
			)
		case dto.MetricType_UNTYPED:
			if metric.Untyped == nil {
				return written, fmt.Errorf(
					"expected untyped in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				w, name, "", metric, "", 0,
				metric.Untyped.GetValue(),
			)
		case dto.MetricType_SUMMARY:
			if metric.Summary == nil {
				return written, fmt.Errorf(
					"expected summary in metric %s %s", name, metric,
				)
			}
			// One line per quantile, then the _sum and _count lines.
			for _, q := range metric.Summary.Quantile {
				n, err = writeSample(
					w, name, "", metric,
					model.QuantileLabel, q.GetQuantile(),
					q.GetValue(),
				)
				written += n
				if err != nil {
					return
				}
			}
			n, err = writeSample(
				w, name, "_sum", metric, "", 0,
				metric.Summary.GetSampleSum(),
			)
			written += n
			if err != nil {
				return
			}
			n, err = writeSample(
				w, name, "_count", metric, "", 0,
				float64(metric.Summary.GetSampleCount()),
			)
		case dto.MetricType_HISTOGRAM:
			if metric.Histogram == nil {
				return written, fmt.Errorf(
					"expected histogram in metric %s %s", name, metric,
				)
			}
			// One _bucket line per bucket (plus a synthesized +Inf
			// bucket if none was present), then _sum and _count.
			infSeen := false
			for _, b := range metric.Histogram.Bucket {
				n, err = writeSample(
					w, name, "_bucket", metric,
					model.BucketLabel, b.GetUpperBound(),
					float64(b.GetCumulativeCount()),
				)
				written += n
				if err != nil {
					return
				}
				if math.IsInf(b.GetUpperBound(), +1) {
					infSeen = true
				}
			}
			if !infSeen {
				n, err = writeSample(
					w, name, "_bucket", metric,
					model.BucketLabel, math.Inf(+1),
					float64(metric.Histogram.GetSampleCount()),
				)
				written += n
				if err != nil {
					return
				}
			}
			n, err = writeSample(
				w, name, "_sum", metric, "", 0,
				metric.Histogram.GetSampleSum(),
			)
			written += n
			if err != nil {
				return
			}
			n, err = writeSample(
				w, name, "_count", metric, "", 0,
				float64(metric.Histogram.GetSampleCount()),
			)
		default:
			return written, fmt.Errorf(
				"unexpected type in metric %s %s", name, metric,
			)
		}
		// Accounts for the last writeSample call of the case above.
		written += n
		if err != nil {
			return
		}
	}
	return
}
// writeSample writes a single sample in text format to w, given the metric
// name, the metric proto message itself, optionally an additional label name
// with a float64 value (use empty string as label name if not required), and
// the value. The function returns the number of bytes written and any error
// encountered. Even on error the returned count reflects the bytes written so
// far.
func writeSample(
	w enhancedWriter,
	name, suffix string,
	metric *dto.Metric,
	additionalLabelName string, additionalLabelValue float64,
	value float64,
) (int, error) {
	var written int
	// Metric name, optionally followed by a suffix such as "_sum".
	n, err := w.WriteString(name)
	written += n
	if err != nil {
		return written, err
	}
	if suffix != "" {
		n, err = w.WriteString(suffix)
		written += n
		if err != nil {
			return written, err
		}
	}
	// Label pairs in '{...}' (nothing is written when there are none).
	n, err = writeLabelPairs(
		w, metric.Label, additionalLabelName, additionalLabelValue,
	)
	written += n
	if err != nil {
		return written, err
	}
	err = w.WriteByte(' ')
	written++
	if err != nil {
		return written, err
	}
	n, err = writeFloat(w, value)
	written += n
	if err != nil {
		return written, err
	}
	// Optional timestamp in milliseconds.
	if metric.TimestampMs != nil {
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return written, err
		}
		n, err = writeInt(w, *metric.TimestampMs)
		written += n
		if err != nil {
			return written, err
		}
	}
	err = w.WriteByte('\n')
	written++
	if err != nil {
		return written, err
	}
	return written, nil
}
// writeLabelPairs converts a slice of LabelPair proto messages plus the
// explicitly given additional label pair into text formatted as required by the
// text format and writes it to 'w'. An empty slice in combination with an empty
// string 'additionalLabelName' results in nothing being written. Otherwise, the
// label pairs are written, escaped as required by the text format, and enclosed
// in '{...}'. The function returns the number of bytes written and any error
// encountered.
func writeLabelPairs(
	w enhancedWriter,
	in []*dto.LabelPair,
	additionalLabelName string, additionalLabelValue float64,
) (int, error) {
	if len(in) == 0 && additionalLabelName == "" {
		return 0, nil
	}
	var (
		written int
		// The first pair is preceded by '{'; every following one by ','.
		separator byte = '{'
	)
	for _, lp := range in {
		err := w.WriteByte(separator)
		written++
		if err != nil {
			return written, err
		}
		n, err := w.WriteString(lp.GetName())
		written += n
		if err != nil {
			return written, err
		}
		n, err = w.WriteString(`="`)
		written += n
		if err != nil {
			return written, err
		}
		// Label values are escaped including double quotes.
		n, err = writeEscapedString(w, lp.GetValue(), true)
		written += n
		if err != nil {
			return written, err
		}
		err = w.WriteByte('"')
		written++
		if err != nil {
			return written, err
		}
		separator = ','
	}
	if additionalLabelName != "" {
		err := w.WriteByte(separator)
		written++
		if err != nil {
			return written, err
		}
		n, err := w.WriteString(additionalLabelName)
		written += n
		if err != nil {
			return written, err
		}
		n, err = w.WriteString(`="`)
		written += n
		if err != nil {
			return written, err
		}
		// The additional label value is a float and needs no escaping.
		n, err = writeFloat(w, additionalLabelValue)
		written += n
		if err != nil {
			return written, err
		}
		err = w.WriteByte('"')
		written++
		if err != nil {
			return written, err
		}
	}
	err := w.WriteByte('}')
	written++
	if err != nil {
		return written, err
	}
	return written, nil
}
// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
var (
	// escaper is used for HELP text; quotedEscaper additionally escapes
	// double quotes and is used for label values.
	escaper       = strings.NewReplacer("\\", `\\`, "\n", `\n`)
	quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
)
func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { | |||
if includeDoubleQuote { | |||
return quotedEscaper.WriteString(w, v) | |||
} else { | |||
return escaper.WriteString(w, v) | |||
} | |||
} | |||
// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
// a few common cases for increased efficiency. For non-hardcoded cases, it uses
// strconv.AppendFloat to avoid allocations, similar to writeInt.
func writeFloat(w enhancedWriter, f float64) (int, error) {
	switch {
	case f == 1:
		return 1, w.WriteByte('1')
	case f == 0:
		// NOTE(review): negative zero also compares equal to 0 and is
		// therefore written as "0" — confirm this matches the intended
		// text-format output.
		return 1, w.WriteByte('0')
	case f == -1:
		return w.WriteString("-1")
	case math.IsNaN(f):
		return w.WriteString("NaN")
	case math.IsInf(f, +1):
		return w.WriteString("+Inf")
	case math.IsInf(f, -1):
		return w.WriteString("-Inf")
	default:
		// Format into a pooled scratch slice to avoid an allocation.
		bp := numBufPool.Get().(*[]byte)
		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
		written, err := w.Write(*bp)
		numBufPool.Put(bp)
		return written, err
	}
}
// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
// allocations.
func writeInt(w enhancedWriter, i int64) (int, error) {
	bp := numBufPool.Get().(*[]byte)
	// Reuse the pooled backing array; the (possibly grown) slice is what
	// goes back into the pool.
	*bp = strconv.AppendInt((*bp)[:0], i, 10)
	written, err := w.Write(*bp)
	numBufPool.Put(bp)
	return written, err
}
@@ -0,0 +1,757 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package expfmt | |||
import ( | |||
"bufio" | |||
"bytes" | |||
"fmt" | |||
"io" | |||
"math" | |||
"strconv" | |||
"strings" | |||
dto "github.com/prometheus/client_model/go" | |||
"github.com/golang/protobuf/proto" | |||
"github.com/prometheus/common/model" | |||
) | |||
// A stateFn is a function that represents a state in a state machine. By
// executing it, the state is progressed to the next state. The stateFn returns
// another stateFn, which represents the new state. The end state is represented
// by nil. (TextParser.TextToMetricFamilies drives this chain of states.)
type stateFn func() stateFn
// ParseError signals errors while parsing the simple and flat text-based
// exchange format.
type ParseError struct {
	Line int
	Msg string
}

// Error implements the error interface, reporting the line number together
// with the message.
func (e ParseError) Error() string {
	return "text format parsing error in line " + strconv.Itoa(e.Line) + ": " + e.Msg
}
// TextParser is used to parse the simple and flat text-based exchange format. Its
// zero value is ready to use.
type TextParser struct {
	// Accumulates the parse result; keyed by metric family name.
	metricFamiliesByName map[string]*dto.MetricFamily
	buf *bufio.Reader // Where the parsed input is read through.
	err error // Most recent error.
	lineCount int // Tracks the line count for error messages.
	currentByte byte // The most recent byte read.
	currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
	// The metric family, metric and label pair currently being built.
	currentMF *dto.MetricFamily
	currentMetric *dto.Metric
	currentLabelPair *dto.LabelPair
	// The remaining member variables are only used for summaries/histograms.
	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
	// Summary specific.
	summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
	currentQuantile float64 // Value of the 'quantile' label; NaN if none seen.
	// Histogram specific.
	histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
	currentBucket float64 // Value of the 'le' label; NaN if none seen.
	// These tell us if the currently processed line ends on '_count' or
	// '_sum' respectively and belong to a summary/histogram, representing the sample
	// count and sum of that summary/histogram.
	currentIsSummaryCount, currentIsSummarySum bool
	currentIsHistogramCount, currentIsHistogramSum bool
}
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
// format and creates MetricFamily proto messages. It returns the MetricFamily
// proto messages in a map where the metric names are the keys, along with any
// error encountered.
//
// If the input contains duplicate metrics (i.e. lines with the same metric name
// and exactly the same label set), the resulting MetricFamily will contain
// duplicate Metric proto messages. Similar is true for duplicate label
// names. Checks for duplicates have to be performed separately, if required.
// Also note that neither the metrics within each MetricFamily are sorted nor
// the label pairs within each Metric. Sorting is not required for the most
// frequent use of this method, which is sample ingestion in the Prometheus
// server. However, for presentation purposes, you might want to sort the
// metrics, and in some cases, you must sort the labels, e.g. for consumption by
// the metric family injection hook of the Prometheus registry.
//
// Summaries and histograms are rather special beasts. You would probably not
// use them in the simple text format anyway. This method can deal with
// summaries and histograms if they are presented in exactly the way the
// text.Create function creates them.
//
// This method must not be called concurrently. If you want to parse different
// input concurrently, instantiate a separate Parser for each goroutine.
func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
	p.reset(in)
	for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
		// Run the state machine: each state function consumes input,
		// mutates the parser state, and returns the next state. A nil
		// state (or an error recorded in p.err) ends the loop.
	}
	// Get rid of empty metric families.
	for k, mf := range p.metricFamiliesByName {
		if len(mf.GetMetric()) == 0 {
			delete(p.metricFamiliesByName, k)
		}
	}
	// If p.err is io.EOF now, we have run into a premature end of the input
	// stream. Turn this error into something nicer and more
	// meaningful. (io.EOF is often used as a signal for the legitimate end
	// of an input stream.)
	if p.err == io.EOF {
		p.parseError("unexpected end of input stream")
	}
	return p.metricFamiliesByName, p.err
}
func (p *TextParser) reset(in io.Reader) { | |||
p.metricFamiliesByName = map[string]*dto.MetricFamily{} | |||
if p.buf == nil { | |||
p.buf = bufio.NewReader(in) | |||
} else { | |||
p.buf.Reset(in) | |||
} | |||
p.err = nil | |||
p.lineCount = 0 | |||
if p.summaries == nil || len(p.summaries) > 0 { | |||
p.summaries = map[uint64]*dto.Metric{} | |||
} | |||
if p.histograms == nil || len(p.histograms) > 0 { | |||
p.histograms = map[uint64]*dto.Metric{} | |||
} | |||
p.currentQuantile = math.NaN() | |||
p.currentBucket = math.NaN() | |||
} | |||
// startOfLine represents the state where the next byte read from p.buf is the | |||
// start of a line (or whitespace leading up to it). | |||
func (p *TextParser) startOfLine() stateFn { | |||
p.lineCount++ | |||
if p.skipBlankTab(); p.err != nil { | |||
// End of input reached. This is the only case where | |||
// that is not an error but a signal that we are done. | |||
p.err = nil | |||
return nil | |||
} | |||
switch p.currentByte { | |||
case '#': | |||
return p.startComment | |||
case '\n': | |||
return p.startOfLine // Empty line, start the next one. | |||
} | |||
return p.readingMetricName | |||
} | |||
// startComment represents the state where the next byte read from p.buf is the
// start of a comment (or whitespace leading up to it).
func (p *TextParser) startComment() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	// The first token after '#' decides whether this is a HELP/TYPE line
	// or an ordinary comment.
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	// If we have hit the end of line already, there is nothing left
	// to do. This is not considered a syntax error.
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	keyword := p.currentToken.String()
	if keyword != "HELP" && keyword != "TYPE" {
		// Generic comment, ignore by fast forwarding to end of line.
		for p.currentByte != '\n' {
			if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
				return nil // Unexpected end of input.
			}
		}
		return p.startOfLine
	}
	// There is something. Next has to be a metric name.
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.readTokenAsMetricName(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		// At the end of the line already.
		// Again, this is not considered a syntax error.
		return p.startOfLine
	}
	if !isBlankOrTab(p.currentByte) {
		p.parseError("invalid metric name in comment")
		return nil
	}
	p.setOrCreateCurrentMF()
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		// At the end of the line already.
		// Again, this is not considered a syntax error.
		return p.startOfLine
	}
	switch keyword {
	case "HELP":
		return p.readingHelp
	case "TYPE":
		return p.readingType
	}
	// Unreachable: keyword was vetted to be HELP or TYPE above.
	panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
}
// readingMetricName represents the state where the last byte read (now in
// p.currentByte) is the first byte of a metric name.
func (p *TextParser) readingMetricName() stateFn {
	if p.readTokenAsMetricName(); p.err != nil {
		return nil
	}
	if p.currentToken.Len() == 0 {
		p.parseError("invalid metric name")
		return nil
	}
	p.setOrCreateCurrentMF()
	// Now is the time to fix the type if it hasn't happened yet.
	// (No TYPE line was seen for this family, so default to UNTYPED.)
	if p.currentMF.Type == nil {
		p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
	}
	p.currentMetric = &dto.Metric{}
	// Do not append the newly created currentMetric to
	// currentMF.Metric right now. First wait if this is a summary,
	// and the metric exists already, which we can only know after
	// having read all the labels.
	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	return p.readingLabels
}
// readingLabels represents the state where the last byte read (now in | |||
// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the | |||
// first byte of the value (otherwise). | |||
func (p *TextParser) readingLabels() stateFn { | |||
// Summaries/histograms are special. We have to reset the | |||
// currentLabels map, currentQuantile and currentBucket before starting to | |||
// read labels. | |||
if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { | |||
p.currentLabels = map[string]string{} | |||
p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() | |||
p.currentQuantile = math.NaN() | |||
p.currentBucket = math.NaN() | |||
} | |||
if p.currentByte != '{' { | |||
return p.readingValue | |||
} | |||
return p.startLabelName | |||
} | |||
// startLabelName represents the state where the next byte read from p.buf is
// the start of a label name (or whitespace leading up to it).
func (p *TextParser) startLabelName() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	// An immediate '}' closes a (possibly empty) label set; the value follows.
	if p.currentByte == '}' {
		if p.skipBlankTab(); p.err != nil {
			return nil // Unexpected end of input.
		}
		return p.readingValue
	}
	if p.readTokenAsLabelName(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentToken.Len() == 0 {
		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
		return nil
	}
	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
	// '__name__' is reserved for the metric name itself.
	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
		return nil
	}
	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
	// labels to 'real' labels.
	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
	}
	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte != '=' {
		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
		return nil
	}
	return p.startLabelValue
}
// startLabelValue represents the state where the next byte read from p.buf is
// the start of a (quoted) label value (or whitespace leading up to it).
func (p *TextParser) startLabelValue() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte != '"' {
		p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
		return nil
	}
	if p.readTokenAsLabelValue(); p.err != nil {
		return nil
	}
	if !model.LabelValue(p.currentToken.String()).IsValid() {
		p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
		return nil
	}
	p.currentLabelPair.Value = proto.String(p.currentToken.String())
	// Special treatment of summaries:
	// - Quantile labels are special, will result in dto.Quantile later.
	// - Other labels have to be added to currentLabels for signature calculation.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
		if p.currentLabelPair.GetName() == model.QuantileLabel {
			if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
				// Create a more helpful error message.
				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
				return nil
			}
		} else {
			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
		}
	}
	// Similar special treatment of histograms: 'le' becomes a bucket bound.
	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
		if p.currentLabelPair.GetName() == model.BucketLabel {
			if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
				// Create a more helpful error message.
				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
				return nil
			}
		} else {
			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
		}
	}
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	// After the closing '"', either another label follows (','), the label
	// set ends ('}'), or the line is malformed.
	switch p.currentByte {
	case ',':
		return p.startLabelName
	case '}':
		if p.skipBlankTab(); p.err != nil {
			return nil // Unexpected end of input.
		}
		return p.readingValue
	default:
		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
		return nil
	}
}
// readingValue represents the state where the last byte read (now in
// p.currentByte) is the first byte of the sample value (i.e. a float).
func (p *TextParser) readingValue() stateFn {
	// When we are here, we have read all the labels, so for the
	// special case of a summary/histogram, we can finally find out
	// if the metric already exists.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
		signature := model.LabelsToSignature(p.currentLabels)
		if summary := p.summaries[signature]; summary != nil {
			p.currentMetric = summary
		} else {
			p.summaries[signature] = p.currentMetric
			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
		}
	} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
		signature := model.LabelsToSignature(p.currentLabels)
		if histogram := p.histograms[signature]; histogram != nil {
			p.currentMetric = histogram
		} else {
			p.histograms[signature] = p.currentMetric
			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
		}
	} else {
		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	value, err := strconv.ParseFloat(p.currentToken.String(), 64)
	if err != nil {
		// Create a more helpful error message.
		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
		return nil
	}
	// Store the value in the slot appropriate for the metric family's type.
	switch p.currentMF.GetType() {
	case dto.MetricType_COUNTER:
		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
	case dto.MetricType_GAUGE:
		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
	case dto.MetricType_UNTYPED:
		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
	case dto.MetricType_SUMMARY:
		// *sigh*
		if p.currentMetric.Summary == nil {
			p.currentMetric.Summary = &dto.Summary{}
		}
		// The flags set by setOrCreateCurrentMF and the quantile label
		// decide which part of the summary this sample fills in.
		switch {
		case p.currentIsSummaryCount:
			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
		case p.currentIsSummarySum:
			p.currentMetric.Summary.SampleSum = proto.Float64(value)
		case !math.IsNaN(p.currentQuantile):
			p.currentMetric.Summary.Quantile = append(
				p.currentMetric.Summary.Quantile,
				&dto.Quantile{
					Quantile: proto.Float64(p.currentQuantile),
					Value: proto.Float64(value),
				},
			)
		}
	case dto.MetricType_HISTOGRAM:
		// *sigh*
		if p.currentMetric.Histogram == nil {
			p.currentMetric.Histogram = &dto.Histogram{}
		}
		switch {
		case p.currentIsHistogramCount:
			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
		case p.currentIsHistogramSum:
			p.currentMetric.Histogram.SampleSum = proto.Float64(value)
		case !math.IsNaN(p.currentBucket):
			p.currentMetric.Histogram.Bucket = append(
				p.currentMetric.Histogram.Bucket,
				&dto.Bucket{
					UpperBound: proto.Float64(p.currentBucket),
					CumulativeCount: proto.Uint64(uint64(value)),
				},
			)
		}
	default:
		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
	}
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	return p.startTimestamp
}
// startTimestamp represents the state where the next byte read from p.buf is
// the start of the timestamp (or whitespace leading up to it).
func (p *TextParser) startTimestamp() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	// Timestamps are integer milliseconds in the text format.
	timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
	if err != nil {
		// Create a more helpful error message.
		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
		return nil
	}
	p.currentMetric.TimestampMs = proto.Int64(timestamp)
	// Nothing else is allowed on the line after the timestamp.
	if p.readTokenUntilNewline(false); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentToken.Len() > 0 {
		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
		return nil
	}
	return p.startOfLine
}
// readingHelp represents the state where the last byte read (now in
// p.currentByte) is the first byte of the docstring after 'HELP'.
func (p *TextParser) readingHelp() stateFn {
	// Only one HELP line is allowed per metric family.
	if p.currentMF.Help != nil {
		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
		return nil
	}
	// Rest of line is the docstring. Escape sequences ('\\', '\n') are
	// recognized here.
	if p.readTokenUntilNewline(true); p.err != nil {
		return nil // Unexpected end of input.
	}
	p.currentMF.Help = proto.String(p.currentToken.String())
	return p.startOfLine
}
// readingType represents the state where the last byte read (now in
// p.currentByte) is the first byte of the type hint after 'TYPE'.
func (p *TextParser) readingType() stateFn {
	// Only one TYPE line is allowed per metric family, and it must come
	// before any of its samples.
	if p.currentMF.Type != nil {
		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
		return nil
	}
	// Rest of line is the type.
	if p.readTokenUntilNewline(false); p.err != nil {
		return nil // Unexpected end of input.
	}
	// The type keyword is matched case-insensitively against the proto enum.
	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
	if !ok {
		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
		return nil
	}
	p.currentMF.Type = dto.MetricType(metricType).Enum()
	return p.startOfLine
}
// parseError sets p.err to a ParseError at the current line with the given | |||
// message. | |||
func (p *TextParser) parseError(msg string) { | |||
p.err = ParseError{ | |||
Line: p.lineCount, | |||
Msg: msg, | |||
} | |||
} | |||
// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte | |||
// that is neither ' ' nor '\t'. That byte is left in p.currentByte. | |||
func (p *TextParser) skipBlankTab() { | |||
for { | |||
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { | |||
return | |||
} | |||
} | |||
} | |||
// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do | |||
// anything if p.currentByte is neither ' ' nor '\t'. | |||
func (p *TextParser) skipBlankTabIfCurrentBlankTab() { | |||
if isBlankOrTab(p.currentByte) { | |||
p.skipBlankTab() | |||
} | |||
} | |||
// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The | |||
// first byte considered is the byte already read (now in p.currentByte). The | |||
// first whitespace byte encountered is still copied into p.currentByte, but not | |||
// into p.currentToken. | |||
func (p *TextParser) readTokenUntilWhitespace() { | |||
p.currentToken.Reset() | |||
for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { | |||
p.currentToken.WriteByte(p.currentByte) | |||
p.currentByte, p.err = p.buf.ReadByte() | |||
} | |||
} | |||
// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
// byte considered is the byte already read (now in p.currentByte). The first
// newline byte encountered is still copied into p.currentByte, but not into
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
// recognized: '\\' translates into '\', and '\n' into a line-feed character.
// All other escape sequences are invalid and cause an error.
func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
	p.currentToken.Reset()
	escaped := false
	for p.err == nil {
		if recognizeEscapeSequence && escaped {
			// Previous byte was '\'; translate the escape sequence.
			switch p.currentByte {
			case '\\':
				p.currentToken.WriteByte(p.currentByte)
			case 'n':
				p.currentToken.WriteByte('\n')
			default:
				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
				return
			}
			escaped = false
		} else {
			switch p.currentByte {
			case '\n':
				return
			case '\\':
				// Remember the backslash; it only matters if
				// recognizeEscapeSequence is set (checked above).
				escaped = true
			default:
				p.currentToken.WriteByte(p.currentByte)
			}
		}
		p.currentByte, p.err = p.buf.ReadByte()
	}
}
// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. | |||
// The first byte considered is the byte already read (now in p.currentByte). | |||
// The first byte not part of a metric name is still copied into p.currentByte, | |||
// but not into p.currentToken. | |||
func (p *TextParser) readTokenAsMetricName() { | |||
p.currentToken.Reset() | |||
if !isValidMetricNameStart(p.currentByte) { | |||
return | |||
} | |||
for { | |||
p.currentToken.WriteByte(p.currentByte) | |||
p.currentByte, p.err = p.buf.ReadByte() | |||
if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { | |||
return | |||
} | |||
} | |||
} | |||
// readTokenAsLabelName copies a label name from p.buf into p.currentToken. | |||
// The first byte considered is the byte already read (now in p.currentByte). | |||
// The first byte not part of a label name is still copied into p.currentByte, | |||
// but not into p.currentToken. | |||
func (p *TextParser) readTokenAsLabelName() { | |||
p.currentToken.Reset() | |||
if !isValidLabelNameStart(p.currentByte) { | |||
return | |||
} | |||
for { | |||
p.currentToken.WriteByte(p.currentByte) | |||
p.currentByte, p.err = p.buf.ReadByte() | |||
if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { | |||
return | |||
} | |||
} | |||
} | |||
// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
// In contrast to the other 'readTokenAs...' functions, which start with the
// last read byte in p.currentByte, this method ignores p.currentByte and starts
// with reading a new byte from p.buf. The first byte not part of a label value
// is still copied into p.currentByte, but not into p.currentToken.
func (p *TextParser) readTokenAsLabelValue() {
	p.currentToken.Reset()
	escaped := false
	for {
		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
			return
		}
		if escaped {
			// Previous byte was '\'; only '\"', '\\' and '\n' are valid.
			switch p.currentByte {
			case '"', '\\':
				p.currentToken.WriteByte(p.currentByte)
			case 'n':
				p.currentToken.WriteByte('\n')
			default:
				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
				return
			}
			escaped = false
			continue
		}
		switch p.currentByte {
		case '"':
			// Closing quote ends the value.
			return
		case '\n':
			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
			return
		case '\\':
			escaped = true
		default:
			p.currentToken.WriteByte(p.currentByte)
		}
	}
}
// setOrCreateCurrentMF sets p.currentMF to the MetricFamily for the name held
// in p.currentToken, creating a new one if none exists. Names ending in
// '_sum', '_count' or '_bucket' are also matched against existing summary and
// histogram families, and the currentIs... flags are set accordingly.
func (p *TextParser) setOrCreateCurrentMF() {
	p.currentIsSummaryCount = false
	p.currentIsSummarySum = false
	p.currentIsHistogramCount = false
	p.currentIsHistogramSum = false
	name := p.currentToken.String()
	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
		return
	}
	// Try out if this is a _sum or _count for a summary/histogram.
	summaryName := summaryMetricName(name)
	if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
		if p.currentMF.GetType() == dto.MetricType_SUMMARY {
			if isCount(name) {
				p.currentIsSummaryCount = true
			}
			if isSum(name) {
				p.currentIsSummarySum = true
			}
			return
		}
	}
	// Same for histograms, additionally matching the _bucket suffix.
	histogramName := histogramMetricName(name)
	if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
		if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
			if isCount(name) {
				p.currentIsHistogramCount = true
			}
			if isSum(name) {
				p.currentIsHistogramSum = true
			}
			return
		}
	}
	// Unknown name: start a fresh metric family.
	p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
	p.metricFamiliesByName[name] = p.currentMF
}
// isValidLabelNameStart reports whether b may begin a label name
// (ASCII letter or underscore).
func isValidLabelNameStart(b byte) bool {
	switch {
	case b >= 'a' && b <= 'z', b >= 'A' && b <= 'Z', b == '_':
		return true
	default:
		return false
	}
}
func isValidLabelNameContinuation(b byte) bool { | |||
return isValidLabelNameStart(b) || (b >= '0' && b <= '9') | |||
} | |||
func isValidMetricNameStart(b byte) bool { | |||
return isValidLabelNameStart(b) || b == ':' | |||
} | |||
func isValidMetricNameContinuation(b byte) bool { | |||
return isValidLabelNameContinuation(b) || b == ':' | |||
} | |||
// isBlankOrTab reports whether b is a space or a tab character.
func isBlankOrTab(b byte) bool {
	switch b {
	case ' ', '\t':
		return true
	}
	return false
}
// isCount reports whether name ends in "_count" with a non-empty base name.
func isCount(name string) bool {
	return len(name) > len("_count") && strings.HasSuffix(name, "_count")
}
// isSum reports whether name ends in "_sum" with a non-empty base name.
func isSum(name string) bool {
	return len(name) > len("_sum") && strings.HasSuffix(name, "_sum")
}
// isBucket reports whether name ends in "_bucket" with a non-empty base name.
func isBucket(name string) bool {
	return len(name) > len("_bucket") && strings.HasSuffix(name, "_bucket")
}
func summaryMetricName(name string) string { | |||
switch { | |||
case isCount(name): | |||
return name[:len(name)-6] | |||
case isSum(name): | |||
return name[:len(name)-4] | |||
default: | |||
return name | |||
} | |||
} | |||
func histogramMetricName(name string) string { | |||
switch { | |||
case isCount(name): | |||
return name[:len(name)-6] | |||
case isSum(name): | |||
return name[:len(name)-4] | |||
case isBucket(name): | |||
return name[:len(name)-7] | |||
default: | |||
return name | |||
} | |||
} |
@@ -0,0 +1,162 @@ | |||
/* | |||
HTTP Content-Type Autonegotiation. | |||
The functions in this package implement the behaviour specified in | |||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html | |||
Copyright (c) 2011, Open Knowledge Foundation Ltd. | |||
All rights reserved. | |||
Redistribution and use in source and binary forms, with or without | |||
modification, are permitted provided that the following conditions are | |||
met: | |||
Redistributions of source code must retain the above copyright | |||
notice, this list of conditions and the following disclaimer. | |||
Redistributions in binary form must reproduce the above copyright | |||
notice, this list of conditions and the following disclaimer in | |||
the documentation and/or other materials provided with the | |||
distribution. | |||
Neither the name of the Open Knowledge Foundation Ltd. nor the | |||
names of its contributors may be used to endorse or promote | |||
products derived from this software without specific prior written | |||
permission. | |||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
*/ | |||
package goautoneg | |||
import ( | |||
"sort" | |||
"strconv" | |||
"strings" | |||
) | |||
// Accept represents a single clause in an HTTP Accept header.
type Accept struct {
	Type, SubType string
	Q float64
	Params map[string]string
}

// accept_slice adapts []Accept to sort.Interface so clauses can be ordered by
// descending preference (higher q first, concrete types before wildcards).
type accept_slice []Accept

func (s accept_slice) Len() int {
	return len(s)
}

func (s accept_slice) Less(i, j int) bool {
	a, b := s[i], s[j]
	if a.Q > b.Q {
		return true
	}
	if a.Type != "*" && b.Type == "*" {
		return true
	}
	if a.SubType != "*" && b.SubType == "*" {
		return true
	}
	return false
}

func (s accept_slice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
// Parse an Accept Header string returning a sorted list | |||
// of clauses | |||
func ParseAccept(header string) (accept []Accept) { | |||
parts := strings.Split(header, ",") | |||
accept = make([]Accept, 0, len(parts)) | |||
for _, part := range parts { | |||
part := strings.Trim(part, " ") | |||
a := Accept{} | |||
a.Params = make(map[string]string) | |||
a.Q = 1.0 | |||
mrp := strings.Split(part, ";") | |||
media_range := mrp[0] | |||
sp := strings.Split(media_range, "/") | |||
a.Type = strings.Trim(sp[0], " ") | |||
switch { | |||
case len(sp) == 1 && a.Type == "*": | |||
a.SubType = "*" | |||
case len(sp) == 2: | |||
a.SubType = strings.Trim(sp[1], " ") | |||
default: | |||
continue | |||
} | |||
if len(mrp) == 1 { | |||
accept = append(accept, a) | |||
continue | |||
} | |||
for _, param := range mrp[1:] { | |||
sp := strings.SplitN(param, "=", 2) | |||
if len(sp) != 2 { | |||
continue | |||
} | |||
token := strings.Trim(sp[0], " ") | |||
if token == "q" { | |||
a.Q, _ = strconv.ParseFloat(sp[1], 32) | |||
} else { | |||
a.Params[token] = strings.Trim(sp[1], " ") | |||
} | |||
} | |||
accept = append(accept, a) | |||
} | |||
slice := accept_slice(accept) | |||
sort.Sort(slice) | |||
return | |||
} | |||
// Negotiate the most appropriate content_type given the accept header | |||
// and a list of alternatives. | |||
func Negotiate(header string, alternatives []string) (content_type string) { | |||
asp := make([][]string, 0, len(alternatives)) | |||
for _, ctype := range alternatives { | |||
asp = append(asp, strings.SplitN(ctype, "/", 2)) | |||
} | |||
for _, clause := range ParseAccept(header) { | |||
for i, ctsp := range asp { | |||
if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { | |||
content_type = alternatives[i] | |||
return | |||
} | |||
if clause.Type == ctsp[0] && clause.SubType == "*" { | |||
content_type = alternatives[i] | |||
return | |||
} | |||
if clause.Type == "*" && clause.SubType == "*" { | |||
content_type = alternatives[i] | |||
return | |||
} | |||
} | |||
} | |||
return | |||
} |
@@ -0,0 +1,136 @@ | |||
// Copyright 2013 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package model | |||
import ( | |||
"fmt" | |||
"time" | |||
) | |||
// AlertStatus expresses the activity state of an alert: "firing" while its
// activity interval is open, "resolved" once it has ended.
type AlertStatus string

const (
	// AlertFiring marks an alert whose activity interval has not ended.
	AlertFiring AlertStatus = "firing"
	// AlertResolved marks an alert whose activity interval lies in the past.
	AlertResolved AlertStatus = "resolved"
)

// Alert is a generic representation of an alert in the Prometheus eco-system.
type Alert struct {
	// Label value pairs for purpose of aggregation, matching, and disposition
	// dispatching. This must minimally include an "alertname" label.
	Labels LabelSet `json:"labels"`

	// Extra key/value information which does not define alert identity.
	Annotations LabelSet `json:"annotations"`

	// The known time range for this alert. Both ends are optional.
	StartsAt time.Time `json:"startsAt,omitempty"`
	EndsAt   time.Time `json:"endsAt,omitempty"`

	// GeneratorURL identifies the producer of this alert.
	// NOTE(review): presumably a link back to the originating rule/expression —
	// confirm against callers.
	GeneratorURL string `json:"generatorURL"`
}
// Name returns the name of the alert. It is equivalent to the "alertname"
// label; if that label is absent, the empty string is returned.
func (a *Alert) Name() string {
	return string(a.Labels[AlertNameLabel])
}

// Fingerprint returns a unique hash for the alert. It is equivalent to
// the fingerprint of the alert's label set.
func (a *Alert) Fingerprint() Fingerprint {
	return a.Labels.Fingerprint()
}
func (a *Alert) String() string { | |||
s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) | |||
if a.Resolved() { | |||
return s + "[resolved]" | |||
} | |||
return s + "[active]" | |||
} | |||
// Resolved returns true iff the activity interval ended in the past.
func (a *Alert) Resolved() bool {
	return a.ResolvedAt(time.Now())
}

// ResolvedAt returns true iff the activity interval ended before
// the given timestamp. A zero EndsAt (open-ended alert) is never
// considered resolved.
func (a *Alert) ResolvedAt(ts time.Time) bool {
	if a.EndsAt.IsZero() {
		return false
	}
	return !a.EndsAt.After(ts)
}
// Status returns the status of the alert. | |||
func (a *Alert) Status() AlertStatus { | |||
if a.Resolved() { | |||
return AlertResolved | |||
} | |||
return AlertFiring | |||
} | |||
// Validate checks whether the alert data is inconsistent. It returns the
// first problem found, in this order: missing start time, end time before
// start time, invalid label names/values, empty label set, invalid
// annotations.
func (a *Alert) Validate() error {
	if a.StartsAt.IsZero() {
		return fmt.Errorf("start time missing")
	}
	// An unset (zero) EndsAt is legal: the alert is still open-ended.
	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
		return fmt.Errorf("start time must be before end time")
	}
	if err := a.Labels.Validate(); err != nil {
		return fmt.Errorf("invalid label set: %s", err)
	}
	if len(a.Labels) == 0 {
		return fmt.Errorf("at least one label pair required")
	}
	if err := a.Annotations.Validate(); err != nil {
		return fmt.Errorf("invalid annotations: %s", err)
	}
	return nil
}
// Alerts is a list of alerts that can be sorted in chronological order.
type Alerts []*Alert

func (as Alerts) Len() int      { return len(as) }
func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }

// Less orders by StartsAt, then EndsAt, with the fingerprint as the final
// tie breaker.
// NOTE(review): the EndsAt comparison is reached even when
// as[j].StartsAt is strictly before as[i].StartsAt, so this is not a
// strict weak ordering for all inputs — confirm against upstream before
// relying on a total order from sort.
func (as Alerts) Less(i, j int) bool {
	if as[i].StartsAt.Before(as[j].StartsAt) {
		return true
	}
	if as[i].EndsAt.Before(as[j].EndsAt) {
		return true
	}
	return as[i].Fingerprint() < as[j].Fingerprint()
}
// HasFiring returns true iff one of the alerts is not resolved. | |||
func (as Alerts) HasFiring() bool { | |||
for _, a := range as { | |||
if !a.Resolved() { | |||
return true | |||
} | |||
} | |||
return false | |||
} | |||
// Status returns StatusFiring iff at least one of the alerts is firing. | |||
func (as Alerts) Status() AlertStatus { | |||
if as.HasFiring() { | |||
return AlertFiring | |||
} | |||
return AlertResolved | |||
} |
@@ -0,0 +1,105 @@ | |||
// Copyright 2013 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package model | |||
import ( | |||
"fmt" | |||
"strconv" | |||
) | |||
// Fingerprint provides a hash-capable representation of a Metric.
// For our purposes, FNV-1A 64-bit is used.
type Fingerprint uint64

// FingerprintFromString transforms a hexadecimal string representation
// into a Fingerprint. On parse failure the error is returned alongside
// whatever strconv.ParseUint yielded.
func FingerprintFromString(s string) (Fingerprint, error) {
	v, err := strconv.ParseUint(s, 16, 64)
	return Fingerprint(v), err
}

// ParseFingerprint parses the hexadecimal input string into a fingerprint,
// returning 0 on any parse error.
func ParseFingerprint(s string) (Fingerprint, error) {
	v, err := strconv.ParseUint(s, 16, 64)
	if err != nil {
		return 0, err
	}
	return Fingerprint(v), nil
}

// String renders the fingerprint as a fixed-width, zero-padded,
// 16-character hexadecimal string.
func (f Fingerprint) String() string {
	return fmt.Sprintf("%016x", uint64(f))
}

// Fingerprints represents a collection of Fingerprint subject to a given
// natural sorting scheme. It implements sort.Interface.
type Fingerprints []Fingerprint

// Len implements sort.Interface.
func (f Fingerprints) Len() int { return len(f) }

// Less implements sort.Interface.
func (f Fingerprints) Less(i, j int) bool { return f[i] < f[j] }

// Swap implements sort.Interface.
func (f Fingerprints) Swap(i, j int) { f[i], f[j] = f[j], f[i] }

// FingerprintSet is a set of Fingerprints.
type FingerprintSet map[Fingerprint]struct{}

// Equal returns true if both sets contain the same elements (and not more).
func (s FingerprintSet) Equal(o FingerprintSet) bool {
	if len(s) != len(o) {
		return false
	}
	for fp := range s {
		if _, ok := o[fp]; !ok {
			return false
		}
	}
	return true
}

// Intersection returns the elements contained in both sets. The smaller
// set is iterated to minimize lookups.
func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
	out := FingerprintSet{}
	small, large := s, o
	if len(o) < len(s) {
		small, large = o, s
	}
	for fp := range small {
		if _, ok := large[fp]; ok {
			out[fp] = struct{}{}
		}
	}
	return out
}
@@ -0,0 +1,42 @@ | |||
// Copyright 2015 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package model | |||
// Inline and byte-free variant of hash/fnv's fnv64a.

const (
	// offset64 is the FNV-1a 64-bit offset basis.
	offset64 = 14695981039346656037
	// prime64 is the FNV-1a 64-bit prime.
	prime64 = 1099511628211
)

// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
	return offset64
}

// hashAdd folds each byte of s into the fnv64a hash value h and returns
// the updated hash.
func hashAdd(h uint64, s string) uint64 {
	for _, b := range []byte(s) {
		h = (h ^ uint64(b)) * prime64
	}
	return h
}

// hashAddByte folds a single byte into the fnv64a hash value h and
// returns the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
	return (h ^ uint64(b)) * prime64
}
@@ -0,0 +1,210 @@ | |||
// Copyright 2013 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package model | |||
import ( | |||
"encoding/json" | |||
"fmt" | |||
"regexp" | |||
"strings" | |||
"unicode/utf8" | |||
) | |||
const (
	// AlertNameLabel is the name of the label containing an alert's name.
	AlertNameLabel = "alertname"

	// ExportedLabelPrefix is the prefix to prepend to the label names present in
	// exported metrics if a label of the same name is added by the server.
	ExportedLabelPrefix = "exported_"

	// MetricNameLabel is the label name indicating the metric name of a
	// timeseries.
	MetricNameLabel = "__name__"

	// SchemeLabel is the name of the label that holds the scheme on which to
	// scrape a target.
	SchemeLabel = "__scheme__"

	// AddressLabel is the name of the label that holds the address of
	// a scrape target.
	AddressLabel = "__address__"

	// MetricsPathLabel is the name of the label that holds the path on which to
	// scrape a target.
	MetricsPathLabel = "__metrics_path__"

	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
	// label names.
	ReservedLabelPrefix = "__"

	// MetaLabelPrefix is a prefix for labels that provide meta information.
	// Labels with this prefix are used for intermediate label processing and
	// will not be attached to time series.
	MetaLabelPrefix = "__meta_"

	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
	// Labels with this prefix are used for intermediate label processing and
	// will not be attached to time series. This is reserved for use in
	// Prometheus configuration files by users.
	TmpLabelPrefix = "__tmp_"

	// ParamLabelPrefix is a prefix for labels that provide URL parameters
	// used to scrape a target.
	ParamLabelPrefix = "__param_"

	// JobLabel is the label name indicating the job from which a timeseries
	// was scraped.
	JobLabel = "job"

	// InstanceLabel is the label name used for the instance label.
	InstanceLabel = "instance"

	// BucketLabel is used for the label that defines the upper bound of a
	// bucket of a histogram ("le" -> "less or equal").
	BucketLabel = "le"

	// QuantileLabel is used for the label that defines the quantile in a
	// summary.
	QuantileLabel = "quantile"
)
// LabelNameRE is a regular expression matching valid label names. Note that the
// IsValid method of LabelName performs the same check but faster than a match
// with this regular expression.
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")

// A LabelName is a key for a LabelSet or Metric. It has a value associated
// therewith.
type LabelName string

// IsValid reports whether the label name matches the pattern of
// LabelNameRE ([a-zA-Z_][a-zA-Z0-9_]*). It does not use the regular
// expression but a much faster hand-rolled check.
func (ln LabelName) IsValid() bool {
	if len(ln) == 0 {
		return false
	}
	for i, r := range ln {
		switch {
		case r >= 'a' && r <= 'z':
		case r >= 'A' && r <= 'Z':
		case r == '_':
		case r >= '0' && r <= '9':
			// Digits are legal everywhere except in the first position.
			if i == 0 {
				return false
			}
		default:
			return false
		}
	}
	return true
}
// UnmarshalYAML implements the yaml.Unmarshaler interface. | |||
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { | |||
var s string | |||
if err := unmarshal(&s); err != nil { | |||
return err | |||
} | |||
if !LabelName(s).IsValid() { | |||
return fmt.Errorf("%q is not a valid label name", s) | |||
} | |||
*ln = LabelName(s) | |||
return nil | |||
} | |||
// UnmarshalJSON implements the json.Unmarshaler interface. | |||
func (ln *LabelName) UnmarshalJSON(b []byte) error { | |||
var s string | |||
if err := json.Unmarshal(b, &s); err != nil { | |||
return err | |||
} | |||
if !LabelName(s).IsValid() { | |||
return fmt.Errorf("%q is not a valid label name", s) | |||
} | |||
*ln = LabelName(s) | |||
return nil | |||
} | |||
// LabelNames is a sortable LabelName slice. In implements sort.Interface. | |||
type LabelNames []LabelName | |||
func (l LabelNames) Len() int { | |||
return len(l) | |||
} | |||
func (l LabelNames) Less(i, j int) bool { | |||
return l[i] < l[j] | |||
} | |||
func (l LabelNames) Swap(i, j int) { | |||
l[i], l[j] = l[j], l[i] | |||
} | |||
func (l LabelNames) String() string { | |||
labelStrings := make([]string, 0, len(l)) | |||
for _, label := range l { | |||
labelStrings = append(labelStrings, string(label)) | |||
} | |||
return strings.Join(labelStrings, ", ") | |||
} | |||
// A LabelValue is an associated value for a LabelName. | |||
type LabelValue string | |||
// IsValid returns true iff the string is a valid UTF8. | |||
func (lv LabelValue) IsValid() bool { | |||
return utf8.ValidString(string(lv)) | |||
} | |||
// LabelValues is a sortable LabelValue slice. It implements sort.Interface. | |||
type LabelValues []LabelValue | |||
func (l LabelValues) Len() int { | |||
return len(l) | |||
} | |||
func (l LabelValues) Less(i, j int) bool { | |||
return string(l[i]) < string(l[j]) | |||
} | |||
func (l LabelValues) Swap(i, j int) { | |||
l[i], l[j] = l[j], l[i] | |||
} | |||
// LabelPair pairs a name with a value. | |||
type LabelPair struct { | |||
Name LabelName | |||
Value LabelValue | |||
} | |||
// LabelPairs is a sortable slice of LabelPair pointers. It implements | |||
// sort.Interface. | |||
type LabelPairs []*LabelPair | |||
func (l LabelPairs) Len() int { | |||
return len(l) | |||
} | |||
func (l LabelPairs) Less(i, j int) bool { | |||
switch { | |||
case l[i].Name > l[j].Name: | |||
return false | |||
case l[i].Name < l[j].Name: | |||
return true | |||
case l[i].Value > l[j].Value: | |||
return false | |||
case l[i].Value < l[j].Value: | |||
return true | |||
default: | |||
return false | |||
} | |||
} | |||
func (l LabelPairs) Swap(i, j int) { | |||
l[i], l[j] = l[j], l[i] | |||
} |
@@ -0,0 +1,169 @@ | |||
// Copyright 2013 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package model | |||
import ( | |||
"encoding/json" | |||
"fmt" | |||
"sort" | |||
"strings" | |||
) | |||
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
// may be fully-qualified down to the point where it may resolve to a single
// Metric in the data store or not. All operations that occur within the realm
// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
// match.
type LabelSet map[LabelName]LabelValue

// Validate checks whether all names and values in the label set
// are valid. It returns the first offending name or value it encounters;
// since map iteration order is unspecified, which one is reported first
// is nondeterministic when multiple entries are invalid.
func (ls LabelSet) Validate() error {
	for ln, lv := range ls {
		if !ln.IsValid() {
			return fmt.Errorf("invalid name %q", ln)
		}
		if !lv.IsValid() {
			return fmt.Errorf("invalid value %q", lv)
		}
	}
	return nil
}
// Equal returns true iff both label sets have exactly the same key/value pairs. | |||
func (ls LabelSet) Equal(o LabelSet) bool { | |||
if len(ls) != len(o) { | |||
return false | |||
} | |||
for ln, lv := range ls { | |||
olv, ok := o[ln] | |||
if !ok { | |||
return false | |||
} | |||
if olv != lv { | |||
return false | |||
} | |||
} | |||
return true | |||
} | |||
// Before compares the metrics, using the following criteria:
//
// If m has fewer labels than o, it is before o. If it has more, it is not.
//
// If the number of labels is the same, the superset of all label names is
// sorted alphanumerically. The first differing label pair found in that order
// determines the outcome: If the label does not exist at all in m, then m is
// before o, and vice versa. Otherwise the label value is compared
// alphanumerically.
//
// If m and o are equal, the method returns false.
func (ls LabelSet) Before(o LabelSet) bool {
	if len(ls) < len(o) {
		return true
	}
	if len(ls) > len(o) {
		return false
	}

	// Build the (possibly duplicated) union of both key sets and sort it so
	// the pairwise comparison below visits names in a deterministic order.
	lns := make(LabelNames, 0, len(ls)+len(o))
	for ln := range ls {
		lns = append(lns, ln)
	}
	for ln := range o {
		lns = append(lns, ln)
	}
	// It's probably not worth it to de-dup lns.
	sort.Sort(lns)
	for _, ln := range lns {
		mlv, ok := ls[ln]
		if !ok {
			return true
		}
		olv, ok := o[ln]
		if !ok {
			return false
		}
		if mlv < olv {
			return true
		}
		if mlv > olv {
			return false
		}
	}
	return false
}
// Clone returns a copy of the label set. | |||
func (ls LabelSet) Clone() LabelSet { | |||
lsn := make(LabelSet, len(ls)) | |||
for ln, lv := range ls { | |||
lsn[ln] = lv | |||
} | |||
return lsn | |||
} | |||
// Merge is a helper function to non-destructively merge two label sets. | |||
func (l LabelSet) Merge(other LabelSet) LabelSet { | |||
result := make(LabelSet, len(l)) | |||
for k, v := range l { | |||
result[k] = v | |||
} | |||
for k, v := range other { | |||
result[k] = v | |||
} | |||
return result | |||
} | |||
func (l LabelSet) String() string { | |||
lstrs := make([]string, 0, len(l)) | |||
for l, v := range l { | |||
lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) | |||
} | |||
sort.Strings(lstrs) | |||
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) | |||
} | |||
// Fingerprint returns the LabelSet's fingerprint.
func (ls LabelSet) Fingerprint() Fingerprint {
	return labelSetToFingerprint(ls)
}

// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
// algorithm, which is, however, more susceptible to hash collisions.
func (ls LabelSet) FastFingerprint() Fingerprint {
	return labelSetToFastFingerprint(ls)
}

// UnmarshalJSON implements the json.Unmarshaler interface. Only label
// names are validated here; label values are accepted as-is.
func (l *LabelSet) UnmarshalJSON(b []byte) error {
	var m map[LabelName]LabelValue
	if err := json.Unmarshal(b, &m); err != nil {
		return err
	}
	// encoding/json only unmarshals maps of the form map[string]T. It treats
	// LabelName as a string and does not call its UnmarshalJSON method.
	// Thus, we have to replicate the behavior here.
	for ln := range m {
		if !ln.IsValid() {
			return fmt.Errorf("%q is not a valid label name", ln)
		}
	}
	*l = LabelSet(m)
	return nil
}
@@ -0,0 +1,103 @@ | |||
// Copyright 2013 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package model | |||
import ( | |||
"fmt" | |||
"regexp" | |||
"sort" | |||
"strings" | |||
) | |||
var (
	// separator is a single NUL byte.
	// NOTE(review): not referenced anywhere in this file as shown — confirm
	// it is used by sibling files in this package before removing.
	separator = []byte{0}

	// MetricNameRE is a regular expression matching valid metric
	// names. Note that the IsValidMetricName function performs the same
	// check but faster than a match with this regular expression.
	MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
)
// A Metric is similar to a LabelSet, but the key difference is that a Metric is | |||
// a singleton and refers to one and only one stream of samples. | |||
type Metric LabelSet | |||
// Equal compares the metrics. | |||
func (m Metric) Equal(o Metric) bool { | |||
return LabelSet(m).Equal(LabelSet(o)) | |||
} | |||
// Before compares the metrics' underlying label sets. | |||
func (m Metric) Before(o Metric) bool { | |||
return LabelSet(m).Before(LabelSet(o)) | |||
} | |||
// Clone returns a copy of the Metric. | |||
func (m Metric) Clone() Metric { | |||
clone := make(Metric, len(m)) | |||
for k, v := range m { | |||
clone[k] = v | |||
} | |||
return clone | |||
} | |||
// String renders the metric in the familiar exposition form
// name{label="value", ...}, with label pairs sorted for deterministic
// output and the __name__ label printed in front of the braces. With no
// regular labels, only the bare name (or "{}" when the name is missing
// too) is returned.
func (m Metric) String() string {
	metricName, hasName := m[MetricNameLabel]
	numLabels := len(m) - 1
	if !hasName {
		numLabels = len(m)
	}
	labelStrings := make([]string, 0, numLabels)
	for label, value := range m {
		if label != MetricNameLabel {
			labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
		}
	}

	switch numLabels {
	case 0:
		if hasName {
			return string(metricName)
		}
		return "{}"
	default:
		sort.Strings(labelStrings)
		// When labels exist but __name__ is absent, metricName is the empty
		// string, so this renders as {label="value", ...}.
		return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
	}
}

// Fingerprint returns a Metric's Fingerprint.
func (m Metric) Fingerprint() Fingerprint {
	return LabelSet(m).Fingerprint()
}

// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
// algorithm, which is, however, more susceptible to hash collisions.
func (m Metric) FastFingerprint() Fingerprint {
	return LabelSet(m).FastFingerprint()
}
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. | |||
// This function, however, does not use MetricNameRE for the check but a much | |||
// faster hardcoded implementation. | |||
func IsValidMetricName(n LabelValue) bool { | |||
if len(n) == 0 { | |||
return false | |||
} | |||
for i, b := range n { | |||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { | |||
return false | |||
} | |||
} | |||
return true | |||
} |
@@ -0,0 +1,16 @@ | |||
// Copyright 2013 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Package model contains common data structures that are shared across | |||
// Prometheus components and libraries. | |||
package model |
@@ -0,0 +1,144 @@ | |||
// Copyright 2014 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package model | |||
import ( | |||
"sort" | |||
) | |||
// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
// used to separate label names, label values, and other strings from each other
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255

var (
	// emptyLabelSignature caches the signature of an empty label set,
	// i.e. the plain FNV-1a offset basis returned by hashNew.
	emptyLabelSignature = hashNew()
)
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a | |||
// given label set. (Collisions are possible but unlikely if the number of label | |||
// sets the function is applied to is small.) | |||
func LabelsToSignature(labels map[string]string) uint64 { | |||
if len(labels) == 0 { | |||
return emptyLabelSignature | |||
} | |||
labelNames := make([]string, 0, len(labels)) | |||
for labelName := range labels { | |||
labelNames = append(labelNames, labelName) | |||
} | |||
sort.Strings(labelNames) | |||
sum := hashNew() | |||
for _, labelName := range labelNames { | |||
sum = hashAdd(sum, labelName) | |||
sum = hashAddByte(sum, SeparatorByte) | |||
sum = hashAdd(sum, labels[labelName]) | |||
sum = hashAddByte(sum, SeparatorByte) | |||
} | |||
return sum | |||
} | |||
// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
// parameter (rather than a label map) and returns a Fingerprint.
func labelSetToFingerprint(ls LabelSet) Fingerprint {
	if len(ls) == 0 {
		return Fingerprint(emptyLabelSignature)
	}

	// Sort names first so the hash is independent of map iteration order.
	labelNames := make(LabelNames, 0, len(ls))
	for labelName := range ls {
		labelNames = append(labelNames, labelName)
	}
	sort.Sort(labelNames)

	sum := hashNew()
	for _, labelName := range labelNames {
		sum = hashAdd(sum, string(labelName))
		sum = hashAddByte(sum, SeparatorByte)
		sum = hashAdd(sum, string(ls[labelName]))
		sum = hashAddByte(sum, SeparatorByte)
	}
	return Fingerprint(sum)
}

// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
// faster and less allocation-heavy hash function, which is more susceptible to
// create hash collisions. Therefore, collision detection should be applied.
func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
	if len(ls) == 0 {
		return Fingerprint(emptyLabelSignature)
	}

	var result uint64
	// XOR-combining the per-pair hashes makes the result independent of
	// map iteration order, so no sorting is needed here.
	for labelName, labelValue := range ls {
		sum := hashNew()
		sum = hashAdd(sum, string(labelName))
		sum = hashAddByte(sum, SeparatorByte)
		sum = hashAdd(sum, string(labelValue))
		result ^= sum
	}
	return Fingerprint(result)
}

// SignatureForLabels works like LabelsToSignature but takes a Metric as
// parameter (rather than a label map) and only includes the labels with the
// specified LabelNames into the signature calculation. The labels passed in
// will be sorted by this function.
// NOTE(review): the sort below mutates the caller's slice when one is
// expanded with ... — callers must not rely on its original order.
func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
	if len(labels) == 0 {
		return emptyLabelSignature
	}

	sort.Sort(LabelNames(labels))

	sum := hashNew()
	for _, label := range labels {
		// Labels absent from m contribute their name and an empty value.
		sum = hashAdd(sum, string(label))
		sum = hashAddByte(sum, SeparatorByte)
		sum = hashAdd(sum, string(m[label]))
		sum = hashAddByte(sum, SeparatorByte)
	}
	return sum
}
// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as | |||
// parameter (rather than a label map) and excludes the labels with any of the | |||
// specified LabelNames from the signature calculation. | |||
func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { | |||
if len(m) == 0 { | |||
return emptyLabelSignature | |||
} | |||
labelNames := make(LabelNames, 0, len(m)) | |||
for labelName := range m { | |||
if _, exclude := labels[labelName]; !exclude { | |||
labelNames = append(labelNames, labelName) | |||
} | |||
} | |||
if len(labelNames) == 0 { | |||
return emptyLabelSignature | |||
} | |||
sort.Sort(labelNames) | |||
sum := hashNew() | |||
for _, labelName := range labelNames { | |||
sum = hashAdd(sum, string(labelName)) | |||
sum = hashAddByte(sum, SeparatorByte) | |||
sum = hashAdd(sum, string(m[labelName])) | |||
sum = hashAddByte(sum, SeparatorByte) | |||
} | |||
return sum | |||
} |
@@ -0,0 +1,106 @@ | |||
// Copyright 2015 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package model | |||
import ( | |||
"encoding/json" | |||
"fmt" | |||
"regexp" | |||
"time" | |||
) | |||
// Matcher describes a matches the value of a given label. | |||
type Matcher struct { | |||
Name LabelName `json:"name"` | |||
Value string `json:"value"` | |||
IsRegex bool `json:"isRegex"` | |||
} | |||
func (m *Matcher) UnmarshalJSON(b []byte) error { | |||
type plain Matcher | |||
if err := json.Unmarshal(b, (*plain)(m)); err != nil { | |||
return err | |||
} | |||
if len(m.Name) == 0 { | |||
return fmt.Errorf("label name in matcher must not be empty") | |||
} | |||
if m.IsRegex { | |||
if _, err := regexp.Compile(m.Value); err != nil { | |||
return err | |||
} | |||
} | |||
return nil | |||
} | |||
// Validate returns true iff all fields of the matcher have valid values. | |||
func (m *Matcher) Validate() error { | |||
if !m.Name.IsValid() { | |||
return fmt.Errorf("invalid name %q", m.Name) | |||
} | |||
if m.IsRegex { | |||
if _, err := regexp.Compile(m.Value); err != nil { | |||
return fmt.Errorf("invalid regular expression %q", m.Value) | |||
} | |||
} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { | |||
return fmt.Errorf("invalid value %q", m.Value) | |||
} | |||
return nil | |||
} | |||
// Silence defines the representation of a silence definition in the Prometheus | |||
// eco-system. | |||
type Silence struct { | |||
ID uint64 `json:"id,omitempty"` | |||
Matchers []*Matcher `json:"matchers"` | |||
StartsAt time.Time `json:"startsAt"` | |||
EndsAt time.Time `json:"endsAt"` | |||
CreatedAt time.Time `json:"createdAt,omitempty"` | |||
CreatedBy string `json:"createdBy"` | |||
Comment string `json:"comment,omitempty"` | |||
} | |||
// Validate returns true iff all fields of the silence have valid values. | |||
func (s *Silence) Validate() error { | |||
if len(s.Matchers) == 0 { | |||
return fmt.Errorf("at least one matcher required") | |||
} | |||
for _, m := range s.Matchers { | |||
if err := m.Validate(); err != nil { | |||
return fmt.Errorf("invalid matcher: %s", err) | |||
} | |||
} | |||
if s.StartsAt.IsZero() { | |||
return fmt.Errorf("start time missing") | |||
} | |||
if s.EndsAt.IsZero() { | |||
return fmt.Errorf("end time missing") | |||
} | |||
if s.EndsAt.Before(s.StartsAt) { | |||
return fmt.Errorf("start time must be before end time") | |||
} | |||
if s.CreatedBy == "" { | |||
return fmt.Errorf("creator information missing") | |||
} | |||
if s.Comment == "" { | |||
return fmt.Errorf("comment missing") | |||
} | |||
if s.CreatedAt.IsZero() { | |||
return fmt.Errorf("creation timestamp missing") | |||
} | |||
return nil | |||
} |
@@ -0,0 +1,264 @@ | |||
// Copyright 2013 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package model | |||
import ( | |||
"fmt" | |||
"math" | |||
"regexp" | |||
"strconv" | |||
"strings" | |||
"time" | |||
) | |||
const (
	// minimumTick is the minimum supported time resolution. It must be a
	// divisor of time.Second (i.e. at most one second), otherwise the
	// derived constants below would truncate to zero.
	minimumTick = time.Millisecond
	// second is the number of ticks (minimumTick units) in one second.
	second = int64(time.Second / minimumTick)
	// nanosPerTick is the number of nanoseconds per minimum tick.
	nanosPerTick = int64(minimumTick / time.Nanosecond)

	// Earliest is the earliest Time representable. Handy for
	// initializing a high watermark.
	Earliest = Time(math.MinInt64)
	// Latest is the latest Time representable. Handy for initializing
	// a low watermark.
	Latest = Time(math.MaxInt64)
)
// Time is the number of milliseconds since the epoch
// (1970-01-01 00:00 UTC) excluding leap seconds. The unit is one
// minimumTick (currently a millisecond).
type Time int64

// Interval describes a half-open-or-closed interval between two timestamps.
// Whether the endpoints are inclusive is up to the caller.
type Interval struct {
	Start, End Time
}
// Now returns the current time as a Time. | |||
func Now() Time { | |||
return TimeFromUnixNano(time.Now().UnixNano()) | |||
} | |||
// TimeFromUnix returns the Time equivalent to the Unix Time t | |||
// provided in seconds. | |||
func TimeFromUnix(t int64) Time { | |||
return Time(t * second) | |||
} | |||
// TimeFromUnixNano returns the Time equivalent to the Unix Time | |||
// t provided in nanoseconds. | |||
func TimeFromUnixNano(t int64) Time { | |||
return Time(t / nanosPerTick) | |||
} | |||
// Equal reports whether two Times represent the same instant.
func (t Time) Equal(o Time) bool {
	return t == o
}

// Before reports whether the Time t is before o.
func (t Time) Before(o Time) bool {
	return t < o
}

// After reports whether the Time t is after o.
func (t Time) After(o Time) bool {
	return t > o
}

// Add returns the Time t + d. The duration is truncated toward zero to
// tick resolution (integer division by minimumTick).
func (t Time) Add(d time.Duration) Time {
	return t + Time(d/minimumTick)
}

// Sub returns the Duration t - o.
func (t Time) Sub(o Time) time.Duration {
	return time.Duration(t-o) * minimumTick
}

// Time returns the time.Time representation of t.
func (t Time) Time() time.Time {
	return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
}

// Unix returns t as a Unix time, the number of seconds elapsed
// since January 1, 1970 UTC. The sub-second part is truncated.
func (t Time) Unix() int64 {
	return int64(t) / second
}

// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
// since January 1, 1970 UTC.
func (t Time) UnixNano() int64 {
	return int64(t) * nanosPerTick
}
// dotPrecision is the number of decimal digits after the dot needed to
// render a Time at full tick resolution (log10 of ticks per second).
var dotPrecision = int(math.Log10(float64(second)))

// String returns the Time as fractional seconds, using the shortest
// decimal representation (FormatFloat precision -1 trims trailing zeros).
func (t Time) String() string {
	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
}

// MarshalJSON implements the json.Marshaler interface. The value is
// emitted as a bare (unquoted) JSON number of seconds.
func (t Time) MarshalJSON() ([]byte, error) {
	return []byte(t.String()), nil
}
// UnmarshalJSON implements the json.Unmarshaler interface. | |||
func (t *Time) UnmarshalJSON(b []byte) error { | |||
p := strings.Split(string(b), ".") | |||
switch len(p) { | |||
case 1: | |||
v, err := strconv.ParseInt(string(p[0]), 10, 64) | |||
if err != nil { | |||
return err | |||
} | |||
*t = Time(v * second) | |||
case 2: | |||
v, err := strconv.ParseInt(string(p[0]), 10, 64) | |||
if err != nil { | |||
return err | |||
} | |||
v *= second | |||
prec := dotPrecision - len(p[1]) | |||
if prec < 0 { | |||
p[1] = p[1][:dotPrecision] | |||
} else if prec > 0 { | |||
p[1] = p[1] + strings.Repeat("0", prec) | |||
} | |||
va, err := strconv.ParseInt(p[1], 10, 32) | |||
if err != nil { | |||
return err | |||
} | |||
*t = Time(v + va) | |||
default: | |||
return fmt.Errorf("invalid time %q", string(b)) | |||
} | |||
return nil | |||
} | |||
// Duration wraps time.Duration. It is used to parse the custom duration format
// from YAML.
// This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration

// Set implements pflag/flag.Value. It parses s with ParseDuration and
// stores the result in d.
func (d *Duration) Set(s string) error {
	var err error
	*d, err = ParseDuration(s)
	return err
}

// Type implements pflag.Value.
func (d *Duration) Type() string {
	return "duration"
}

// durationRE matches a decimal count followed by exactly one unit suffix.
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")

// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
// It returns an error if the string does not match durationRE, if the
// numeric part overflows int, or if the unit is unknown.
func ParseDuration(durationStr string) (Duration, error) {
	matches := durationRE.FindStringSubmatch(durationStr)
	if len(matches) != 3 {
		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
	}
	// BUGFIX: the Atoi error was previously discarded, so a count
	// overflowing int silently parsed as a 0 duration. Report it instead.
	n, err := strconv.Atoi(matches[1])
	if err != nil {
		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
	}
	dur := time.Duration(n) * time.Millisecond
	switch unit := matches[2]; unit {
	case "y":
		dur *= 1000 * 60 * 60 * 24 * 365
	case "w":
		dur *= 1000 * 60 * 60 * 24 * 7
	case "d":
		dur *= 1000 * 60 * 60 * 24
	case "h":
		dur *= 1000 * 60 * 60
	case "m":
		dur *= 1000 * 60
	case "s":
		dur *= 1000
	case "ms":
		// Value already in milliseconds.
	default:
		return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
	}
	return Duration(dur), nil
}
func (d Duration) String() string { | |||
var ( | |||
ms = int64(time.Duration(d) / time.Millisecond) | |||
unit = "ms" | |||
) | |||
if ms == 0 { | |||
return "0s" | |||
} | |||
factors := map[string]int64{ | |||
"y": 1000 * 60 * 60 * 24 * 365, | |||
"w": 1000 * 60 * 60 * 24 * 7, | |||
"d": 1000 * 60 * 60 * 24, | |||
"h": 1000 * 60 * 60, | |||
"m": 1000 * 60, | |||
"s": 1000, | |||
"ms": 1, | |||
} | |||
switch int64(0) { | |||
case ms % factors["y"]: | |||
unit = "y" | |||
case ms % factors["w"]: | |||
unit = "w" | |||
case ms % factors["d"]: | |||
unit = "d" | |||
case ms % factors["h"]: | |||
unit = "h" | |||
case ms % factors["m"]: | |||
unit = "m" | |||
case ms % factors["s"]: | |||
unit = "s" | |||
} | |||
return fmt.Sprintf("%v%v", ms/factors[unit], unit) | |||
} | |||
// MarshalYAML implements the yaml.Marshaler interface. | |||
func (d Duration) MarshalYAML() (interface{}, error) { | |||
return d.String(), nil | |||
} | |||
// UnmarshalYAML implements the yaml.Unmarshaler interface. | |||
func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { | |||
var s string | |||
if err := unmarshal(&s); err != nil { | |||
return err | |||
} | |||
dur, err := ParseDuration(s) | |||
if err != nil { | |||
return err | |||
} | |||
*d = dur | |||
return nil | |||
} |
@@ -0,0 +1,416 @@ | |||
// Copyright 2013 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package model | |||
import ( | |||
"encoding/json" | |||
"fmt" | |||
"math" | |||
"sort" | |||
"strconv" | |||
"strings" | |||
) | |||
var (
	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
	// of 0, which can appear in a real SamplePair and is thus not
	// suitable to signal a non-existing SamplePair.
	ZeroSamplePair = SamplePair{Timestamp: Earliest}

	// ZeroSample is the pseudo zero-value of Sample used to signal a
	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
	// and metric nil. Note that the natural zero value of Sample has a timestamp
	// of 0, which can appear in a real Sample and is thus not suitable
	// to signal a non-existing Sample.
	ZeroSample = Sample{Timestamp: Earliest}
)
// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64

// MarshalJSON implements json.Marshaler. The value is encoded as a quoted
// decimal string (see String).
func (v SampleValue) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.String())
}

// UnmarshalJSON implements json.Unmarshaler. Only quoted numeric strings
// are accepted.
func (v *SampleValue) UnmarshalJSON(b []byte) error {
	quoted := len(b) >= 2 && b[0] == '"' && b[len(b)-1] == '"'
	if !quoted {
		return fmt.Errorf("sample value must be a quoted string")
	}
	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
	if err != nil {
		return err
	}
	*v = SampleValue(f)
	return nil
}

// Equal returns true if the value of v and o is equal or if both are NaN. Note
// that v==o is false if both are NaN. If you want the conventional float
// behavior, use == to compare two SampleValues.
func (v SampleValue) Equal(o SampleValue) bool {
	bothNaN := math.IsNaN(float64(v)) && math.IsNaN(float64(o))
	return v == o || bothNaN
}

// String renders the value with the shortest decimal representation.
func (v SampleValue) String() string {
	return strconv.FormatFloat(float64(v), 'f', -1, 64)
}
// SamplePair pairs a SampleValue with a Timestamp. | |||
type SamplePair struct { | |||
Timestamp Time | |||
Value SampleValue | |||
} | |||
// MarshalJSON implements json.Marshaler. | |||
func (s SamplePair) MarshalJSON() ([]byte, error) { | |||
t, err := json.Marshal(s.Timestamp) | |||
if err != nil { | |||
return nil, err | |||
} | |||
v, err := json.Marshal(s.Value) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil | |||
} | |||
// UnmarshalJSON implements json.Unmarshaler. | |||
func (s *SamplePair) UnmarshalJSON(b []byte) error { | |||
v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} | |||
return json.Unmarshal(b, &v) | |||
} | |||
// Equal returns true if this SamplePair and o have equal Values and equal | |||
// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. | |||
func (s *SamplePair) Equal(o *SamplePair) bool { | |||
return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) | |||
} | |||
func (s SamplePair) String() string { | |||
return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) | |||
} | |||
// Sample is a sample pair associated with a metric. | |||
type Sample struct { | |||
Metric Metric `json:"metric"` | |||
Value SampleValue `json:"value"` | |||
Timestamp Time `json:"timestamp"` | |||
} | |||
// Equal compares first the metrics, then the timestamp, then the value. The | |||
// semantics of value equality is defined by SampleValue.Equal. | |||
func (s *Sample) Equal(o *Sample) bool { | |||
if s == o { | |||
return true | |||
} | |||
if !s.Metric.Equal(o.Metric) { | |||
return false | |||
} | |||
if !s.Timestamp.Equal(o.Timestamp) { | |||
return false | |||
} | |||
return s.Value.Equal(o.Value) | |||
} | |||
func (s Sample) String() string { | |||
return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ | |||
Timestamp: s.Timestamp, | |||
Value: s.Value, | |||
}) | |||
} | |||
// MarshalJSON implements json.Marshaler. | |||
func (s Sample) MarshalJSON() ([]byte, error) { | |||
v := struct { | |||
Metric Metric `json:"metric"` | |||
Value SamplePair `json:"value"` | |||
}{ | |||
Metric: s.Metric, | |||
Value: SamplePair{ | |||
Timestamp: s.Timestamp, | |||
Value: s.Value, | |||
}, | |||
} | |||
return json.Marshal(&v) | |||
} | |||
// UnmarshalJSON implements json.Unmarshaler. | |||
func (s *Sample) UnmarshalJSON(b []byte) error { | |||
v := struct { | |||
Metric Metric `json:"metric"` | |||
Value SamplePair `json:"value"` | |||
}{ | |||
Metric: s.Metric, | |||
Value: SamplePair{ | |||
Timestamp: s.Timestamp, | |||
Value: s.Value, | |||
}, | |||
} | |||
if err := json.Unmarshal(b, &v); err != nil { | |||
return err | |||
} | |||
s.Metric = v.Metric | |||
s.Timestamp = v.Value.Timestamp | |||
s.Value = v.Value.Value | |||
return nil | |||
} | |||
// Samples is a sortable Sample slice. It implements sort.Interface. | |||
type Samples []*Sample | |||
func (s Samples) Len() int { | |||
return len(s) | |||
} | |||
// Less compares first the metrics, then the timestamp. | |||
func (s Samples) Less(i, j int) bool { | |||
switch { | |||
case s[i].Metric.Before(s[j].Metric): | |||
return true | |||
case s[j].Metric.Before(s[i].Metric): | |||
return false | |||
case s[i].Timestamp.Before(s[j].Timestamp): | |||
return true | |||
default: | |||
return false | |||
} | |||
} | |||
func (s Samples) Swap(i, j int) { | |||
s[i], s[j] = s[j], s[i] | |||
} | |||
// Equal compares two sets of samples and returns true if they are equal. | |||
func (s Samples) Equal(o Samples) bool { | |||
if len(s) != len(o) { | |||
return false | |||
} | |||
for i, sample := range s { | |||
if !sample.Equal(o[i]) { | |||
return false | |||
} | |||
} | |||
return true | |||
} | |||
// SampleStream is a stream of Values belonging to an attached COWMetric. | |||
type SampleStream struct { | |||
Metric Metric `json:"metric"` | |||
Values []SamplePair `json:"values"` | |||
} | |||
func (ss SampleStream) String() string { | |||
vals := make([]string, len(ss.Values)) | |||
for i, v := range ss.Values { | |||
vals[i] = v.String() | |||
} | |||
return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) | |||
} | |||
// Value is a generic interface for values resulting from a query evaluation.
type Value interface {
	Type() ValueType
	String() string
}

// Type identifies the concrete kind of each Value implementation.
func (Matrix) Type() ValueType  { return ValMatrix }
func (Vector) Type() ValueType  { return ValVector }
func (*Scalar) Type() ValueType { return ValScalar }
func (*String) Type() ValueType { return ValString }

// ValueType enumerates the kinds of Value: none, scalar, vector, matrix,
// or string.
type ValueType int

// Supported value types. ValNone is the zero value and marks "no type".
const (
	ValNone ValueType = iota
	ValScalar
	ValVector
	ValMatrix
	ValString
)
// MarshalJSON implements json.Marshaler. | |||
func (et ValueType) MarshalJSON() ([]byte, error) { | |||
return json.Marshal(et.String()) | |||
} | |||
func (et *ValueType) UnmarshalJSON(b []byte) error { | |||
var s string | |||
if err := json.Unmarshal(b, &s); err != nil { | |||
return err | |||
} | |||
switch s { | |||
case "<ValNone>": | |||
*et = ValNone | |||
case "scalar": | |||
*et = ValScalar | |||
case "vector": | |||
*et = ValVector | |||
case "matrix": | |||
*et = ValMatrix | |||
case "string": | |||
*et = ValString | |||
default: | |||
return fmt.Errorf("unknown value type %q", s) | |||
} | |||
return nil | |||
} | |||
func (e ValueType) String() string { | |||
switch e { | |||
case ValNone: | |||
return "<ValNone>" | |||
case ValScalar: | |||
return "scalar" | |||
case ValVector: | |||
return "vector" | |||
case ValMatrix: | |||
return "matrix" | |||
case ValString: | |||
return "string" | |||
} | |||
panic("ValueType.String: unhandled value type") | |||
} | |||
// Scalar is a scalar value evaluated at the set timestamp. | |||
type Scalar struct { | |||
Value SampleValue `json:"value"` | |||
Timestamp Time `json:"timestamp"` | |||
} | |||
func (s Scalar) String() string { | |||
return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) | |||
} | |||
// MarshalJSON implements json.Marshaler. | |||
func (s Scalar) MarshalJSON() ([]byte, error) { | |||
v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) | |||
return json.Marshal([...]interface{}{s.Timestamp, string(v)}) | |||
} | |||
// UnmarshalJSON implements json.Unmarshaler. | |||
func (s *Scalar) UnmarshalJSON(b []byte) error { | |||
var f string | |||
v := [...]interface{}{&s.Timestamp, &f} | |||
if err := json.Unmarshal(b, &v); err != nil { | |||
return err | |||
} | |||
value, err := strconv.ParseFloat(f, 64) | |||
if err != nil { | |||
return fmt.Errorf("error parsing sample value: %s", err) | |||
} | |||
s.Value = SampleValue(value) | |||
return nil | |||
} | |||
// String is a string value evaluated at the set timestamp. | |||
type String struct { | |||
Value string `json:"value"` | |||
Timestamp Time `json:"timestamp"` | |||
} | |||
func (s *String) String() string { | |||
return s.Value | |||
} | |||
// MarshalJSON implements json.Marshaler. | |||
func (s String) MarshalJSON() ([]byte, error) { | |||
return json.Marshal([]interface{}{s.Timestamp, s.Value}) | |||
} | |||
// UnmarshalJSON implements json.Unmarshaler. | |||
func (s *String) UnmarshalJSON(b []byte) error { | |||
v := [...]interface{}{&s.Timestamp, &s.Value} | |||
return json.Unmarshal(b, &v) | |||
} | |||
// Vector is basically only an alias for Samples, but the | |||
// contract is that in a Vector, all Samples have the same timestamp. | |||
type Vector []*Sample | |||
func (vec Vector) String() string { | |||
entries := make([]string, len(vec)) | |||
for i, s := range vec { | |||
entries[i] = s.String() | |||
} | |||
return strings.Join(entries, "\n") | |||
} | |||
func (vec Vector) Len() int { return len(vec) } | |||
func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } | |||
// Less compares first the metrics, then the timestamp. | |||
func (vec Vector) Less(i, j int) bool { | |||
switch { | |||
case vec[i].Metric.Before(vec[j].Metric): | |||
return true | |||
case vec[j].Metric.Before(vec[i].Metric): | |||
return false | |||
case vec[i].Timestamp.Before(vec[j].Timestamp): | |||
return true | |||
default: | |||
return false | |||
} | |||
} | |||
// Equal compares two sets of samples and returns true if they are equal. | |||
func (vec Vector) Equal(o Vector) bool { | |||
if len(vec) != len(o) { | |||
return false | |||
} | |||
for i, sample := range vec { | |||
if !sample.Equal(o[i]) { | |||
return false | |||
} | |||
} | |||
return true | |||
} | |||
// Matrix is a list of time series. | |||
type Matrix []*SampleStream | |||
func (m Matrix) Len() int { return len(m) } | |||
func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } | |||
func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } | |||
func (mat Matrix) String() string { | |||
matCp := make(Matrix, len(mat)) | |||
copy(matCp, mat) | |||
sort.Sort(matCp) | |||
strs := make([]string, len(matCp)) | |||
for i, ss := range matCp { | |||
strs[i] = ss.String() | |||
} | |||
return strings.Join(strs, "\n") | |||
} |
@@ -0,0 +1,201 @@ | |||
Apache License | |||
Version 2.0, January 2004 | |||
http://www.apache.org/licenses/ | |||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
1. Definitions. | |||
"License" shall mean the terms and conditions for use, reproduction, | |||
and distribution as defined by Sections 1 through 9 of this document. | |||
"Licensor" shall mean the copyright owner or entity authorized by | |||
the copyright owner that is granting the License. | |||
"Legal Entity" shall mean the union of the acting entity and all | |||
other entities that control, are controlled by, or are under common | |||
control with that entity. For the purposes of this definition, | |||
"control" means (i) the power, direct or indirect, to cause the | |||
direction or management of such entity, whether by contract or | |||
otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
outstanding shares, or (iii) beneficial ownership of such entity. | |||
"You" (or "Your") shall mean an individual or Legal Entity | |||
exercising permissions granted by this License. | |||
"Source" form shall mean the preferred form for making modifications, | |||
including but not limited to software source code, documentation | |||
source, and configuration files. | |||
"Object" form shall mean any form resulting from mechanical | |||
transformation or translation of a Source form, including but | |||
not limited to compiled object code, generated documentation, | |||
and conversions to other media types. | |||
"Work" shall mean the work of authorship, whether in Source or | |||
Object form, made available under the License, as indicated by a | |||
copyright notice that is included in or attached to the work | |||
(an example is provided in the Appendix below). | |||
"Derivative Works" shall mean any work, whether in Source or Object | |||
form, that is based on (or derived from) the Work and for which the | |||
editorial revisions, annotations, elaborations, or other modifications | |||
represent, as a whole, an original work of authorship. For the purposes | |||
of this License, Derivative Works shall not include works that remain | |||
separable from, or merely link (or bind by name) to the interfaces of, | |||
the Work and Derivative Works thereof. | |||
"Contribution" shall mean any work of authorship, including | |||
the original version of the Work and any modifications or additions | |||
to that Work or Derivative Works thereof, that is intentionally | |||
submitted to Licensor for inclusion in the Work by the copyright owner | |||
or by an individual or Legal Entity authorized to submit on behalf of | |||
the copyright owner. For the purposes of this definition, "submitted" | |||
means any form of electronic, verbal, or written communication sent | |||
to the Licensor or its representatives, including but not limited to | |||
communication on electronic mailing lists, source code control systems, | |||
and issue tracking systems that are managed by, or on behalf of, the | |||
Licensor for the purpose of discussing and improving the Work, but | |||
excluding communication that is conspicuously marked or otherwise | |||
designated in writing by the copyright owner as "Not a Contribution." | |||
"Contributor" shall mean Licensor and any individual or Legal Entity | |||
on behalf of whom a Contribution has been received by Licensor and | |||
subsequently incorporated within the Work. | |||
2. Grant of Copyright License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
copyright license to reproduce, prepare Derivative Works of, | |||
publicly display, publicly perform, sublicense, and distribute the | |||
Work and such Derivative Works in Source or Object form. | |||
3. Grant of Patent License. Subject to the terms and conditions of | |||
this License, each Contributor hereby grants to You a perpetual, | |||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
(except as stated in this section) patent license to make, have made, | |||
use, offer to sell, sell, import, and otherwise transfer the Work, | |||
where such license applies only to those patent claims licensable | |||
by such Contributor that are necessarily infringed by their | |||
Contribution(s) alone or by combination of their Contribution(s) | |||
with the Work to which such Contribution(s) was submitted. If You | |||
institute patent litigation against any entity (including a | |||
cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
or a Contribution incorporated within the Work constitutes direct | |||
or contributory patent infringement, then any patent licenses | |||
granted to You under this License for that Work shall terminate | |||
as of the date such litigation is filed. | |||
4. Redistribution. You may reproduce and distribute copies of the | |||
Work or Derivative Works thereof in any medium, with or without | |||
modifications, and in Source or Object form, provided that You | |||
meet the following conditions: | |||
(a) You must give any other recipients of the Work or | |||
Derivative Works a copy of this License; and | |||
(b) You must cause any modified files to carry prominent notices | |||
stating that You changed the files; and | |||
(c) You must retain, in the Source form of any Derivative Works | |||
that You distribute, all copyright, patent, trademark, and | |||
attribution notices from the Source form of the Work, | |||
excluding those notices that do not pertain to any part of | |||
the Derivative Works; and | |||
(d) If the Work includes a "NOTICE" text file as part of its | |||
distribution, then any Derivative Works that You distribute must | |||
include a readable copy of the attribution notices contained | |||
within such NOTICE file, excluding those notices that do not | |||
pertain to any part of the Derivative Works, in at least one | |||
of the following places: within a NOTICE text file distributed | |||
as part of the Derivative Works; within the Source form or | |||
documentation, if provided along with the Derivative Works; or, | |||
within a display generated by the Derivative Works, if and | |||
wherever such third-party notices normally appear. The contents | |||
of the NOTICE file are for informational purposes only and | |||
do not modify the License. You may add Your own attribution | |||
notices within Derivative Works that You distribute, alongside | |||
or as an addendum to the NOTICE text from the Work, provided | |||
that such additional attribution notices cannot be construed | |||
as modifying the License. | |||
You may add Your own copyright statement to Your modifications and | |||
may provide additional or different license terms and conditions | |||
for use, reproduction, or distribution of Your modifications, or | |||
for any such Derivative Works as a whole, provided Your use, | |||
reproduction, and distribution of the Work otherwise complies with | |||
the conditions stated in this License. | |||
5. Submission of Contributions. Unless You explicitly state otherwise, | |||
any Contribution intentionally submitted for inclusion in the Work | |||
by You to the Licensor shall be under the terms and conditions of | |||
this License, without any additional terms or conditions. | |||
Notwithstanding the above, nothing herein shall supersede or modify | |||
the terms of any separate license agreement you may have executed | |||
with Licensor regarding such Contributions. | |||
6. Trademarks. This License does not grant permission to use the trade | |||
names, trademarks, service marks, or product names of the Licensor, | |||
except as required for reasonable and customary use in describing the | |||
origin of the Work and reproducing the content of the NOTICE file. | |||
7. Disclaimer of Warranty. Unless required by applicable law or | |||
agreed to in writing, Licensor provides the Work (and each | |||
Contributor provides its Contributions) on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
implied, including, without limitation, any warranties or conditions | |||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
PARTICULAR PURPOSE. You are solely responsible for determining the | |||
appropriateness of using or redistributing the Work and assume any | |||
risks associated with Your exercise of permissions under this License. | |||
8. Limitation of Liability. In no event and under no legal theory, | |||
whether in tort (including negligence), contract, or otherwise, | |||
unless required by applicable law (such as deliberate and grossly | |||
negligent acts) or agreed to in writing, shall any Contributor be | |||
liable to You for damages, including any direct, indirect, special, | |||
incidental, or consequential damages of any character arising as a | |||
result of this License or out of the use or inability to use the | |||
Work (including but not limited to damages for loss of goodwill, | |||
work stoppage, computer failure or malfunction, or any and all | |||
other commercial damages or losses), even if such Contributor | |||
has been advised of the possibility of such damages. | |||
9. Accepting Warranty or Additional Liability. While redistributing | |||
the Work or Derivative Works thereof, You may choose to offer, | |||
and charge a fee for, acceptance of support, warranty, indemnity, | |||
or other liability obligations and/or rights consistent with this | |||
License. However, in accepting such obligations, You may act only | |||
on Your own behalf and on Your sole responsibility, not on behalf | |||
of any other Contributor, and only if You agree to indemnify, | |||
defend, and hold each Contributor harmless for any liability | |||
incurred by, or claims asserted against, such Contributor by reason | |||
of your accepting any such warranty or additional liability. | |||
END OF TERMS AND CONDITIONS | |||
APPENDIX: How to apply the Apache License to your work. | |||
To apply the Apache License to your work, attach the following | |||
boilerplate notice, with the fields enclosed by brackets "[]" | |||
replaced with your own identifying information. (Don't include | |||
the brackets!) The text should be enclosed in the appropriate | |||
comment syntax for the file format. We also recommend that a | |||
file or class name and description of purpose be included on the | |||
same "printed page" as the copyright notice for easier | |||
identification within third-party archives. | |||
Copyright [yyyy] [name of copyright owner] | |||
Licensed under the Apache License, Version 2.0 (the "License"); | |||
you may not use this file except in compliance with the License. | |||
You may obtain a copy of the License at | |||
http://www.apache.org/licenses/LICENSE-2.0 | |||
Unless required by applicable law or agreed to in writing, software | |||
distributed under the License is distributed on an "AS IS" BASIS, | |||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
See the License for the specific language governing permissions and | |||
limitations under the License. |
@@ -0,0 +1,7 @@ | |||
procfs provides functions to retrieve system, kernel and process | |||
metrics from the pseudo-filesystem proc. | |||
Copyright 2014-2015 The Prometheus Authors | |||
This product includes software developed at | |||
SoundCloud Ltd. (http://soundcloud.com/). |
@@ -0,0 +1,95 @@ | |||
// Copyright 2017 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package procfs | |||
import ( | |||
"bufio" | |||
"fmt" | |||
"io" | |||
"os" | |||
"strconv" | |||
"strings" | |||
) | |||
// A BuddyInfo is the details parsed from /proc/buddyinfo.
// The data is comprised of an array of free fragments of each size.
// The sizes are 2^n*PAGE_SIZE, where n is the array index.
type BuddyInfo struct {
	Node  string    // NUMA node identifier, e.g. "0" (comma stripped).
	Zone  string    // memory zone name, e.g. "DMA" or "Normal" (comma stripped).
	Sizes []float64 // free-fragment counts; index n counts blocks of 2^n pages.
}
// NewBuddyInfo reads the buddyinfo statistics. | |||
func NewBuddyInfo() ([]BuddyInfo, error) { | |||
fs, err := NewFS(DefaultMountPoint) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return fs.NewBuddyInfo() | |||
} | |||
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. | |||
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { | |||
file, err := os.Open(fs.Path("buddyinfo")) | |||
if err != nil { | |||
return nil, err | |||
} | |||
defer file.Close() | |||
return parseBuddyInfo(file) | |||
} | |||
func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { | |||
var ( | |||
buddyInfo = []BuddyInfo{} | |||
scanner = bufio.NewScanner(r) | |||
bucketCount = -1 | |||
) | |||
for scanner.Scan() { | |||
var err error | |||
line := scanner.Text() | |||
parts := strings.Fields(line) | |||
if len(parts) < 4 { | |||
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") | |||
} | |||
node := strings.TrimRight(parts[1], ",") | |||
zone := strings.TrimRight(parts[3], ",") | |||
arraySize := len(parts[4:]) | |||
if bucketCount == -1 { | |||
bucketCount = arraySize | |||
} else { | |||
if bucketCount != arraySize { | |||
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) | |||
} | |||
} | |||
sizes := make([]float64, arraySize) | |||
for i := 0; i < arraySize; i++ { | |||
sizes[i], err = strconv.ParseFloat(parts[i+4], 64) | |||
if err != nil { | |||
return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) | |||
} | |||
} | |||
buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) | |||
} | |||
return buddyInfo, scanner.Err() | |||
} |
@@ -0,0 +1,45 @@ | |||
// Copyright 2014 Prometheus Team | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Package procfs provides functions to retrieve system, kernel and process | |||
// metrics from the pseudo-filesystem proc. | |||
// | |||
// Example: | |||
// | |||
// package main | |||
// | |||
// import ( | |||
// "fmt" | |||
// "log" | |||
// | |||
// "github.com/prometheus/procfs" | |||
// ) | |||
// | |||
// func main() { | |||
// p, err := procfs.Self() | |||
// if err != nil { | |||
// log.Fatalf("could not get process: %s", err) | |||
// } | |||
// | |||
// stat, err := p.NewStat() | |||
// if err != nil { | |||
// log.Fatalf("could not get process stat: %s", err) | |||
// } | |||
// | |||
// fmt.Printf("command: %s\n", stat.Comm) | |||
// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) | |||
// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) | |||
// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) | |||
// } | |||
// | |||
package procfs |
@@ -0,0 +1,82 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package procfs | |||
import ( | |||
"fmt" | |||
"os" | |||
"path" | |||
"github.com/prometheus/procfs/nfs" | |||
"github.com/prometheus/procfs/xfs" | |||
) | |||
// FS represents the pseudo-filesystem proc, which provides an interface to
// kernel data structures. Its string value is the mount point.
type FS string

// DefaultMountPoint is the common mount point of the proc filesystem.
const DefaultMountPoint = "/proc"

// NewFS returns a new FS mounted under the given mountPoint. It will error
// if the mount point can't be read.
func NewFS(mountPoint string) (FS, error) {
	stat, err := os.Stat(mountPoint)
	if err != nil {
		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
	}
	if !stat.IsDir() {
		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
	}

	return FS(mountPoint), nil
}

// Path returns the path of the given subsystem relative to the procfs root.
func (fs FS) Path(p ...string) string {
	elems := append([]string{string(fs)}, p...)
	return path.Join(elems...)
}
// XFSStats retrieves XFS filesystem runtime statistics. | |||
func (fs FS) XFSStats() (*xfs.Stats, error) { | |||
f, err := os.Open(fs.Path("fs/xfs/stat")) | |||
if err != nil { | |||
return nil, err | |||
} | |||
defer f.Close() | |||
return xfs.ParseStats(f) | |||
} | |||
// NFSClientRPCStats retrieves NFS client RPC statistics. | |||
func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { | |||
f, err := os.Open(fs.Path("net/rpc/nfs")) | |||
if err != nil { | |||
return nil, err | |||
} | |||
defer f.Close() | |||
return nfs.ParseClientRPCStats(f) | |||
} | |||
// NFSdServerRPCStats retrieves NFS daemon RPC statistics. | |||
func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { | |||
f, err := os.Open(fs.Path("net/rpc/nfsd")) | |||
if err != nil { | |||
return nil, err | |||
} | |||
defer f.Close() | |||
return nfs.ParseServerRPCStats(f) | |||
} |
@@ -0,0 +1,59 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package util | |||
import ( | |||
"io/ioutil" | |||
"strconv" | |||
"strings" | |||
) | |||
// ParseUint32s parses a slice of strings into a slice of uint32s.
// Parsing stops at the first invalid value, returning its error.
func ParseUint32s(ss []string) ([]uint32, error) {
	parsed := make([]uint32, 0, len(ss))
	for _, str := range ss {
		v, err := strconv.ParseUint(str, 10, 32)
		if err != nil {
			return nil, err
		}
		parsed = append(parsed, uint32(v))
	}

	return parsed, nil
}
// ParseUint64s parses a slice of strings into a slice of uint64s.
// Parsing stops at the first invalid value, returning its error.
func ParseUint64s(ss []string) ([]uint64, error) {
	parsed := make([]uint64, 0, len(ss))
	for _, str := range ss {
		v, err := strconv.ParseUint(str, 10, 64)
		if err != nil {
			return nil, err
		}
		parsed = append(parsed, v)
	}

	return parsed, nil
}
// ReadUintFromFile reads a file and attempts to parse a uint64 from it,
// ignoring surrounding whitespace (e.g. the trailing newline sysfs files carry).
func ReadUintFromFile(path string) (uint64, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	text := strings.TrimSpace(string(raw))
	return strconv.ParseUint(text, 10, 64)
}
@@ -0,0 +1,45 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// +build !windows | |||
package util | |||
import ( | |||
"bytes" | |||
"os" | |||
"syscall" | |||
) | |||
// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
//
// It returns at most the first 128 bytes of the file, with surrounding
// whitespace trimmed.
func SysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// On some machines, hwmon drivers are broken and return EAGAIN. This causes
	// Go's ioutil.ReadFile implementation to poll forever.
	//
	// Since we either want to read data or bail immediately, do the simplest
	// possible read using syscall directly.
	buf := make([]byte, 128)
	n, err := syscall.Read(int(f.Fd()), buf)
	if err != nil {
		return "", err
	}

	return string(bytes.TrimSpace(buf[:n])), nil
}
@@ -0,0 +1,259 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package procfs | |||
import ( | |||
"bufio" | |||
"encoding/hex" | |||
"errors" | |||
"fmt" | |||
"io" | |||
"io/ioutil" | |||
"net" | |||
"os" | |||
"strconv" | |||
"strings" | |||
) | |||
// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
type IPVSStats struct {
	// Total count of connections.
	Connections uint64
	// Total incoming packets processed.
	IncomingPackets uint64
	// Total outgoing packets processed.
	OutgoingPackets uint64
	// Total incoming traffic, in bytes.
	IncomingBytes uint64
	// Total outgoing traffic, in bytes.
	OutgoingBytes uint64
}
// IPVSBackendStatus holds current metrics of one virtual / real address pair.
type IPVSBackendStatus struct {
	// The local (virtual) IP address.
	LocalAddress net.IP
	// The remote (real) IP address.
	RemoteAddress net.IP
	// The local (virtual) port.
	LocalPort uint16
	// The remote (real) port.
	RemotePort uint16
	// The local firewall mark. Only set for FWM services; empty for TCP/UDP.
	LocalMark string
	// The transport protocol (TCP, UDP).
	Proto string
	// The current number of active connections for this virtual/real address pair.
	ActiveConn uint64
	// The current number of inactive connections for this virtual/real address pair.
	InactConn uint64
	// The current weight of this virtual/real address pair.
	Weight uint64
}
// NewIPVSStats reads the IPVS statistics. | |||
func NewIPVSStats() (IPVSStats, error) { | |||
fs, err := NewFS(DefaultMountPoint) | |||
if err != nil { | |||
return IPVSStats{}, err | |||
} | |||
return fs.NewIPVSStats() | |||
} | |||
// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. | |||
func (fs FS) NewIPVSStats() (IPVSStats, error) { | |||
file, err := os.Open(fs.Path("net/ip_vs_stats")) | |||
if err != nil { | |||
return IPVSStats{}, err | |||
} | |||
defer file.Close() | |||
return parseIPVSStats(file) | |||
} | |||
// parseIPVSStats performs the actual parsing of `ip_vs_stats`. | |||
func parseIPVSStats(file io.Reader) (IPVSStats, error) { | |||
var ( | |||
statContent []byte | |||
statLines []string | |||
statFields []string | |||
stats IPVSStats | |||
) | |||
statContent, err := ioutil.ReadAll(file) | |||
if err != nil { | |||
return IPVSStats{}, err | |||
} | |||
statLines = strings.SplitN(string(statContent), "\n", 4) | |||
if len(statLines) != 4 { | |||
return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") | |||
} | |||
statFields = strings.Fields(statLines[2]) | |||
if len(statFields) != 5 { | |||
return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") | |||
} | |||
stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) | |||
if err != nil { | |||
return IPVSStats{}, err | |||
} | |||
stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) | |||
if err != nil { | |||
return IPVSStats{}, err | |||
} | |||
stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) | |||
if err != nil { | |||
return IPVSStats{}, err | |||
} | |||
stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) | |||
if err != nil { | |||
return IPVSStats{}, err | |||
} | |||
stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) | |||
if err != nil { | |||
return IPVSStats{}, err | |||
} | |||
return stats, nil | |||
} | |||
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. | |||
func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { | |||
fs, err := NewFS(DefaultMountPoint) | |||
if err != nil { | |||
return []IPVSBackendStatus{}, err | |||
} | |||
return fs.NewIPVSBackendStatus() | |||
} | |||
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. | |||
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { | |||
file, err := os.Open(fs.Path("net/ip_vs")) | |||
if err != nil { | |||
return nil, err | |||
} | |||
defer file.Close() | |||
return parseIPVSBackendStatus(file) | |||
} | |||
// parseIPVSBackendStatus parses the contents of /proc/net/ip_vs.
//
// The file lists one service header line ("TCP addr:port", "UDP addr:port"
// or "FWM mark") followed by one "-> ..." line per real backend. The parser
// therefore carries the most recent service's proto/address/mark as state
// while scanning, and emits one IPVSBackendStatus per backend line.
func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
	var (
		status       []IPVSBackendStatus
		scanner      = bufio.NewScanner(file)
		proto        string // protocol of the current service ("TCP", "UDP" or "FWM")
		localMark    string // firewall mark of the current service (FWM only)
		localAddress net.IP
		localPort    uint16
		err          error
	)

	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) == 0 {
			continue
		}
		switch {
		case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
			// Header lines of the file; carry no data.
			continue
		case fields[0] == "TCP" || fields[0] == "UDP":
			if len(fields) < 2 {
				continue
			}
			proto = fields[0]
			localMark = ""
			localAddress, localPort, err = parseIPPort(fields[1])
			if err != nil {
				return nil, err
			}
		case fields[0] == "FWM":
			if len(fields) < 2 {
				continue
			}
			proto = fields[0]
			// FWM services are keyed by mark, not by address:port.
			localMark = fields[1]
			localAddress = nil
			localPort = 0
		case fields[0] == "->":
			// Backend line: "-> RemoteAddress:Port Forward Weight ActiveConn InActConn".
			if len(fields) < 6 {
				continue
			}
			remoteAddress, remotePort, err := parseIPPort(fields[1])
			if err != nil {
				return nil, err
			}
			weight, err := strconv.ParseUint(fields[3], 10, 64)
			if err != nil {
				return nil, err
			}
			activeConn, err := strconv.ParseUint(fields[4], 10, 64)
			if err != nil {
				return nil, err
			}
			inactConn, err := strconv.ParseUint(fields[5], 10, 64)
			if err != nil {
				return nil, err
			}
			status = append(status, IPVSBackendStatus{
				LocalAddress:  localAddress,
				LocalPort:     localPort,
				LocalMark:     localMark,
				RemoteAddress: remoteAddress,
				RemotePort:    remotePort,
				Proto:         proto,
				Weight:        weight,
				ActiveConn:    activeConn,
				InactConn:     inactConn,
			})
		}
	}
	return status, nil
}
// parseIPPort parses the kernel's hexadecimal "addr:port" notation used in
// /proc/net/ip_vs. IPv4 entries are 13 characters (8 hex digits, colon,
// 4 hex digits); IPv6 entries are 46 characters with a bracketed
// 39-character textual address.
func parseIPPort(s string) (net.IP, uint16, error) {
	var ip net.IP

	switch len(s) {
	case 13:
		decoded, err := hex.DecodeString(s[0:8])
		if err != nil {
			return nil, 0, err
		}
		ip = decoded
	case 46:
		if ip = net.ParseIP(s[1:40]); ip == nil {
			return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
		}
	default:
		return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
	}

	// The last four characters are the port in hex.
	portString := s[len(s)-4:]
	if len(portString) != 4 {
		return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
	}
	port, err := strconv.ParseUint(portString, 16, 16)
	if err != nil {
		return nil, 0, err
	}

	return ip, uint16(port), nil
}
@@ -0,0 +1,151 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package procfs | |||
import ( | |||
"fmt" | |||
"io/ioutil" | |||
"regexp" | |||
"strconv" | |||
"strings" | |||
) | |||
var (
	// statuslineRE extracts total blocks, total disks and active disks from an
	// md status line matching "<blocks> blocks ... [<total>/<active>] [UU_]".
	statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
	// buildlineRE extracts the synced-blocks count from a recovery/resync
	// progress expression of the form "(<synced>/<total>)".
	buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
)
// MDStat holds info parsed from /proc/mdstat.
type MDStat struct {
	// Name of the device, e.g. "md0".
	Name string
	// activity-state of the device, e.g. "active".
	ActivityState string
	// Number of active disks.
	DisksActive int64
	// Total number of disks the device consists of.
	DisksTotal int64
	// Number of blocks the device holds.
	BlocksTotal int64
	// Number of blocks on the device that are in sync. Equals BlocksTotal
	// unless a recovery/resync is in progress.
	BlocksSynced int64
}
// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
//
// /proc/mdstat groups each md device into a small multi-line stanza: the
// device line itself, a status line, an optional bitmap line and an optional
// sync-progress line. The parser walks the file line by line and peeks ahead
// at fixed offsets within each stanza.
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
	mdStatusFilePath := fs.Path("mdstat")
	content, err := ioutil.ReadFile(mdStatusFilePath)
	if err != nil {
		return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
	}

	mdStates := []MDStat{}
	lines := strings.Split(string(content), "\n")
	for i, l := range lines {
		if l == "" {
			continue
		}
		// Indented lines belong to the previous device's stanza.
		if l[0] == ' ' {
			continue
		}
		// Skip the global header and trailer lines.
		if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
			continue
		}

		// Device line, e.g. "md0 : active raid1 sdb1[1] sda1[0]".
		mainLine := strings.Split(l, " ")
		if len(mainLine) < 3 {
			return mdStates, fmt.Errorf("error parsing mdline: %s", l)
		}
		mdName := mainLine[0]
		activityState := mainLine[2]

		// A complete stanza needs at least three more lines.
		if len(lines) <= i+3 {
			return mdStates, fmt.Errorf(
				"error parsing %s: too few lines for md device %s",
				mdStatusFilePath,
				mdName,
			)
		}

		active, total, size, err := evalStatusline(lines[i+1])
		if err != nil {
			return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
		}

		// j is the line number of the syncing-line.
		j := i + 2
		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
			j = i + 3
		}

		// If device is syncing at the moment, get the number of currently
		// synced bytes, otherwise that number equals the size of the device.
		syncedBlocks := size
		if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
			syncedBlocks, err = evalBuildline(lines[j])
			if err != nil {
				return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
			}
		}

		mdStates = append(mdStates, MDStat{
			Name:          mdName,
			ActivityState: activityState,
			DisksActive:   active,
			DisksTotal:    total,
			BlocksTotal:   size,
			BlocksSynced:  syncedBlocks,
		})
	}

	return mdStates, nil
}
func evalStatusline(statusline string) (active, total, size int64, err error) { | |||
matches := statuslineRE.FindStringSubmatch(statusline) | |||
if len(matches) != 4 { | |||
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) | |||
} | |||
size, err = strconv.ParseInt(matches[1], 10, 64) | |||
if err != nil { | |||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) | |||
} | |||
total, err = strconv.ParseInt(matches[2], 10, 64) | |||
if err != nil { | |||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) | |||
} | |||
active, err = strconv.ParseInt(matches[3], 10, 64) | |||
if err != nil { | |||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) | |||
} | |||
return active, total, size, nil | |||
} | |||
func evalBuildline(buildline string) (syncedBlocks int64, err error) { | |||
matches := buildlineRE.FindStringSubmatch(buildline) | |||
if len(matches) != 2 { | |||
return 0, fmt.Errorf("unexpected buildline: %s", buildline) | |||
} | |||
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) | |||
if err != nil { | |||
return 0, fmt.Errorf("%s in buildline: %s", err, buildline) | |||
} | |||
return syncedBlocks, nil | |||
} |
@@ -0,0 +1,606 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package procfs | |||
// While implementing parsing of /proc/[pid]/mountstats, this blog was used | |||
// heavily as a reference: | |||
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex | |||
// | |||
// Special thanks to Chris Siebenmann for all of his posts explaining the | |||
// various statistics available for NFS. | |||
import ( | |||
"bufio" | |||
"fmt" | |||
"io" | |||
"strconv" | |||
"strings" | |||
"time" | |||
) | |||
// Constants shared between multiple functions.
const (
	// Field count of a "device ... mounted on ... with fstype ..." entry.
	deviceEntryLen = 8

	// Expected counter counts for the per-mount "bytes:" and "events:"
	// statistic lines. NOTE(review): used by parsers outside this view —
	// confirm against parseNFSBytesStats/parseNFSEventsStats.
	fieldBytesLen  = 8
	fieldEventsLen = 27

	// Known values of the "statvers=" field.
	statVersion10 = "1.0"
	statVersion11 = "1.1"

	// Expected field counts of an "xprt:" transport line, per stat version
	// and transport protocol.
	fieldTransport10TCPLen = 10
	fieldTransport10UDPLen = 7
	fieldTransport11TCPLen = 13
	fieldTransport11UDPLen = 10
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
type Mount struct {
	// Name of the device.
	Device string
	// The mount point of the device.
	Mount string
	// The filesystem type used by the device.
	Type string
	// If available additional statistics related to this Mount.
	// Use a type assertion to determine if additional statistics are available.
	// Nil when the mountstats entry carried no per-mount statistics.
	Stats MountStats
}
// A MountStats is a type which contains detailed statistics for a specific
// type of Mount. The unexported marker method restricts implementations to
// this package.
type MountStats interface {
	mountStats()
}
// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
type MountStatsNFS struct {
	// The version of statistics provided ("1.0" or "1.1").
	StatVersion string
	// The age of the NFS mount.
	Age time.Duration
	// Statistics related to byte counters for various operations.
	Bytes NFSBytesStats
	// Statistics related to various NFS event occurrences.
	Events NFSEventsStats
	// Statistics broken down by filesystem operation.
	Operations []NFSOperationStats
	// Statistics about the NFS RPC transport.
	Transport NFSTransportStats
}

// mountStats implements MountStats.
func (m MountStatsNFS) mountStats() {}
// A NFSBytesStats contains statistics about the number of bytes read and written
// by an NFS client to and from an NFS server.
type NFSBytesStats struct {
	// Number of bytes read using the read() syscall.
	Read uint64
	// Number of bytes written using the write() syscall.
	Write uint64
	// Number of bytes read using the read() syscall in O_DIRECT mode.
	DirectRead uint64
	// Number of bytes written using the write() syscall in O_DIRECT mode.
	DirectWrite uint64
	// Number of bytes read from the NFS server, in total.
	ReadTotal uint64
	// Number of bytes written to the NFS server, in total.
	WriteTotal uint64
	// Number of pages read directly via mmap()'d files.
	ReadPages uint64
	// Number of pages written directly via mmap()'d files.
	WritePages uint64
}
// A NFSEventsStats contains statistics about NFS event occurrences.
type NFSEventsStats struct {
	// Number of times cached inode attributes are re-validated from the server.
	InodeRevalidate uint64
	// Number of times cached dentry nodes are re-validated from the server.
	DnodeRevalidate uint64
	// Number of times an inode cache is cleared.
	DataInvalidate uint64
	// Number of times cached inode attributes are invalidated.
	AttributeInvalidate uint64
	// Number of times files or directories have been open()'d.
	VFSOpen uint64
	// Number of times a directory lookup has occurred.
	VFSLookup uint64
	// Number of times permissions have been checked.
	VFSAccess uint64
	// Number of updates (and potential writes) to pages.
	VFSUpdatePage uint64
	// Number of pages read directly via mmap()'d files.
	VFSReadPage uint64
	// Number of times a group of pages have been read.
	VFSReadPages uint64
	// Number of pages written directly via mmap()'d files.
	VFSWritePage uint64
	// Number of times a group of pages have been written.
	VFSWritePages uint64
	// Number of times directory entries have been read with getdents().
	VFSGetdents uint64
	// Number of times attributes have been set on inodes.
	VFSSetattr uint64
	// Number of pending writes that have been forcefully flushed to the server.
	VFSFlush uint64
	// Number of times fsync() has been called on directories and files.
	VFSFsync uint64
	// Number of times locking has been attempted on a file.
	VFSLock uint64
	// Number of times files have been closed and released.
	VFSFileRelease uint64
	// Unknown. Possibly unused.
	CongestionWait uint64
	// Number of times files have been truncated.
	Truncation uint64
	// Number of times a file has been grown due to writes beyond its existing end.
	WriteExtension uint64
	// Number of times a file was removed while still open by another process.
	SillyRename uint64
	// Number of times the NFS server gave less data than expected while reading.
	ShortRead uint64
	// Number of times the NFS server wrote less data than expected while writing.
	ShortWrite uint64
	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
	// offline storage.
	JukeboxDelay uint64
	// Number of NFS v4.1+ pNFS reads.
	PNFSRead uint64
	// Number of NFS v4.1+ pNFS writes.
	PNFSWrite uint64
}
// A NFSOperationStats contains statistics for a single operation.
type NFSOperationStats struct {
	// The name of the operation.
	Operation string
	// Number of requests performed for this operation.
	Requests uint64
	// Number of times an actual RPC request has been transmitted for this operation.
	Transmissions uint64
	// Number of times a request has had a major timeout.
	MajorTimeouts uint64
	// Number of bytes sent for this operation, including RPC headers and payload.
	BytesSent uint64
	// Number of bytes received for this operation, including RPC headers and payload.
	BytesReceived uint64
	// Duration all requests spent queued for transmission before they were sent.
	CumulativeQueueTime time.Duration
	// Duration it took to get a reply back after the request was transmitted.
	CumulativeTotalResponseTime time.Duration
	// Duration from when a request was enqueued to when it was completely handled.
	CumulativeTotalRequestTime time.Duration
}
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
// responses.
type NFSTransportStats struct {
	// The transport protocol used for the NFS mount.
	Protocol string
	// The local port used for the NFS mount.
	Port uint64
	// Number of times the client has had to establish a connection from scratch
	// to the NFS server.
	Bind uint64
	// Number of times the client has made a TCP connection to the NFS server.
	Connect uint64
	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
	// spent waiting for connections to the server to be established.
	ConnectIdleTime uint64
	// Duration since the NFS mount last saw any RPC traffic.
	IdleTime time.Duration
	// Number of RPC requests for this mount sent to the NFS server.
	Sends uint64
	// Number of RPC responses for this mount received from the NFS server.
	Receives uint64
	// Number of times the NFS server sent a response with a transaction ID
	// unknown to this client.
	BadTransactionIDs uint64
	// A running counter, incremented on each request as the current difference
	// between sends and receives.
	CumulativeActiveRequests uint64
	// A running counter, incremented on each request by the current backlog
	// queue size.
	CumulativeBacklog uint64

	// Stats below only available with stat version 1.1.

	// Maximum number of simultaneously active RPC requests ever used.
	MaximumRPCSlotsUsed uint64
	// A running counter, incremented on each request as the current size of the
	// sending queue.
	CumulativeSendingQueue uint64
	// A running counter, incremented on each request as the current size of the
	// pending queue.
	CumulativePendingQueue uint64
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice | |||
// of Mount structures containing detailed information about each mount. | |||
// If available, statistics for each mount are parsed as well. | |||
func parseMountStats(r io.Reader) ([]*Mount, error) { | |||
const ( | |||
device = "device" | |||
statVersionPrefix = "statvers=" | |||
nfs3Type = "nfs" | |||
nfs4Type = "nfs4" | |||
) | |||
var mounts []*Mount | |||
s := bufio.NewScanner(r) | |||
for s.Scan() { | |||
// Only look for device entries in this function | |||
ss := strings.Fields(string(s.Bytes())) | |||
if len(ss) == 0 || ss[0] != device { | |||
continue | |||
} | |||
m, err := parseMount(ss) | |||
if err != nil { | |||
return nil, err | |||
} | |||
// Does this mount also possess statistics information? | |||
if len(ss) > deviceEntryLen { | |||
// Only NFSv3 and v4 are supported for parsing statistics | |||
if m.Type != nfs3Type && m.Type != nfs4Type { | |||
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) | |||
} | |||
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) | |||
stats, err := parseMountStatsNFS(s, statVersion) | |||
if err != nil { | |||
return nil, err | |||
} | |||
m.Stats = stats | |||
} | |||
mounts = append(mounts, m) | |||
} | |||
return mounts, s.Err() | |||
} | |||
// parseMount parses an entry in /proc/[pid]/mountstats in the format: | |||
// device [device] mounted on [mount] with fstype [type] | |||
func parseMount(ss []string) (*Mount, error) { | |||
if len(ss) < deviceEntryLen { | |||
return nil, fmt.Errorf("invalid device entry: %v", ss) | |||
} | |||
// Check for specific words appearing at specific indices to ensure | |||
// the format is consistent with what we expect | |||
format := []struct { | |||
i int | |||
s string | |||
}{ | |||
{i: 0, s: "device"}, | |||
{i: 2, s: "mounted"}, | |||
{i: 3, s: "on"}, | |||
{i: 5, s: "with"}, | |||
{i: 6, s: "fstype"}, | |||
} | |||
for _, f := range format { | |||
if ss[f.i] != f.s { | |||
return nil, fmt.Errorf("invalid device entry: %v", ss) | |||
} | |||
} | |||
return &Mount{ | |||
Device: ss[1], | |||
Mount: ss[4], | |||
Type: ss[7], | |||
}, nil | |||
} | |||
// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
// related to NFS statistics.
//
// The scanner s is positioned just after a "device ... statvers=X" line; this
// function consumes lines until either a blank line or the start of the
// per-operation statistics section is reached, leaving the scanner ready for
// parseNFSOperationStats.
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
	// Field indicators for parsing specific types of data
	const (
		fieldAge        = "age:"
		fieldBytes      = "bytes:"
		fieldEvents     = "events:"
		fieldPerOpStats = "per-op"
		fieldTransport  = "xprt:"
	)
	stats := &MountStatsNFS{
		StatVersion: statVersion,
	}
	for s.Scan() {
		ss := strings.Fields(string(s.Bytes()))
		// A blank line terminates this mount's statistics section.
		if len(ss) == 0 {
			break
		}
		if len(ss) < 2 {
			return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
		}
		switch ss[0] {
		case fieldAge:
			// Age integer is in seconds
			d, err := time.ParseDuration(ss[1] + "s")
			if err != nil {
				return nil, err
			}
			stats.Age = d
		case fieldBytes:
			bstats, err := parseNFSBytesStats(ss[1:])
			if err != nil {
				return nil, err
			}
			stats.Bytes = *bstats
		case fieldEvents:
			estats, err := parseNFSEventsStats(ss[1:])
			if err != nil {
				return nil, err
			}
			stats.Events = *estats
		case fieldTransport:
			// "xprt:" lines need at least a protocol plus two counters.
			if len(ss) < 3 {
				return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
			}

			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
			if err != nil {
				return nil, err
			}
			stats.Transport = *tstats
		}

		// When encountering "per-operation statistics", we must break this
		// loop and parse them separately to ensure we can terminate parsing
		// before reaching another device entry; hence why this 'if' statement
		// is not just another switch case
		if ss[0] == fieldPerOpStats {
			break
		}
	}

	if err := s.Err(); err != nil {
		return nil, err
	}

	// NFS per-operation stats appear last before the next device entry
	perOpStats, err := parseNFSOperationStats(s)
	if err != nil {
		return nil, err
	}
	stats.Operations = perOpStats

	return stats, nil
}
// parseNFSBytesStats parses a NFSBytesStats line using an input set of | |||
// integer fields. | |||
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { | |||
if len(ss) != fieldBytesLen { | |||
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) | |||
} | |||
ns := make([]uint64, 0, fieldBytesLen) | |||
for _, s := range ss { | |||
n, err := strconv.ParseUint(s, 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
ns = append(ns, n) | |||
} | |||
return &NFSBytesStats{ | |||
Read: ns[0], | |||
Write: ns[1], | |||
DirectRead: ns[2], | |||
DirectWrite: ns[3], | |||
ReadTotal: ns[4], | |||
WriteTotal: ns[5], | |||
ReadPages: ns[6], | |||
WritePages: ns[7], | |||
}, nil | |||
} | |||
// parseNFSEventsStats parses a NFSEventsStats line using an input set of | |||
// integer fields. | |||
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { | |||
if len(ss) != fieldEventsLen { | |||
return nil, fmt.Errorf("invalid NFS events stats: %v", ss) | |||
} | |||
ns := make([]uint64, 0, fieldEventsLen) | |||
for _, s := range ss { | |||
n, err := strconv.ParseUint(s, 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
ns = append(ns, n) | |||
} | |||
return &NFSEventsStats{ | |||
InodeRevalidate: ns[0], | |||
DnodeRevalidate: ns[1], | |||
DataInvalidate: ns[2], | |||
AttributeInvalidate: ns[3], | |||
VFSOpen: ns[4], | |||
VFSLookup: ns[5], | |||
VFSAccess: ns[6], | |||
VFSUpdatePage: ns[7], | |||
VFSReadPage: ns[8], | |||
VFSReadPages: ns[9], | |||
VFSWritePage: ns[10], | |||
VFSWritePages: ns[11], | |||
VFSGetdents: ns[12], | |||
VFSSetattr: ns[13], | |||
VFSFlush: ns[14], | |||
VFSFsync: ns[15], | |||
VFSLock: ns[16], | |||
VFSFileRelease: ns[17], | |||
CongestionWait: ns[18], | |||
Truncation: ns[19], | |||
WriteExtension: ns[20], | |||
SillyRename: ns[21], | |||
ShortRead: ns[22], | |||
ShortWrite: ns[23], | |||
JukeboxDelay: ns[24], | |||
PNFSRead: ns[25], | |||
PNFSWrite: ns[26], | |||
}, nil | |||
} | |||
// parseNFSOperationStats parses a slice of NFSOperationStats by scanning | |||
// additional information about per-operation statistics until an empty | |||
// line is reached. | |||
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { | |||
const ( | |||
// Number of expected fields in each per-operation statistics set | |||
numFields = 9 | |||
) | |||
var ops []NFSOperationStats | |||
for s.Scan() { | |||
ss := strings.Fields(string(s.Bytes())) | |||
if len(ss) == 0 { | |||
// Must break when reading a blank line after per-operation stats to | |||
// enable top-level function to parse the next device entry | |||
break | |||
} | |||
if len(ss) != numFields { | |||
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) | |||
} | |||
// Skip string operation name for integers | |||
ns := make([]uint64, 0, numFields-1) | |||
for _, st := range ss[1:] { | |||
n, err := strconv.ParseUint(st, 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
ns = append(ns, n) | |||
} | |||
ops = append(ops, NFSOperationStats{ | |||
Operation: strings.TrimSuffix(ss[0], ":"), | |||
Requests: ns[0], | |||
Transmissions: ns[1], | |||
MajorTimeouts: ns[2], | |||
BytesSent: ns[3], | |||
BytesReceived: ns[4], | |||
CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, | |||
CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, | |||
CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, | |||
}) | |||
} | |||
return ops, s.Err() | |||
} | |||
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
// integer fields matched to a specific stats version.
//
// ss is the remainder of an "xprt:" line (protocol followed by counters);
// statVersion selects the expected field count (1.0 vs 1.1, TCP vs UDP).
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
	// Extract the protocol field. It is the only string value in the line
	protocol := ss[0]
	ss = ss[1:]

	switch statVersion {
	case statVersion10:
		var expectedLength int
		if protocol == "tcp" {
			expectedLength = fieldTransport10TCPLen
		} else if protocol == "udp" {
			expectedLength = fieldTransport10UDPLen
		} else {
			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
		}
		if len(ss) != expectedLength {
			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
		}
	case statVersion11:
		var expectedLength int
		if protocol == "tcp" {
			expectedLength = fieldTransport11TCPLen
		} else if protocol == "udp" {
			expectedLength = fieldTransport11UDPLen
		} else {
			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
		}
		if len(ss) != expectedLength {
			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
		}
	default:
		return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
	}

	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
	// the TCP length here.
	//
	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
	// only v1.0 stats are present.
	// See: https://github.com/prometheus/node_exporter/issues/571.
	ns := make([]uint64, fieldTransport11TCPLen)
	for i, s := range ss {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}
		ns[i] = n
	}

	// The fields differ depending on the transport protocol (TCP or UDP)
	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
	//
	// For the udp RPC transport there is no connection count, connect idle time,
	// or idle time (fields #3, #4, and #5); all other fields are the same. So
	// we set them to 0 here.
	//
	// The append splice below inserts three zero-valued elements at index 2,
	// shifting the remaining UDP counters so the indexed accesses further
	// down line up for both protocols.
	if protocol == "udp" {
		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
	}

	return &NFSTransportStats{
		Protocol:                 protocol,
		Port:                     ns[0],
		Bind:                     ns[1],
		Connect:                  ns[2],
		ConnectIdleTime:          ns[3],
		IdleTime:                 time.Duration(ns[4]) * time.Second,
		Sends:                    ns[5],
		Receives:                 ns[6],
		BadTransactionIDs:        ns[7],
		CumulativeActiveRequests: ns[8],
		CumulativeBacklog:        ns[9],
		MaximumRPCSlotsUsed:      ns[10],
		CumulativeSendingQueue:   ns[11],
		CumulativePendingQueue:   ns[12],
	}, nil
}
@@ -0,0 +1,216 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package procfs | |||
import ( | |||
"bufio" | |||
"errors" | |||
"os" | |||
"sort" | |||
"strconv" | |||
"strings" | |||
) | |||
// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
// All counters are cumulative, as noted on the individual fields below.
type NetDevLine struct {
	Name         string `json:"name"`          // The name of the interface.
	RxBytes      uint64 `json:"rx_bytes"`      // Cumulative count of bytes received.
	RxPackets    uint64 `json:"rx_packets"`    // Cumulative count of packets received.
	RxErrors     uint64 `json:"rx_errors"`     // Cumulative count of receive errors encountered.
	RxDropped    uint64 `json:"rx_dropped"`    // Cumulative count of packets dropped while receiving.
	RxFIFO       uint64 `json:"rx_fifo"`       // Cumulative count of FIFO buffer errors.
	RxFrame      uint64 `json:"rx_frame"`      // Cumulative count of packet framing errors.
	RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
	RxMulticast  uint64 `json:"rx_multicast"`  // Cumulative count of multicast frames received by the device driver.
	TxBytes      uint64 `json:"tx_bytes"`      // Cumulative count of bytes transmitted.
	TxPackets    uint64 `json:"tx_packets"`    // Cumulative count of packets transmitted.
	TxErrors     uint64 `json:"tx_errors"`     // Cumulative count of transmit errors encountered.
	TxDropped    uint64 `json:"tx_dropped"`    // Cumulative count of packets dropped while transmitting.
	TxFIFO       uint64 `json:"tx_fifo"`       // Cumulative count of FIFO buffer errors.
	TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
	TxCarrier    uint64 `json:"tx_carrier"`    // Cumulative count of carrier losses detected by the device driver.
	TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
}

// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
// are interface names.
type NetDev map[string]NetDevLine
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
// It opens the default proc filesystem mount point and delegates to
// FS.NewNetDev.
func NewNetDev() (NetDev, error) {
	fs, err := NewFS(DefaultMountPoint)
	if err != nil {
		return nil, err
	}

	return fs.NewNetDev()
}

// NewNetDev returns kernel/system statistics read from /proc/net/dev.
func (fs FS) NewNetDev() (NetDev, error) {
	return newNetDev(fs.Path("net/dev"))
}

// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev,
// i.e. the network device statistics as seen by this process.
func (p Proc) NewNetDev() (NetDev, error) {
	return newNetDev(p.path("net/dev"))
}
// newNetDev creates a new NetDev from the contents of the given file. | |||
func newNetDev(file string) (NetDev, error) { | |||
f, err := os.Open(file) | |||
if err != nil { | |||
return NetDev{}, err | |||
} | |||
defer f.Close() | |||
nd := NetDev{} | |||
s := bufio.NewScanner(f) | |||
for n := 0; s.Scan(); n++ { | |||
// Skip the 2 header lines. | |||
if n < 2 { | |||
continue | |||
} | |||
line, err := nd.parseLine(s.Text()) | |||
if err != nil { | |||
return nd, err | |||
} | |||
nd[line.Name] = *line | |||
} | |||
return nd, s.Err() | |||
} | |||
// parseLine parses a single line from the /proc/net/dev file. Header lines | |||
// must be filtered prior to calling this method. | |||
func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { | |||
parts := strings.SplitN(rawLine, ":", 2) | |||
if len(parts) != 2 { | |||
return nil, errors.New("invalid net/dev line, missing colon") | |||
} | |||
fields := strings.Fields(strings.TrimSpace(parts[1])) | |||
var err error | |||
line := &NetDevLine{} | |||
// Interface Name | |||
line.Name = strings.TrimSpace(parts[0]) | |||
if line.Name == "" { | |||
return nil, errors.New("invalid net/dev line, empty interface name") | |||
} | |||
// RX | |||
line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
// TX | |||
line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return line, nil | |||
} | |||
// Total aggregates the values across interfaces and returns a new NetDevLine. | |||
// The Name field will be a sorted comma separated list of interface names. | |||
func (nd NetDev) Total() NetDevLine { | |||
total := NetDevLine{} | |||
names := make([]string, 0, len(nd)) | |||
for _, ifc := range nd { | |||
names = append(names, ifc.Name) | |||
total.RxBytes += ifc.RxBytes | |||
total.RxPackets += ifc.RxPackets | |||
total.RxPackets += ifc.RxPackets | |||
total.RxErrors += ifc.RxErrors | |||
total.RxDropped += ifc.RxDropped | |||
total.RxFIFO += ifc.RxFIFO | |||
total.RxFrame += ifc.RxFrame | |||
total.RxCompressed += ifc.RxCompressed | |||
total.RxMulticast += ifc.RxMulticast | |||
total.TxBytes += ifc.TxBytes | |||
total.TxPackets += ifc.TxPackets | |||
total.TxErrors += ifc.TxErrors | |||
total.TxDropped += ifc.TxDropped | |||
total.TxFIFO += ifc.TxFIFO | |||
total.TxCollisions += ifc.TxCollisions | |||
total.TxCarrier += ifc.TxCarrier | |||
total.TxCompressed += ifc.TxCompressed | |||
} | |||
sort.Strings(names) | |||
total.Name = strings.Join(names, ", ") | |||
return total | |||
} |
@@ -0,0 +1,263 @@ | |||
// Copyright 2018 The Prometheus Authors | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Package nfs implements parsing of /proc/net/rpc/nfsd. | |||
// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ | |||
package nfs | |||
// ReplyCache models the "rc" line.
type ReplyCache struct {
	Hits    uint64 // Reply cache hits.
	Misses  uint64 // Reply cache misses.
	NoCache uint64 // Requests that did not use the reply cache.
}

// FileHandles models the "fh" line.
type FileHandles struct {
	Stale uint64 // Stale file handles encountered.
	// NOTE(review): the four lookup-related counters below mirror the kernel's
	// "fh" line positions; on modern kernels they are reported as zero —
	// confirm against the nfsd stats documentation linked in the package doc.
	TotalLookups uint64
	AnonLookups  uint64
	DirNoCache   uint64
	NoDirNoCache uint64
}

// InputOutput models the "io" line.
type InputOutput struct {
	Read  uint64 // Bytes read directly from disk.
	Write uint64 // Bytes written to disk.
}

// Threads models the "th" line.
type Threads struct {
	Threads uint64 // Number of nfsd threads.
	FullCnt uint64 // The "fullcnt" counter from the "th" line.
}

// ReadAheadCache models the "ra" line.
type ReadAheadCache struct {
	CacheSize      uint64   // Read-ahead cache size.
	CacheHistogram []uint64 // Per-depth hit histogram of the read-ahead cache.
	NotFound       uint64   // Lookups that missed the read-ahead cache.
}

// Network models the "net" line.
type Network struct {
	NetCount   uint64 // Total packets received.
	UDPCount   uint64 // Packets received over UDP.
	TCPCount   uint64 // Packets received over TCP.
	TCPConnect uint64 // TCP connections accepted.
}

// ClientRPC models the nfs "rpc" line.
type ClientRPC struct {
	RPCCount        uint64 // Total RPC calls made.
	Retransmissions uint64 // RPC retransmissions.
	AuthRefreshes   uint64 // RPC credential refreshes.
}

// ServerRPC models the nfsd "rpc" line.
type ServerRPC struct {
	RPCCount uint64 // Total RPC requests handled.
	BadCnt   uint64 // Total bad RPC requests.
	BadFmt   uint64 // Badly formatted requests.
	BadAuth  uint64 // Requests with bad authentication.
	// NOTE(review): likely the kernel's "badclnt" counter; the field name is
	// kept as-is for API compatibility — confirm against nfsd stats source.
	BadcInt uint64
}
// V2Stats models the "proc2" line. Each field counts invocations of the
// NFSv2 procedure of the same name.
type V2Stats struct {
	Null     uint64
	GetAttr  uint64
	SetAttr  uint64
	Root     uint64
	Lookup   uint64
	ReadLink uint64
	Read     uint64
	WrCache  uint64
	Write    uint64
	Create   uint64
	Remove   uint64
	Rename   uint64
	Link     uint64
	SymLink  uint64
	MkDir    uint64
	RmDir    uint64
	ReadDir  uint64
	FsStat   uint64
}

// V3Stats models the "proc3" line. Each field counts invocations of the
// NFSv3 procedure of the same name.
type V3Stats struct {
	Null        uint64
	GetAttr     uint64
	SetAttr     uint64
	Lookup      uint64
	Access      uint64
	ReadLink    uint64
	Read        uint64
	Write       uint64
	Create      uint64
	MkDir       uint64
	SymLink     uint64
	MkNod       uint64
	Remove      uint64
	RmDir       uint64
	Rename      uint64
	Link        uint64
	ReadDir     uint64
	ReadDirPlus uint64
	FsStat      uint64
	FsInfo      uint64
	PathConf    uint64
	Commit      uint64
}
// ClientV4Stats models the nfs "proc4" line. Each field counts client-side
// invocations of the NFSv4 operation of the same name; later fields exist
// only for minor versions that define the corresponding operation.
type ClientV4Stats struct {
	Null               uint64
	Read               uint64
	Write              uint64
	Commit             uint64
	Open               uint64
	OpenConfirm        uint64
	OpenNoattr         uint64
	OpenDowngrade      uint64
	Close              uint64
	Setattr            uint64
	FsInfo             uint64
	Renew              uint64
	SetClientID        uint64
	SetClientIDConfirm uint64
	Lock               uint64
	Lockt              uint64
	Locku              uint64
	Access             uint64
	Getattr            uint64
	Lookup             uint64
	LookupRoot         uint64
	Remove             uint64
	Rename             uint64
	Link               uint64
	Symlink            uint64
	Create             uint64
	Pathconf           uint64
	StatFs             uint64
	ReadLink           uint64
	ReadDir            uint64
	ServerCaps         uint64
	DelegReturn        uint64
	GetACL             uint64
	SetACL             uint64
	FsLocations        uint64
	ReleaseLockowner   uint64
	Secinfo            uint64
	FsidPresent        uint64
	ExchangeID         uint64
	CreateSession      uint64
	DestroySession     uint64
	Sequence           uint64
	GetLeaseTime       uint64
	ReclaimComplete    uint64
	LayoutGet          uint64
	GetDeviceInfo      uint64
	LayoutCommit       uint64
	LayoutReturn       uint64
	SecinfoNoName      uint64
	TestStateID        uint64
	FreeStateID        uint64
	GetDeviceList      uint64
	BindConnToSession  uint64
	DestroyClientID    uint64
	Seek               uint64
	Allocate           uint64
	DeAllocate         uint64
	LayoutStats        uint64
	Clone              uint64
}

// ServerV4Stats models the nfsd "proc4" line.
type ServerV4Stats struct {
	Null     uint64 // NULL procedure calls.
	Compound uint64 // COMPOUND procedure calls.
}
// V4Ops models the "proc4ops" line: NFSv4 operations
// Variable list, see:
// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
//
// Each field counts server-side executions of the operation with the
// matching NFSv4 operation number; the first three slots are reserved by
// the protocol and never used for real operations.
type V4Ops struct {
	//Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
	Op0Unused    uint64 // Operation number 0 is reserved/unused.
	Op1Unused    uint64 // Operation number 1 is reserved/unused.
	Op2Future    uint64 // Operation number 2 is reserved for future use.
	Access       uint64
	Close        uint64
	Commit       uint64
	Create       uint64
	DelegPurge   uint64
	DelegReturn  uint64
	GetAttr      uint64
	GetFH        uint64
	Link         uint64
	Lock         uint64
	Lockt        uint64
	Locku        uint64
	Lookup       uint64
	LookupRoot   uint64
	Nverify      uint64
	Open         uint64
	OpenAttr     uint64
	OpenConfirm  uint64
	OpenDgrd     uint64
	PutFH        uint64
	PutPubFH     uint64
	PutRootFH    uint64
	Read         uint64
	ReadDir      uint64
	ReadLink     uint64
	Remove       uint64
	Rename       uint64
	Renew        uint64
	RestoreFH    uint64
	SaveFH       uint64
	SecInfo      uint64
	SetAttr      uint64
	Verify       uint64
	Write        uint64
	RelLockOwner uint64
}
// ClientRPCStats models all stats from /proc/net/rpc/nfs.
type ClientRPCStats struct {
	Network       Network       // The "net" line.
	ClientRPC     ClientRPC     // The "rpc" line.
	V2Stats       V2Stats       // The "proc2" line.
	V3Stats       V3Stats       // The "proc3" line.
	ClientV4Stats ClientV4Stats // The "proc4" line.
}

// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
type ServerRPCStats struct {
	ReplyCache     ReplyCache     // The "rc" line.
	FileHandles    FileHandles    // The "fh" line.
	InputOutput    InputOutput    // The "io" line.
	Threads        Threads        // The "th" line.
	ReadAheadCache ReadAheadCache // The "ra" line.
	Network        Network        // The "net" line.
	ServerRPC      ServerRPC      // The "rpc" line.
	V2Stats        V2Stats        // The "proc2" line.
	V3Stats        V3Stats        // The "proc3" line.
	ServerV4Stats  ServerV4Stats  // The "proc4" line.
	V4Ops          V4Ops          // The "proc4ops" line.
}