Update containerd dependencies
This updates the containerd dependencies to match the versions used by the vendored containerd version.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 31a9c9e79101cdf38d383104afbc1b48ede75291)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Upstream-commit: 5070e418b806cc96ad0f5b3ac32c8d416ff8449a
Component: engine
@@ -76,7 +76,7 @@ google.golang.org/grpc v1.12.0
 # This does not need to match RUNC_COMMIT as it is used for helper packages but should be newer or equal
 github.com/opencontainers/runc 20aff4f0488c6d4b8df4d85b4f63f1f704c11abd
-github.com/opencontainers/runtime-spec d810dbc60d8c5aeeb3d054bd1132fab2121968ce # v1.0.1-43-gd810dbc
+github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 # v1.0.1-45-geba862d
 github.com/opencontainers/image-spec v1.0.1
 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0

@@ -144,8 +144,8 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
 github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
 github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
 github.com/matttproud/golang_protobuf_extensions v1.0.0
-github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
-github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
+github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d # v0.8.0
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0

 # cli
 github.com/spf13/cobra v0.0.3

@@ -156,7 +156,7 @@ github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.
 # metrics
 github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18

-github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd
+github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a

 # archive/tar (for Go 1.10, see https://github.com/golang/go/issues/24787)
components/engine/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md (20 changes; generated, vendored)
@@ -3,6 +3,8 @@
 [](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
 [](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
 [](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
+[](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge)
+[](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus)
 [](LICENSE)

 [Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.

@@ -36,7 +38,7 @@ import "github.com/grpc-ecosystem/go-grpc-prometheus"
 // After all your registrations, make sure all of the Prometheus metrics are initialized.
 grpc_prometheus.Register(myServer)
 // Register Prometheus metrics handler.
-http.Handle("/metrics", prometheus.Handler())
+http.Handle("/metrics", promhttp.Handler())
 ...
 ```

@@ -47,8 +49,8 @@ import "github.com/grpc-ecosystem/go-grpc-prometheus"
 ...
 clientConn, err = grpc.Dial(
     address,
-    grpc.WithUnaryInterceptor(UnaryClientInterceptor),
-    grpc.WithStreamInterceptor(StreamClientInterceptor)
+    grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+    grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor)
 )
 client = pb_testproto.NewTestServiceClient(clientConn)
 resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})

@@ -116,7 +118,7 @@ each of the 20 messages sent back, a counter will be incremented:
 grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
 ```

-After the call completes, it's status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
+After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
 and the relevant call labels increment the `grpc_server_handled_total` counter.

 ```jsoniq

@@ -126,8 +128,8 @@ grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mw
 ## Histograms

 [Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
-to measure latency distributions of your RPCs. However since it is bad practice to have metrics
-of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels))
+to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
+of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels)
 the latency monitoring metrics are disabled by default. To enable them please call the following
 in your server initialization code:

@@ -135,8 +137,8 @@ in your server initialization code:
 grpc_prometheus.EnableHandlingTimeHistogram()
 ```

-After the call completes, it's handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
-variable `grpc_server_handling_seconds`. It contains three sub-metrics:
+After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
+variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:

 * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method
 * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for

@@ -166,7 +168,7 @@ grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_se
 ## Useful query examples

-Prometheus philosophy is to provide the most detailed metrics possible to the monitoring system, and
+Prometheus philosophy is to provide raw metrics to the monitoring system, and
 let the aggregations be handled there. The verbosity of above metrics make it possible to have that
 flexibility. Here's a couple of useful monitoring queries:
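The README changes above dovetail with the histogram options this commit vendors in metric_options.go (shown later). A minimal server-side wiring sketch under those APIs; the service registration step is elided and the bucket values are made up for illustration:

```go
package main

import (
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

func main() {
	// Create the server with the package-level default interceptors.
	server := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	)
	// ... register your gRPC services on `server` here ...

	// Opt in to latency histograms; custom buckets are optional.
	grpc_prometheus.EnableHandlingTimeHistogram(
		grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.5, 1, 5}),
	)

	// Pre-initialize all counters to 0, *after* all services are registered.
	grpc_prometheus.Register(server)

	// Expose the default Prometheus registry over HTTP, per the README.
	http.Handle("/metrics", promhttp.Handler())
	// ... http.ListenAndServe and server.Serve elided ...
}
```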
components/engine/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go (85 changes; generated, vendored)
@@ -6,67 +6,34 @@
 package grpc_prometheus

 import (
-	"io"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
 	prom "github.com/prometheus/client_golang/prometheus"
 )

-// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
-func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-	monitor := newClientReporter(Unary, method)
-	monitor.SentMessage()
-	err := invoker(ctx, method, req, reply, cc, opts...)
-	if err != nil {
-		monitor.ReceivedMessage()
-	}
-	monitor.Handled(grpc.Code(err))
-	return err
+var (
+	// DefaultClientMetrics is the default instance of ClientMetrics. It is
+	// intended to be used in conjunction the default Prometheus metrics
+	// registry.
+	DefaultClientMetrics = NewClientMetrics()
+
+	// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+	UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor()
+
+	// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+	StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor()
+)
+
+func init() {
+	prom.MustRegister(DefaultClientMetrics.clientStartedCounter)
+	prom.MustRegister(DefaultClientMetrics.clientHandledCounter)
+	prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived)
+	prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent)
 }

-// StreamServerInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
-func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
-	monitor := newClientReporter(clientStreamType(desc), method)
-	clientStream, err := streamer(ctx, desc, cc, method, opts...)
-	if err != nil {
-		monitor.Handled(grpc.Code(err))
-		return nil, err
-	}
-	return &monitoredClientStream{clientStream, monitor}, nil
-}
-
-func clientStreamType(desc *grpc.StreamDesc) grpcType {
-	if desc.ClientStreams && !desc.ServerStreams {
-		return ClientStream
-	} else if !desc.ClientStreams && desc.ServerStreams {
-		return ServerStream
-	}
-	return BidiStream
-}
-
-// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
-type monitoredClientStream struct {
-	grpc.ClientStream
-	monitor *clientReporter
-}
-
-func (s *monitoredClientStream) SendMsg(m interface{}) error {
-	err := s.ClientStream.SendMsg(m)
-	if err == nil {
-		s.monitor.SentMessage()
-	}
-	return err
-}
-
-func (s *monitoredClientStream) RecvMsg(m interface{}) error {
-	err := s.ClientStream.RecvMsg(m)
-	if err == nil {
-		s.monitor.ReceivedMessage()
-	} else if err == io.EOF {
-		s.monitor.Handled(codes.OK)
-	} else {
-		s.monitor.Handled(grpc.Code(err))
-	}
-	return err
+// EnableClientHandlingTimeHistogram turns on recording of handling time of
+// RPCs. Histogram metrics can be very expensive for Prometheus to retain and
+// query. This function acts on the DefaultClientMetrics variable and the
+// default Prometheus metrics registry.
+func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...)
+	prom.Register(DefaultClientMetrics.clientHandledHistogram)
 }
components/engine/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go (170 changes; generated, vendored, new file)
@@ -0,0 +1,170 @@
package grpc_prometheus

import (
	"io"

	prom "github.com/prometheus/client_golang/prometheus"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ClientMetrics represents a collection of metrics to be registered on a
// Prometheus metrics registry for a gRPC client.
type ClientMetrics struct {
	clientStartedCounter          *prom.CounterVec
	clientHandledCounter          *prom.CounterVec
	clientStreamMsgReceived       *prom.CounterVec
	clientStreamMsgSent           *prom.CounterVec
	clientHandledHistogramEnabled bool
	clientHandledHistogramOpts    prom.HistogramOpts
	clientHandledHistogram        *prom.HistogramVec
}

// NewClientMetrics returns a ClientMetrics object. Use a new instance of
// ClientMetrics when not using the default Prometheus metrics registry, for
// example when wanting to control which metrics are added to a registry as
// opposed to automatically adding metrics via init functions.
func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics {
	opts := counterOptions(counterOpts)
	return &ClientMetrics{
		clientStartedCounter: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_client_started_total",
				Help: "Total number of RPCs started on the client.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),

		clientHandledCounter: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_client_handled_total",
				Help: "Total number of RPCs completed by the client, regardless of success or failure.",
			}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),

		clientStreamMsgReceived: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_client_msg_received_total",
				Help: "Total number of RPC stream messages received by the client.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),

		clientStreamMsgSent: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_client_msg_sent_total",
				Help: "Total number of gRPC stream messages sent by the client.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),

		clientHandledHistogramEnabled: false,
		clientHandledHistogramOpts: prom.HistogramOpts{
			Name:    "grpc_client_handling_seconds",
			Help:    "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
			Buckets: prom.DefBuckets,
		},
		clientHandledHistogram: nil,
	}
}

// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent.
func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) {
	m.clientStartedCounter.Describe(ch)
	m.clientHandledCounter.Describe(ch)
	m.clientStreamMsgReceived.Describe(ch)
	m.clientStreamMsgSent.Describe(ch)
	if m.clientHandledHistogramEnabled {
		m.clientHandledHistogram.Describe(ch)
	}
}

// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent.
func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {
	m.clientStartedCounter.Collect(ch)
	m.clientHandledCounter.Collect(ch)
	m.clientStreamMsgReceived.Collect(ch)
	m.clientStreamMsgSent.Collect(ch)
	if m.clientHandledHistogramEnabled {
		m.clientHandledHistogram.Collect(ch)
	}
}

// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
// Histogram metrics can be very expensive for Prometheus to retain and query.
func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
	for _, o := range opts {
		o(&m.clientHandledHistogramOpts)
	}
	if !m.clientHandledHistogramEnabled {
		m.clientHandledHistogram = prom.NewHistogramVec(
			m.clientHandledHistogramOpts,
			[]string{"grpc_type", "grpc_service", "grpc_method"},
		)
	}
	m.clientHandledHistogramEnabled = true
}

// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		monitor := newClientReporter(m, Unary, method)
		monitor.SentMessage()
		err := invoker(ctx, method, req, reply, cc, opts...)
		if err != nil {
			monitor.ReceivedMessage()
		}
		st, _ := status.FromError(err)
		monitor.Handled(st.Code())
		return err
	}
}

// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		monitor := newClientReporter(m, clientStreamType(desc), method)
		clientStream, err := streamer(ctx, desc, cc, method, opts...)
		if err != nil {
			st, _ := status.FromError(err)
			monitor.Handled(st.Code())
			return nil, err
		}
		return &monitoredClientStream{clientStream, monitor}, nil
	}
}

func clientStreamType(desc *grpc.StreamDesc) grpcType {
	if desc.ClientStreams && !desc.ServerStreams {
		return ClientStream
	} else if !desc.ClientStreams && desc.ServerStreams {
		return ServerStream
	}
	return BidiStream
}

// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
type monitoredClientStream struct {
	grpc.ClientStream
	monitor *clientReporter
}

func (s *monitoredClientStream) SendMsg(m interface{}) error {
	err := s.ClientStream.SendMsg(m)
	if err == nil {
		s.monitor.SentMessage()
	}
	return err
}

func (s *monitoredClientStream) RecvMsg(m interface{}) error {
	err := s.ClientStream.RecvMsg(m)
	if err == nil {
		s.monitor.ReceivedMessage()
	} else if err == io.EOF {
		s.monitor.Handled(codes.OK)
	} else {
		st, _ := status.FromError(err)
		s.monitor.Handled(st.Code())
	}
	return err
}
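As the NewClientMetrics doc comment above suggests, a caller that avoids the default registry can wire everything explicitly. A minimal sketch; the target address is a placeholder:

```go
package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
)

func main() {
	// A private registry instead of the package-global default one.
	reg := prometheus.NewRegistry()

	// ClientMetrics implements prometheus.Collector (Describe/Collect above),
	// so the whole bundle registers as a single collector.
	clientMetrics := grpc_prometheus.NewClientMetrics()
	reg.MustRegister(clientMetrics)

	// Attach the per-instance interceptors to a connection.
	conn, err := grpc.Dial(
		"localhost:9090", // placeholder address
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(clientMetrics.UnaryClientInterceptor()),
		grpc.WithStreamInterceptor(clientMetrics.StreamClientInterceptor()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// ... issue RPCs over `conn`; expose `reg` via promhttp.HandlerFor ...
}
```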
components/engine/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go (91 changes; generated, vendored)
@@ -7,105 +7,40 @@ import (
 	"time"

 	"google.golang.org/grpc/codes"
-
-	prom "github.com/prometheus/client_golang/prometheus"
 )

-var (
-	clientStartedCounter = prom.NewCounterVec(
-		prom.CounterOpts{
-			Namespace: "grpc",
-			Subsystem: "client",
-			Name:      "started_total",
-			Help:      "Total number of RPCs started on the client.",
-		}, []string{"grpc_type", "grpc_service", "grpc_method"})
-
-	clientHandledCounter = prom.NewCounterVec(
-		prom.CounterOpts{
-			Namespace: "grpc",
-			Subsystem: "client",
-			Name:      "handled_total",
-			Help:      "Total number of RPCs completed by the client, regardless of success or failure.",
-		}, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"})
-
-	clientStreamMsgReceived = prom.NewCounterVec(
-		prom.CounterOpts{
-			Namespace: "grpc",
-			Subsystem: "client",
-			Name:      "msg_received_total",
-			Help:      "Total number of RPC stream messages received by the client.",
-		}, []string{"grpc_type", "grpc_service", "grpc_method"})
-
-	clientStreamMsgSent = prom.NewCounterVec(
-		prom.CounterOpts{
-			Namespace: "grpc",
-			Subsystem: "client",
-			Name:      "msg_sent_total",
-			Help:      "Total number of gRPC stream messages sent by the client.",
-		}, []string{"grpc_type", "grpc_service", "grpc_method"})
-
-	clientHandledHistogramEnabled = false
-	clientHandledHistogramOpts    = prom.HistogramOpts{
-		Namespace: "grpc",
-		Subsystem: "client",
-		Name:      "handling_seconds",
-		Help:      "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
-		Buckets:   prom.DefBuckets,
-	}
-	clientHandledHistogram *prom.HistogramVec
-)
-
-func init() {
-	prom.MustRegister(clientStartedCounter)
-	prom.MustRegister(clientHandledCounter)
-	prom.MustRegister(clientStreamMsgReceived)
-	prom.MustRegister(clientStreamMsgSent)
-}
-
-// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
-// Histogram metrics can be very expensive for Prometheus to retain and query.
-func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
-	for _, o := range opts {
-		o(&clientHandledHistogramOpts)
-	}
-	if !clientHandledHistogramEnabled {
-		clientHandledHistogram = prom.NewHistogramVec(
-			clientHandledHistogramOpts,
-			[]string{"grpc_type", "grpc_service", "grpc_method"},
-		)
-		prom.Register(clientHandledHistogram)
-	}
-	clientHandledHistogramEnabled = true
-}
-
 type clientReporter struct {
+	metrics     *ClientMetrics
 	rpcType     grpcType
 	serviceName string
 	methodName  string
 	startTime   time.Time
 }

-func newClientReporter(rpcType grpcType, fullMethod string) *clientReporter {
-	r := &clientReporter{rpcType: rpcType}
-	if clientHandledHistogramEnabled {
+func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter {
+	r := &clientReporter{
+		metrics: m,
+		rpcType: rpcType,
+	}
+	if r.metrics.clientHandledHistogramEnabled {
 		r.startTime = time.Now()
 	}
 	r.serviceName, r.methodName = splitMethodName(fullMethod)
-	clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
 	return r
 }

 func (r *clientReporter) ReceivedMessage() {
-	clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
 }

 func (r *clientReporter) SentMessage() {
-	clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
 }

 func (r *clientReporter) Handled(code codes.Code) {
-	clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
-	if clientHandledHistogramEnabled {
-		clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if r.metrics.clientHandledHistogramEnabled {
+		r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
 	}
 }
components/engine/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go (41 changes; generated, vendored, new file)
@@ -0,0 +1,41 @@
package grpc_prometheus

import (
	prom "github.com/prometheus/client_golang/prometheus"
)

// A CounterOption lets you add options to Counter metrics using With* funcs.
type CounterOption func(*prom.CounterOpts)

type counterOptions []CounterOption

func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts {
	for _, f := range co {
		f(&o)
	}
	return o
}

// WithConstLabels allows you to add ConstLabels to Counter metrics.
func WithConstLabels(labels prom.Labels) CounterOption {
	return func(o *prom.CounterOpts) {
		o.ConstLabels = labels
	}
}

// A HistogramOption lets you add options to Histogram metrics using With*
// funcs.
type HistogramOption func(*prom.HistogramOpts)

// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
func WithHistogramBuckets(buckets []float64) HistogramOption {
	return func(o *prom.HistogramOpts) { o.Buckets = buckets }
}

// WithHistogramConstLabels allows you to add custom ConstLabels to
// histograms metrics.
func WithHistogramConstLabels(labels prom.Labels) HistogramOption {
	return func(o *prom.HistogramOpts) {
		o.ConstLabels = labels
	}
}
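These options are plain functional options over the Prometheus Opts structs. A short sketch of how they compose with the constructors vendored in this commit (NewServerMetrics appears below); the "replica" label is made up for illustration:

```go
// Tag every counter produced by this ServerMetrics instance with a
// constant label, e.g. to distinguish replicas sharing one registry.
serverMetrics := grpc_prometheus.NewServerMetrics(
	grpc_prometheus.WithConstLabels(prometheus.Labels{"replica": "a"}), // hypothetical label
)

// Histogram options are applied when the histogram vector is created.
serverMetrics.EnableHandlingTimeHistogram(
	grpc_prometheus.WithHistogramConstLabels(prometheus.Labels{"replica": "a"}),
	grpc_prometheus.WithHistogramBuckets([]float64{0.005, 0.05, 0.5, 5}),
)
```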
components/engine/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go (92 changes; generated, vendored)
@@ -6,69 +6,43 @@
 package grpc_prometheus

 import (
-	"golang.org/x/net/context"
+	prom "github.com/prometheus/client_golang/prometheus"
 	"google.golang.org/grpc"
 )

-// PreregisterServices takes a gRPC server and pre-initializes all counters to 0.
-// This allows for easier monitoring in Prometheus (no missing metrics), and should be called *after* all services have
-// been registered with the server.
+var (
+	// DefaultServerMetrics is the default instance of ServerMetrics. It is
+	// intended to be used in conjunction the default Prometheus metrics
+	// registry.
+	DefaultServerMetrics = NewServerMetrics()
+
+	// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+	UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor()
+
+	// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+	StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor()
+)
+
+func init() {
+	prom.MustRegister(DefaultServerMetrics.serverStartedCounter)
+	prom.MustRegister(DefaultServerMetrics.serverHandledCounter)
+	prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived)
+	prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent)
+}
+
+// Register takes a gRPC server and pre-initializes all counters to 0. This
+// allows for easier monitoring in Prometheus (no missing metrics), and should
+// be called *after* all services have been registered with the server. This
+// function acts on the DefaultServerMetrics variable.
 func Register(server *grpc.Server) {
-	serviceInfo := server.GetServiceInfo()
-	for serviceName, info := range serviceInfo {
-		for _, mInfo := range info.Methods {
-			preRegisterMethod(serviceName, &mInfo)
-		}
-	}
+	DefaultServerMetrics.InitializeMetrics(server)
 }

-// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
-func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-	monitor := newServerReporter(Unary, info.FullMethod)
-	monitor.ReceivedMessage()
-	resp, err := handler(ctx, req)
-	monitor.Handled(grpc.Code(err))
-	if err == nil {
-		monitor.SentMessage()
-	}
-	return resp, err
-}
-
-// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
-func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-	monitor := newServerReporter(streamRpcType(info), info.FullMethod)
-	err := handler(srv, &monitoredServerStream{ss, monitor})
-	monitor.Handled(grpc.Code(err))
-	return err
-}
-
-func streamRpcType(info *grpc.StreamServerInfo) grpcType {
-	if info.IsClientStream && !info.IsServerStream {
-		return ClientStream
-	} else if !info.IsClientStream && info.IsServerStream {
-		return ServerStream
-	}
-	return BidiStream
-}
-
-// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
-type monitoredServerStream struct {
-	grpc.ServerStream
-	monitor *serverReporter
-}
-
-func (s *monitoredServerStream) SendMsg(m interface{}) error {
-	err := s.ServerStream.SendMsg(m)
-	if err == nil {
-		s.monitor.SentMessage()
-	}
-	return err
-}
-
-func (s *monitoredServerStream) RecvMsg(m interface{}) error {
-	err := s.ServerStream.RecvMsg(m)
-	if err == nil {
-		s.monitor.ReceivedMessage()
-	}
-	return err
+// EnableHandlingTimeHistogram turns on recording of handling time
+// of RPCs. Histogram metrics can be very expensive for Prometheus
+// to retain and query. This function acts on the DefaultServerMetrics
+// variable and the default Prometheus metrics registry.
+func EnableHandlingTimeHistogram(opts ...HistogramOption) {
+	DefaultServerMetrics.EnableHandlingTimeHistogram(opts...)
+	prom.Register(DefaultServerMetrics.serverHandledHistogram)
 }
components/engine/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go (185 changes; generated, vendored, new file)
@@ -0,0 +1,185 @@
package grpc_prometheus

import (
	prom "github.com/prometheus/client_golang/prometheus"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/status"
)

// ServerMetrics represents a collection of metrics to be registered on a
// Prometheus metrics registry for a gRPC server.
type ServerMetrics struct {
	serverStartedCounter          *prom.CounterVec
	serverHandledCounter          *prom.CounterVec
	serverStreamMsgReceived      *prom.CounterVec
	serverStreamMsgSent           *prom.CounterVec
	serverHandledHistogramEnabled bool
	serverHandledHistogramOpts    prom.HistogramOpts
	serverHandledHistogram        *prom.HistogramVec
}

// NewServerMetrics returns a ServerMetrics object. Use a new instance of
// ServerMetrics when not using the default Prometheus metrics registry, for
// example when wanting to control which metrics are added to a registry as
// opposed to automatically adding metrics via init functions.
func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics {
	opts := counterOptions(counterOpts)
	return &ServerMetrics{
		serverStartedCounter: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_server_started_total",
				Help: "Total number of RPCs started on the server.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
		serverHandledCounter: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_server_handled_total",
				Help: "Total number of RPCs completed on the server, regardless of success or failure.",
			}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
		serverStreamMsgReceived: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_server_msg_received_total",
				Help: "Total number of RPC stream messages received on the server.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
		serverStreamMsgSent: prom.NewCounterVec(
			opts.apply(prom.CounterOpts{
				Name: "grpc_server_msg_sent_total",
				Help: "Total number of gRPC stream messages sent by the server.",
			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
		serverHandledHistogramEnabled: false,
		serverHandledHistogramOpts: prom.HistogramOpts{
			Name:    "grpc_server_handling_seconds",
			Help:    "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
			Buckets: prom.DefBuckets,
		},
		serverHandledHistogram: nil,
	}
}

// EnableHandlingTimeHistogram enables histograms being registered when
// registering the ServerMetrics on a Prometheus registry. Histograms can be
// expensive on Prometheus servers. It takes options to configure histogram
// options such as the defined buckets.
func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) {
	for _, o := range opts {
		o(&m.serverHandledHistogramOpts)
	}
	if !m.serverHandledHistogramEnabled {
		m.serverHandledHistogram = prom.NewHistogramVec(
			m.serverHandledHistogramOpts,
			[]string{"grpc_type", "grpc_service", "grpc_method"},
		)
	}
	m.serverHandledHistogramEnabled = true
}

// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent.
func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) {
	m.serverStartedCounter.Describe(ch)
	m.serverHandledCounter.Describe(ch)
	m.serverStreamMsgReceived.Describe(ch)
	m.serverStreamMsgSent.Describe(ch)
	if m.serverHandledHistogramEnabled {
		m.serverHandledHistogram.Describe(ch)
	}
}

// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent.
func (m *ServerMetrics) Collect(ch chan<- prom.Metric) {
	m.serverStartedCounter.Collect(ch)
	m.serverHandledCounter.Collect(ch)
	m.serverStreamMsgReceived.Collect(ch)
	m.serverStreamMsgSent.Collect(ch)
	if m.serverHandledHistogramEnabled {
		m.serverHandledHistogram.Collect(ch)
	}
}

// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		monitor := newServerReporter(m, Unary, info.FullMethod)
		monitor.ReceivedMessage()
		resp, err := handler(ctx, req)
		st, _ := status.FromError(err)
		monitor.Handled(st.Code())
		if err == nil {
			monitor.SentMessage()
		}
		return resp, err
	}
}

// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		monitor := newServerReporter(m, streamRPCType(info), info.FullMethod)
		err := handler(srv, &monitoredServerStream{ss, monitor})
		st, _ := status.FromError(err)
		monitor.Handled(st.Code())
		return err
	}
}

// InitializeMetrics initializes all metrics, with their appropriate null
// value, for all gRPC methods registered on a gRPC server. This is useful, to
// ensure that all metrics exist when collecting and querying.
func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) {
	serviceInfo := server.GetServiceInfo()
	for serviceName, info := range serviceInfo {
		for _, mInfo := range info.Methods {
			preRegisterMethod(m, serviceName, &mInfo)
		}
	}
}

func streamRPCType(info *grpc.StreamServerInfo) grpcType {
	if info.IsClientStream && !info.IsServerStream {
		return ClientStream
	} else if !info.IsClientStream && info.IsServerStream {
		return ServerStream
	}
	return BidiStream
}

// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
type monitoredServerStream struct {
	grpc.ServerStream
	monitor *serverReporter
}

func (s *monitoredServerStream) SendMsg(m interface{}) error {
	err := s.ServerStream.SendMsg(m)
	if err == nil {
		s.monitor.SentMessage()
	}
	return err
}

func (s *monitoredServerStream) RecvMsg(m interface{}) error {
	err := s.ServerStream.RecvMsg(m)
	if err == nil {
		s.monitor.ReceivedMessage()
	}
	return err
}

// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated.
func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) {
	methodName := mInfo.Name
	methodType := string(typeFromMethodInfo(mInfo))
	// These are just references (no increments), as just referencing will create the labels but not set values.
	metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
	metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
	metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
	if metrics.serverHandledHistogramEnabled {
		metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
	}
	for _, code := range allCodes {
		metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
	}
}
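A sketch tying ServerMetrics to a dedicated registry, mirroring the client-side example earlier; service registration and the listeners are elided:

```go
package main

import (
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

func main() {
	reg := prometheus.NewRegistry()

	// ServerMetrics implements prometheus.Collector via Describe/Collect.
	serverMetrics := grpc_prometheus.NewServerMetrics()
	reg.MustRegister(serverMetrics)

	server := grpc.NewServer(
		grpc.UnaryInterceptor(serverMetrics.UnaryServerInterceptor()),
		grpc.StreamInterceptor(serverMetrics.StreamServerInterceptor()),
	)
	// ... register your gRPC services on `server` here ...

	// Pre-populate every method/code combination with a 0 value so that
	// queries never encounter missing series.
	serverMetrics.InitializeMetrics(server)

	// Serve only this registry, not the global default one.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	// ... http.ListenAndServe and server.Serve elided ...
}
```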
components/engine/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go (137 changes; generated, vendored)
@@ -7,151 +7,40 @@ import (
 	"time"

 	"google.golang.org/grpc/codes"
-
-	prom "github.com/prometheus/client_golang/prometheus"
-	"google.golang.org/grpc"
 )

-type grpcType string
-
-const (
-	Unary        grpcType = "unary"
-	ClientStream grpcType = "client_stream"
-	ServerStream grpcType = "server_stream"
-	BidiStream   grpcType = "bidi_stream"
-)
-
-var (
-	serverStartedCounter = prom.NewCounterVec(
-		prom.CounterOpts{
-			Namespace: "grpc",
-			Subsystem: "server",
-			Name:      "started_total",
-			Help:      "Total number of RPCs started on the server.",
-		}, []string{"grpc_type", "grpc_service", "grpc_method"})
-
-	serverHandledCounter = prom.NewCounterVec(
-		prom.CounterOpts{
-			Namespace: "grpc",
-			Subsystem: "server",
-			Name:      "handled_total",
-			Help:      "Total number of RPCs completed on the server, regardless of success or failure.",
-		}, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"})
-
-	serverStreamMsgReceived = prom.NewCounterVec(
-		prom.CounterOpts{
-			Namespace: "grpc",
-			Subsystem: "server",
-			Name:      "msg_received_total",
-			Help:      "Total number of RPC stream messages received on the server.",
-		}, []string{"grpc_type", "grpc_service", "grpc_method"})
-
-	serverStreamMsgSent = prom.NewCounterVec(
-		prom.CounterOpts{
-			Namespace: "grpc",
-			Subsystem: "server",
-			Name:      "msg_sent_total",
-			Help:      "Total number of gRPC stream messages sent by the server.",
-		}, []string{"grpc_type", "grpc_service", "grpc_method"})
-
-	serverHandledHistogramEnabled = false
-	serverHandledHistogramOpts    = prom.HistogramOpts{
-		Namespace: "grpc",
-		Subsystem: "server",
-		Name:      "handling_seconds",
-		Help:      "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
-		Buckets:   prom.DefBuckets,
-	}
-	serverHandledHistogram *prom.HistogramVec
-)
-
-func init() {
-	prom.MustRegister(serverStartedCounter)
-	prom.MustRegister(serverHandledCounter)
-	prom.MustRegister(serverStreamMsgReceived)
-	prom.MustRegister(serverStreamMsgSent)
-}
-
-type HistogramOption func(*prom.HistogramOpts)
-
-// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
-func WithHistogramBuckets(buckets []float64) HistogramOption {
-	return func(o *prom.HistogramOpts) { o.Buckets = buckets }
-}
-
-// EnableHandlingTimeHistogram turns on recording of handling time of RPCs for server-side interceptors.
-// Histogram metrics can be very expensive for Prometheus to retain and query.
-func EnableHandlingTimeHistogram(opts ...HistogramOption) {
-	for _, o := range opts {
-		o(&serverHandledHistogramOpts)
-	}
-	if !serverHandledHistogramEnabled {
-		serverHandledHistogram = prom.NewHistogramVec(
-			serverHandledHistogramOpts,
-			[]string{"grpc_type", "grpc_service", "grpc_method"},
-		)
-		prom.Register(serverHandledHistogram)
-	}
-	serverHandledHistogramEnabled = true
-}
-
 type serverReporter struct {
+	metrics     *ServerMetrics
 	rpcType     grpcType
 	serviceName string
 	methodName  string
 	startTime   time.Time
 }

-func newServerReporter(rpcType grpcType, fullMethod string) *serverReporter {
-	r := &serverReporter{rpcType: rpcType}
-	if serverHandledHistogramEnabled {
+func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter {
+	r := &serverReporter{
+		metrics: m,
+		rpcType: rpcType,
+	}
+	if r.metrics.serverHandledHistogramEnabled {
 		r.startTime = time.Now()
 	}
 	r.serviceName, r.methodName = splitMethodName(fullMethod)
-	serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
 	return r
 }

 func (r *serverReporter) ReceivedMessage() {
-	serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
 }

 func (r *serverReporter) SentMessage() {
-	serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
 }

 func (r *serverReporter) Handled(code codes.Code) {
-	serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
-	if serverHandledHistogramEnabled {
-		serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if r.metrics.serverHandledHistogramEnabled {
+		r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
 	}
 }
-
-// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated.
-func preRegisterMethod(serviceName string, mInfo *grpc.MethodInfo) {
-	methodName := mInfo.Name
-	methodType := string(typeFromMethodInfo(mInfo))
-	// These are just references (no increments), as just referencing will create the labels but not set values.
-	serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
-	serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
-	serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
-	if serverHandledHistogramEnabled {
-		serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
-	}
-	for _, code := range allCodes {
-		serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
-	}
-}
-
-func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
-	if mInfo.IsClientStream == false && mInfo.IsServerStream == false {
-		return Unary
-	}
-	if mInfo.IsClientStream == true && mInfo.IsServerStream == false {
-		return ClientStream
-	}
-	if mInfo.IsClientStream == false && mInfo.IsServerStream == true {
-		return ServerStream
-	}
-	return BidiStream
-}
components/engine/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go (23 changes; generated, vendored)
@@ -6,9 +6,19 @@ package grpc_prometheus
 import (
 	"strings"

+	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 )

+type grpcType string
+
+const (
+	Unary        grpcType = "unary"
+	ClientStream grpcType = "client_stream"
+	ServerStream grpcType = "server_stream"
+	BidiStream   grpcType = "bidi_stream"
+)
+
 var (
 	allCodes = []codes.Code{
 		codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound,

@@ -25,3 +35,16 @@ func splitMethodName(fullMethodName string) (string, string) {
 	}
 	return "unknown", "unknown"
 }
+
+func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
+	if !mInfo.IsClientStream && !mInfo.IsServerStream {
+		return Unary
+	}
+	if mInfo.IsClientStream && !mInfo.IsServerStream {
+		return ClientStream
+	}
+	if !mInfo.IsClientStream && mInfo.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
components/engine/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go (2 changes; generated, vendored)
@@ -503,6 +503,8 @@ type WindowsNetwork struct {
 	DNSSearchList []string `json:"DNSSearchList,omitempty"`
 	// Name (ID) of the container that we will share with the network stack.
 	NetworkSharedContainerName string `json:"networkSharedContainerName,omitempty"`
+	// name (ID) of the network namespace that will be used for the container.
+	NetworkNamespace string `json:"networkNamespace,omitempty"`
 }

 // WindowsHyperV contains information for configuring a container to run with Hyper-V isolation.
@@ -87,9 +87,6 @@ func FormatMountLabel(src, mountLabel string) string {
 // SetProcessLabel takes a process label and tells the kernel to assign the
 // label to the next program executed by the current process.
 func SetProcessLabel(processLabel string) error {
-	if processLabel == "" {
-		return nil
-	}
 	return selinux.SetExecLabel(processLabel)
 }

@@ -133,7 +130,7 @@ func Relabel(path string, fileLabel string, shared bool) error {
 		return nil
 	}

-	exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true}
+	exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true, "/tmp": true, "/home": true, "/run": true, "/var": true, "/root": true}
 	if exclude_paths[path] {
 		return fmt.Errorf("SELinux relabeling of %s is not allowed", path)
 	}
@ -1,13 +1,16 @@
|
||||
// +build linux
|
||||
// +build selinux,linux
|
||||
|
||||
package selinux
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
@ -23,14 +26,16 @@ const (
|
||||
// Permissive constant to indicate SELinux is in permissive mode
|
||||
Permissive = 0
|
||||
// Disabled constant to indicate SELinux is disabled
|
||||
Disabled = -1
|
||||
Disabled = -1
|
||||
|
||||
selinuxDir = "/etc/selinux/"
|
||||
selinuxConfig = selinuxDir + "config"
|
||||
selinuxfsMount = "/sys/fs/selinux"
|
||||
selinuxTypeTag = "SELINUXTYPE"
|
||||
selinuxTag = "SELINUX"
|
||||
selinuxPath = "/sys/fs/selinux"
|
||||
xattrNameSelinux = "security.selinux"
|
||||
stRdOnly = 0x01
|
||||
selinuxfsMagic = 0xf97cff8c
|
||||
)
|
||||
|
||||
type selinuxState struct {
|
||||
@ -43,7 +48,13 @@ type selinuxState struct {
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS.
|
||||
ErrMCSAlreadyExists = errors.New("MCS label already exists")
|
||||
// ErrEmptyPath is returned when an empty path has been specified.
|
||||
ErrEmptyPath = errors.New("empty path")
|
||||
|
||||
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
|
||||
roFileLabel string
|
||||
state = selinuxState{
|
||||
mcsList: make(map[string]bool),
|
||||
}
|
||||
@ -91,6 +102,83 @@ func (s *selinuxState) setSELinuxfs(selinuxfs string) string {
|
||||
return s.selinuxfs
|
||||
}
|
||||
|
||||
func verifySELinuxfsMount(mnt string) bool {
|
||||
var buf syscall.Statfs_t
|
||||
for {
|
||||
err := syscall.Statfs(mnt, &buf)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if err == syscall.EAGAIN {
|
||||
continue
|
||||
}
|
||||
return false
|
||||
}
|
||||
if uint32(buf.Type) != uint32(selinuxfsMagic) {
|
||||
return false
|
||||
}
|
||||
if (buf.Flags & stRdOnly) != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func findSELinuxfs() string {
|
||||
// fast path: check the default mount first
|
||||
if verifySELinuxfsMount(selinuxfsMount) {
|
||||
return selinuxfsMount
|
||||
}
|
||||
|
||||
// check if selinuxfs is available before going the slow path
|
||||
fs, err := ioutil.ReadFile("/proc/filesystems")
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
if !bytes.Contains(fs, []byte("\tselinuxfs\n")) {
|
||||
return ""
|
||||
}
|
||||
|
||||
// slow path: try to find among the mounts
|
||||
f, err := os.Open("/proc/self/mountinfo")
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for {
|
||||
mnt := findSELinuxfsMount(scanner)
|
||||
if mnt == "" { // error or not found
|
||||
return ""
|
||||
}
|
||||
if verifySELinuxfsMount(mnt) {
|
||||
return mnt
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// findSELinuxfsMount returns a next selinuxfs mount point found,
|
||||
// if there is one, or an empty string in case of EOF or error.
|
||||
func findSELinuxfsMount(s *bufio.Scanner) string {
|
||||
for s.Scan() {
|
||||
txt := s.Text()
|
||||
// The first field after - is fs type.
|
||||
// Safe as spaces in mountpoints are encoded as \040
|
||||
if !strings.Contains(txt, " - selinuxfs ") {
|
||||
continue
|
||||
}
|
||||
const mPos = 5 // mount point is 5th field
|
||||
fields := strings.SplitN(txt, " ", mPos+1)
|
||||
if len(fields) < mPos+1 {
|
||||
continue
|
||||
}
|
||||
return fields[mPos-1]
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (s *selinuxState) getSELinuxfs() string {
|
||||
s.Lock()
|
||||
selinuxfs := s.selinuxfs
|
||||
@ -100,40 +188,7 @@ func (s *selinuxState) getSELinuxfs() string {
|
||||
return selinuxfs
|
||||
}
|
||||
|
||||
selinuxfs = ""
|
||||
f, err := os.Open("/proc/self/mountinfo")
|
||||
if err != nil {
|
||||
return selinuxfs
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
txt := scanner.Text()
|
||||
// Safe as mountinfo encodes mountpoints with spaces as \040.
|
||||
sepIdx := strings.Index(txt, " - ")
|
||||
if sepIdx == -1 {
|
||||
continue
|
||||
}
|
||||
if !strings.Contains(txt[sepIdx:], "selinuxfs") {
|
||||
continue
|
||||
}
|
||||
fields := strings.Split(txt, " ")
|
||||
if len(fields) < 5 {
|
||||
continue
|
||||
}
|
||||
selinuxfs = fields[4]
|
||||
break
|
||||
}
|
||||
|
||||
if selinuxfs != "" {
|
||||
var buf syscall.Statfs_t
|
||||
syscall.Statfs(selinuxfs, &buf)
|
||||
if (buf.Flags & stRdOnly) == 1 {
|
||||
selinuxfs = ""
|
||||
}
|
||||
}
|
||||
return s.setSELinuxfs(selinuxfs)
|
||||
return s.setSELinuxfs(findSELinuxfs())
|
||||
}
|
||||
|
||||
// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs
|
||||
@ -150,7 +205,7 @@ func GetEnabled() bool {
|
||||
return state.getEnabled()
|
||||
}
|
||||
|
||||
func readConfig(target string) (value string) {
|
||||
func readConfig(target string) string {
|
||||
var (
|
||||
val, key string
|
||||
bufin *bufio.Reader
|
||||
@ -192,30 +247,42 @@ func readConfig(target string) (value string) {
|
||||
}
|
||||
|
||||
func getSELinuxPolicyRoot() string {
|
||||
return selinuxDir + readConfig(selinuxTypeTag)
|
||||
return filepath.Join(selinuxDir, readConfig(selinuxTypeTag))
|
||||
}
|
||||
|
||||
func readCon(name string) (string, error) {
|
||||
var val string
|
||||
func readCon(fpath string) (string, error) {
|
||||
if fpath == "" {
|
||||
return "", ErrEmptyPath
|
||||
}
|
||||
|
||||
in, err := os.Open(name)
|
||||
in, err := os.Open(fpath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
_, err = fmt.Fscanf(in, "%s", &val)
|
||||
return val, err
|
||||
var retval string
|
||||
if _, err := fmt.Fscanf(in, "%s", &retval); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Trim(retval, "\x00"), nil
|
||||
}
|
||||
|
||||
// SetFileLabel sets the SELinux label for this path or returns an error.
|
||||
func SetFileLabel(path string, label string) error {
|
||||
return lsetxattr(path, xattrNameSelinux, []byte(label), 0)
|
||||
func SetFileLabel(fpath string, label string) error {
|
||||
if fpath == "" {
|
||||
return ErrEmptyPath
|
||||
}
|
||||
return lsetxattr(fpath, xattrNameSelinux, []byte(label), 0)
|
||||
}
|
||||
|
||||
// FileLabel returns the SELinux label for this path or returns an error.
|
||||
func FileLabel(path string) (string, error) {
|
||||
label, err := lgetxattr(path, xattrNameSelinux)
|
||||
func FileLabel(fpath string) (string, error) {
|
||||
if fpath == "" {
|
||||
return "", ErrEmptyPath
|
||||
}
|
||||
|
||||
label, err := lgetxattr(fpath, xattrNameSelinux)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
@ -260,8 +327,12 @@ func ExecLabel() (string, error) {
	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()))
}

func writeCon(name string, val string) error {
	out, err := os.OpenFile(name, os.O_WRONLY, 0)
func writeCon(fpath string, val string) error {
	if fpath == "" {
		return ErrEmptyPath
	}

	out, err := os.OpenFile(fpath, os.O_WRONLY, 0)
	if err != nil {
		return err
	}
@ -275,6 +346,37 @@ func writeCon(name string, val string) error {
	return err
}

/*
CanonicalizeContext takes a context string and writes it to the kernel;
the function then returns the context that the kernel will use. This function
can be used to see if two contexts are equivalent.
*/
func CanonicalizeContext(val string) (string, error) {
	return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val)
}

func readWriteCon(fpath string, val string) (string, error) {
	if fpath == "" {
		return "", ErrEmptyPath
	}
	f, err := os.OpenFile(fpath, os.O_RDWR, 0)
	if err != nil {
		return "", err
	}
	defer f.Close()

	_, err = f.Write([]byte(val))
	if err != nil {
		return "", err
	}

	var retval string
	if _, err := fmt.Fscanf(f, "%s", &retval); err != nil {
		return "", err
	}
	return strings.Trim(retval, "\x00"), nil
}
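
readWriteCon is the plumbing behind CanonicalizeContext: it writes the candidate context into the selinuxfs context file and scans back the kernel's canonical form, now trimmed of trailing NUL bytes. A sketch — not part of the diff — of using it to test two labels for equivalence on an SELinux-enabled kernel (the contexts are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/opencontainers/selinux/go-selinux"
)

func main() {
	// The same categories in two different orders.
	a, err := selinux.CanonicalizeContext("system_u:object_r:container_file_t:s0:c2,c1")
	if err != nil {
		log.Fatal(err)
	}
	b, err := selinux.CanonicalizeContext("system_u:object_r:container_file_t:s0:c1,c2")
	if err != nil {
		log.Fatal(err)
	}
	// If the kernel canonicalizes both to the same context, the labels are equivalent.
	fmt.Println(a == b)
}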
/*
SetExecLabel sets the SELinux label that the kernel will use for any programs
that are executed by the current process thread, or an error.
@ -285,7 +387,10 @@ func SetExecLabel(label string) error {

// Get returns the Context as a string
func (c Context) Get() string {
	return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
	if c["level"] != "" {
		return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
	}
	return fmt.Sprintf("%s:%s:%s", c["user"], c["role"], c["type"])
}

// NewContext creates a new Context struct from the specified label
@ -297,7 +402,9 @@ func NewContext(label string) Context {
		c["user"] = con[0]
		c["role"] = con[1]
		c["type"] = con[2]
		c["level"] = con[3]
		if len(con) > 3 {
			c["level"] = con[3]
		}
	}
	return c
}
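
The two hunks above are complementary: NewContext no longer assumes a fourth MLS/MCS field is present, and Get stops emitting a trailing empty field when there is no level. A small sketch of the new round trip — not from the diff — assuming a build with the selinux tag (labels illustrative):

package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux"
)

func main() {
	// A full four-field label keeps its level.
	c := selinux.NewContext("system_u:system_r:container_t:s0:c1,c2")
	fmt.Println(c.Get()) // system_u:system_r:container_t:s0:c1,c2

	// A three-field label previously indexed past the end of the split slice;
	// now it parses cleanly and Get omits the level.
	c = selinux.NewContext("system_u:system_r:container_t")
	fmt.Println(c.Get()) // system_u:system_r:container_t
}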
@ -306,12 +413,14 @@ func NewContext(label string) Context {
func ReserveLabel(label string) {
	if len(label) != 0 {
		con := strings.SplitN(label, ":", 4)
		mcsAdd(con[3])
		if len(con) > 3 {
			mcsAdd(con[3])
		}
	}
}

func selinuxEnforcePath() string {
	return fmt.Sprintf("%s/enforce", selinuxPath)
	return fmt.Sprintf("%s/enforce", getSelinuxMountPoint())
}

// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
@ -354,16 +463,22 @@ func DefaultEnforceMode() int {
}

func mcsAdd(mcs string) error {
	if mcs == "" {
		return nil
	}
	state.Lock()
	defer state.Unlock()
	if state.mcsList[mcs] {
		return fmt.Errorf("MCS Label already exists")
		return ErrMCSAlreadyExists
	}
	state.mcsList[mcs] = true
	return nil
}

func mcsDelete(mcs string) {
	if mcs == "" {
		return
	}
	state.Lock()
	defer state.Unlock()
	state.mcsList[mcs] = false
@ -424,14 +539,14 @@ Allowing it to be used by another process.
func ReleaseLabel(label string) {
	if len(label) != 0 {
		con := strings.SplitN(label, ":", 4)
		mcsDelete(con[3])
		if len(con) > 3 {
			mcsDelete(con[3])
		}
	}
}

var roFileLabel string

// ROFileLabel returns the specified SELinux readonly file label
func ROFileLabel() (fileLabel string) {
func ROFileLabel() string {
	return roFileLabel
}

@ -497,23 +612,25 @@ func ContainerLabels() (processLabel string, fileLabel string) {
		roFileLabel = fileLabel
	}
exit:
	mcs := uniqMcs(1024)
	scon := NewContext(processLabel)
	scon["level"] = mcs
	processLabel = scon.Get()
	scon = NewContext(fileLabel)
	scon["level"] = mcs
	fileLabel = scon.Get()
	if scon["level"] != "" {
		mcs := uniqMcs(1024)
		scon["level"] = mcs
		processLabel = scon.Get()
		scon = NewContext(fileLabel)
		scon["level"] = mcs
		fileLabel = scon.Get()
	}
	return processLabel, fileLabel
}

// SecurityCheckContext validates that the SELinux label is understood by the kernel
func SecurityCheckContext(val string) error {
	return writeCon(fmt.Sprintf("%s.context", selinuxPath), val)
	return writeCon(fmt.Sprintf("%s/context", getSelinuxMountPoint()), val)
}

/*
CopyLevel returns a label with the MLS/MCS level from src label replaces on
CopyLevel returns a label with the MLS/MCS level from src label replaced on
the dest label.
*/
func CopyLevel(src, dest string) (string, error) {
@ -536,20 +653,26 @@ func CopyLevel(src, dest string) (string, error) {

// Prevent users from relabeling system files
func badPrefix(fpath string) error {
	var badprefixes = []string{"/usr"}
	if fpath == "" {
		return ErrEmptyPath
	}

	for _, prefix := range badprefixes {
		if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
	badPrefixes := []string{"/usr"}
	for _, prefix := range badPrefixes {
		if strings.HasPrefix(fpath, prefix) {
			return fmt.Errorf("relabeling content in %s is not allowed", prefix)
		}
	}
	return nil
}

// Chcon changes the fpath file object to the SELinux label label.
// If the fpath is a directory and recurse is true Chcon will walk the
// directory tree setting the label
// Chcon changes the `fpath` file object to the SELinux label `label`.
// If `fpath` is a directory and `recurse` is true, Chcon will walk the
// directory tree setting the label.
func Chcon(fpath string, label string, recurse bool) error {
	if fpath == "" {
		return ErrEmptyPath
	}
	if label == "" {
		return nil
	}
@ -568,7 +691,7 @@ func Chcon(fpath string, label string, recurse bool) error {
}

// DupSecOpt takes an SELinux process label and returns security options that
// can will set the SELinux Type and Level for future container processes
// can be used to set the SELinux Type and Level for future container processes.
func DupSecOpt(src string) []string {
	if src == "" {
		return nil
@ -576,18 +699,23 @@ func DupSecOpt(src string) []string {
	con := NewContext(src)
	if con["user"] == "" ||
		con["role"] == "" ||
		con["type"] == "" ||
		con["level"] == "" {
		con["type"] == "" {
		return nil
	}
	return []string{"user:" + con["user"],
	dup := []string{"user:" + con["user"],
		"role:" + con["role"],
		"type:" + con["type"],
		"level:" + con["level"]}
	}

	if con["level"] != "" {
		dup = append(dup, "level:"+con["level"])
	}

	return dup
}
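
With the level check dropped from the guard and the option slice built incrementally, DupSecOpt now returns user/role/type options for a level-less label instead of nil. A sketch of both shapes — not from the diff, labels illustrative:

package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux"
)

func main() {
	// Full label: four options, including the level.
	fmt.Println(selinux.DupSecOpt("system_u:system_r:container_t:s0:c1,c2"))
	// Level-less label: previously nil, now three options.
	fmt.Println(selinux.DupSecOpt("system_u:system_r:container_t"))
}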

// DisableSecOpt returns a security opt that can be used to disabling SELinux
// labeling support for future container processes
// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes.
func DisableSecOpt() []string {
	return []string{"disable"}
}
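
That closes the selinux_linux.go hunks. Taken together, the exported surface is typically driven as in the sketch below — an editorial illustration, not code from this commit: check enablement, allocate a label pair, and release the reserved MCS level when the container is gone. Output depends on the host's SELinux policy:

package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux"
)

func main() {
	if !selinux.GetEnabled() {
		fmt.Println("SELinux disabled; ContainerLabels returns empty strings")
	}
	processLabel, fileLabel := selinux.ContainerLabels()
	// ReleaseLabel unreserves the MCS level so another process may reuse it.
	defer selinux.ReleaseLabel(processLabel)
	fmt.Println("process:", processLabel, "file:", fileLabel)
}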

188 components/engine/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go generated vendored Normal file
@ -0,0 +1,188 @@
// +build !selinux

package selinux

import (
	"errors"
)

const (
	// Enforcing constant to indicate SELinux is in enforcing mode
	Enforcing = 1
	// Permissive constant to indicate SELinux is in permissive mode
	Permissive = 0
	// Disabled constant to indicate SELinux is disabled
	Disabled = -1
)

var (
	// ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS.
	ErrMCSAlreadyExists = errors.New("MCS label already exists")
	// ErrEmptyPath is returned when an empty path has been specified.
	ErrEmptyPath = errors.New("empty path")
)

// Context is a representation of the SELinux label broken into 4 parts
type Context map[string]string

// SetDisabled disables selinux support for the package
func SetDisabled() {
	return
}

// GetEnabled returns whether selinux is currently enabled.
func GetEnabled() bool {
	return false
}

// SetFileLabel sets the SELinux label for this path or returns an error.
func SetFileLabel(fpath string, label string) error {
	return nil
}

// FileLabel returns the SELinux label for this path or returns an error.
func FileLabel(fpath string) (string, error) {
	return "", nil
}

/*
SetFSCreateLabel tells the kernel which label to use for all file system objects
created by this task. Setting label="" returns to the default.
*/
func SetFSCreateLabel(label string) error {
	return nil
}

/*
FSCreateLabel returns the default label which the kernel is using
for file system objects created by this task. "" indicates the default.
*/
func FSCreateLabel() (string, error) {
	return "", nil
}

// CurrentLabel returns the SELinux label of the current process thread, or an error.
func CurrentLabel() (string, error) {
	return "", nil
}

// PidLabel returns the SELinux label of the given pid, or an error.
func PidLabel(pid int) (string, error) {
	return "", nil
}

/*
ExecLabel returns the SELinux label that the kernel will use for any programs
that are executed by the current process thread, or an error.
*/
func ExecLabel() (string, error) {
	return "", nil
}

/*
CanonicalizeContext takes a context string and writes it to the kernel;
the function then returns the context that the kernel will use. This function
can be used to see if two contexts are equivalent.
*/
func CanonicalizeContext(val string) (string, error) {
	return "", nil
}

/*
SetExecLabel sets the SELinux label that the kernel will use for any programs
that are executed by the current process thread, or an error.
*/
func SetExecLabel(label string) error {
	return nil
}

// Get returns the Context as a string
func (c Context) Get() string {
	return ""
}

// NewContext creates a new Context struct from the specified label
func NewContext(label string) Context {
	c := make(Context)
	return c
}

// ReserveLabel reserves the MLS/MCS level component of the specified label
func ReserveLabel(label string) {
	return
}

// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
func EnforceMode() int {
	return Disabled
}

/*
SetEnforceMode sets the current SELinux mode Enforcing, Permissive.
Disabled is not valid, since this needs to be set at boot time.
*/
func SetEnforceMode(mode int) error {
	return nil
}

/*
DefaultEnforceMode returns the system's default SELinux mode Enforcing,
Permissive or Disabled. Note this is just the default at boot time.
EnforceMode tells you the system's current mode.
*/
func DefaultEnforceMode() int {
	return Disabled
}

/*
ReleaseLabel will unreserve the MLS/MCS Level field of the specified label,
allowing it to be used by another process.
*/
func ReleaseLabel(label string) {
	return
}

// ROFileLabel returns the specified SELinux readonly file label
func ROFileLabel() string {
	return ""
}

/*
ContainerLabels returns an allocated processLabel and fileLabel to be used for
container labeling by the calling process.
*/
func ContainerLabels() (processLabel string, fileLabel string) {
	return "", ""
}

// SecurityCheckContext validates that the SELinux label is understood by the kernel
func SecurityCheckContext(val string) error {
	return nil
}

/*
CopyLevel returns a label with the MLS/MCS level from src label replaced on
the dest label.
*/
func CopyLevel(src, dest string) (string, error) {
	return "", nil
}

// Chcon changes the `fpath` file object to the SELinux label `label`.
// If `fpath` is a directory and `recurse` is true, Chcon will walk the
// directory tree setting the label.
func Chcon(fpath string, label string, recurse bool) error {
	return nil
}

// DupSecOpt takes an SELinux process label and returns security options that
// can be used to set the SELinux Type and Level for future container processes.
func DupSecOpt(src string) []string {
	return nil
}

// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes.
func DisableSecOpt() []string {
	return []string{"disable"}
}

2 components/engine/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go generated vendored
@ -1,4 +1,4 @@
// +build linux
// +build selinux,linux

package selinux
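
This one-line change is what makes the stub above reachable: the real implementation now compiles only when both the selinux build tag and Linux are in effect, while selinux_stub.go is guarded by !selinux. With the standard Go toolchain the implementation is therefore selected at build time; for example (illustrative commands):

go build -tags selinux ./...   # real implementation, Linux only
go build ./...                 # stub: all calls are no-ops and GetEnabled() reports false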