Hide the mutex in formatter.ContainerStats

The formatter.ContainerStats struct exposes its Mutex.
This is a bad design and should be fixed.

To fix that, I moved the statistics
attributes out of ContainerStats into a new StatsEntry
struct and hid the mutex. Notice that the mutex protects
both the `err` field and the statistics attributes.

Then, I implemented SetStatistics, SetError, GetStatistics
and GetError to avoid data races.

Moreover, to make the locking coarser-grained, I decided to
replace the read-write mutex with a regular mutex and
to pass a StatsEntry slice to formatter.ContainerStatsWrite.

Signed-off-by: Boaz Shuster <ripcurld.github@gmail.com>
This commit is contained in:
Boaz Shuster
2016-09-22 15:54:41 +03:00
parent bfbdb15f55
commit 3bc50c45ba
3 changed files with 132 additions and 81 deletions

View File

@ -166,11 +166,10 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
var errs []string
cStats.mu.Lock()
for _, c := range cStats.cs {
c.Mu.Lock()
if c.Err != nil {
errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.Err))
cErr := c.GetError()
if cErr != nil {
errs = append(errs, fmt.Sprintf("%s: %v", c.Name, cErr))
}
c.Mu.Unlock()
}
cStats.mu.Unlock()
if len(errs) > 0 {
@ -189,7 +188,7 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
Format: formatter.NewStatsFormat(f, daemonOSType),
}
cleanHeader := func() {
cleanScreen := func() {
if !opts.noStream {
fmt.Fprint(dockerCli.Out(), "\033[2J")
fmt.Fprint(dockerCli.Out(), "\033[H")
@ -198,14 +197,17 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
var err error
for range time.Tick(500 * time.Millisecond) {
cleanHeader()
cStats.mu.RLock()
csLen := len(cStats.cs)
if err = formatter.ContainerStatsWrite(statsCtx, cStats.cs); err != nil {
cleanScreen()
ccstats := []formatter.StatsEntry{}
cStats.mu.Lock()
for _, c := range cStats.cs {
ccstats = append(ccstats, c.GetStatistics())
}
cStats.mu.Unlock()
if err = formatter.ContainerStatsWrite(statsCtx, ccstats); err != nil {
break
}
cStats.mu.RUnlock()
if csLen == 0 && !showAll {
if len(cStats.cs) == 0 && !showAll {
break
}
if opts.noStream {

View File

@ -17,7 +17,7 @@ import (
type stats struct {
ostype string
mu sync.RWMutex
mu sync.Mutex
cs []*formatter.ContainerStats
}
@ -72,9 +72,7 @@ func collect(s *formatter.ContainerStats, ctx context.Context, cli client.APICli
response, err := cli.ContainerStats(ctx, s.Name, streamStats)
if err != nil {
s.Mu.Lock()
s.Err = err
s.Mu.Unlock()
s.SetError(err)
return
}
defer response.Body.Close()
@ -88,6 +86,9 @@ func collect(s *formatter.ContainerStats, ctx context.Context, cli client.APICli
cpuPercent = 0.0
blkRead, blkWrite uint64 // Only used on Linux
mem = 0.0
memLimit = 0.0
memPerc = 0.0
pidsStatsCurrent uint64
)
if err := dec.Decode(&v); err != nil {
@ -113,26 +114,27 @@ func collect(s *formatter.ContainerStats, ctx context.Context, cli client.APICli
cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v)
blkRead, blkWrite = calculateBlockIO(v.BlkioStats)
mem = float64(v.MemoryStats.Usage)
memLimit = float64(v.MemoryStats.Limit)
memPerc = memPercent
pidsStatsCurrent = v.PidsStats.Current
} else {
cpuPercent = calculateCPUPercentWindows(v)
blkRead = v.StorageStats.ReadSizeBytes
blkWrite = v.StorageStats.WriteSizeBytes
mem = float64(v.MemoryStats.PrivateWorkingSet)
}
s.Mu.Lock()
s.CPUPercentage = cpuPercent
s.Memory = mem
s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks)
s.BlockRead = float64(blkRead)
s.BlockWrite = float64(blkWrite)
if daemonOSType != "windows" {
s.MemoryLimit = float64(v.MemoryStats.Limit)
s.MemoryPercentage = memPercent
s.PidsCurrent = v.PidsStats.Current
}
s.Mu.Unlock()
netRx, netTx := calculateNetwork(v.Networks)
s.SetStatistics(formatter.StatsEntry{
CPUPercentage: cpuPercent,
Memory: mem,
MemoryPercentage: memPerc,
MemoryLimit: memLimit,
NetworkRx: netRx,
NetworkTx: netTx,
BlockRead: float64(blkRead),
BlockWrite: float64(blkWrite),
PidsCurrent: pidsStatsCurrent,
})
u <- nil
if !streamStats {
return
@ -144,18 +146,7 @@ func collect(s *formatter.ContainerStats, ctx context.Context, cli client.APICli
case <-time.After(2 * time.Second):
// zero out the values if we have not received an update within
// the specified duration.
s.Mu.Lock()
s.CPUPercentage = 0
s.Memory = 0
s.MemoryPercentage = 0
s.MemoryLimit = 0
s.NetworkRx = 0
s.NetworkTx = 0
s.BlockRead = 0
s.BlockWrite = 0
s.PidsCurrent = 0
s.Err = errors.New("timeout waiting for stats")
s.Mu.Unlock()
s.SetErrorAndReset(errors.New("timeout waiting for stats"))
// if this is the first stat you get, release WaitGroup
if !getFirst {
getFirst = true
@ -163,12 +154,10 @@ func collect(s *formatter.ContainerStats, ctx context.Context, cli client.APICli
}
case err := <-u:
if err != nil {
s.Mu.Lock()
s.Err = err
s.Mu.Unlock()
s.SetError(err)
continue
}
s.Err = nil
s.SetError(nil)
// if this is the first stat you get, release WaitGroup
if !getFirst {
getFirst = true