Merge component 'engine' from git@github.com:moby/moby master

GordonTheTurtle
2017-12-20 17:04:25 +00:00
10 changed files with 455 additions and 92 deletions

View File

@@ -303,9 +303,8 @@ commit automatically with `git commit -s`.
### How can I become a maintainer?
The procedures for adding new maintainers are explained in the
global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
repository.
[/project/GOVERNANCE.md](/project/GOVERNANCE.md)
file in this repository.
Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
@@ -371,6 +370,11 @@ guidelines for the community as a whole:
used to ping maintainers to review a pull request, a proposal or an
issue.
The open source governance for this repository is handled via the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc)
charter. For any concerns with the community process regarding technical contributions,
please contact the TSC. More information on project governance is available in
our [project/GOVERNANCE.md](/project/GOVERNANCE.md) document.
### Guideline violations — 3 strikes method
The point of this section is not to find opportunities to punish people, but we

View File

@@ -1,12 +1,14 @@
# Moby maintainers file
#
# This file describes who runs the docker/docker project and how.
# This is a living document - if you see something out of date or missing, speak up!
# This file describes the maintainer groups within the moby/moby project.
# More detail on Moby project governance is available in the
# project/GOVERNANCE.md file found in this repository.
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
#
# TODO(estesp): This file should not necessarily depend on docker/opensource
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]

View File

@@ -62,8 +62,8 @@ import (
"github.com/pkg/errors"
)
// MainNamespace is the name of the namespace used for users' containers
const MainNamespace = "moby"
// ContainersNamespace is the name of the namespace used for users' containers
const ContainersNamespace = "moby"
var (
errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
@@ -247,6 +247,11 @@ func (daemon *Daemon) restore() error {
logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
return
}
} else if !daemon.configStore.LiveRestoreEnabled {
if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
return
}
}
if c.IsRunning() || c.IsPaused() {
@@ -317,24 +322,24 @@ func (daemon *Daemon) restore() error {
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
} else {
// get list of containers we need to restart
}
// Do not autostart containers which
// have endpoints in a swarm scope
// network, since the cluster is
// not initialized yet. We will start
// them after the cluster is
// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
mapLock.Lock()
removeContainers[c.ID] = c
mapLock.Unlock()
}
// get list of containers we need to restart
// Do not autostart containers which
// have endpoints in a swarm scope
// network, since the cluster is
// not initialized yet. We will start
// them after the cluster is
// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
mapLock.Lock()
removeContainers[c.ID] = c
mapLock.Unlock()
}
c.Lock()
@@ -890,7 +895,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
go d.execCommandGC()
d.containerd, err = containerdRemote.NewClient(MainNamespace, d)
d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d)
if err != nil {
return nil, err
}

View File

@@ -95,6 +95,17 @@ func init() {
}
}
// eventBatch holds the events that are batched for submission and the
// associated data about them.
//
// Warning: this type is not threadsafe and must not be used
// concurrently. It is expected to be consumed by a single goroutine
// and never concurrently.
type eventBatch struct {
batch []wrappedEvent
bytes int
}
// New creates an awslogs logger using the configuration passed in on the
// context. Supported context configuration variables are awslogs-region,
// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern
@@ -389,32 +400,32 @@ var newTicker = func(freq time.Duration) *time.Ticker {
// Logs, the processEvents method is called. If a multiline pattern is not
// configured, log events are submitted to the processEvents method immediately.
func (l *logStream) collectBatch() {
timer := newTicker(batchPublishFrequency)
var events []wrappedEvent
ticker := newTicker(batchPublishFrequency)
var eventBuffer []byte
var eventBufferTimestamp int64
var batch = newEventBatch()
for {
select {
case t := <-timer.C:
case t := <-ticker.C:
// If the event buffer is older than the batch publish frequency, flush the event buffer
if eventBufferTimestamp > 0 && len(eventBuffer) > 0 {
eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp
eventBufferExpired := eventBufferAge > int64(batchPublishFrequency)/int64(time.Millisecond)
eventBufferNegative := eventBufferAge < 0
if eventBufferExpired || eventBufferNegative {
events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBuffer = eventBuffer[:0]
}
}
l.publishBatch(events)
events = events[:0]
l.publishBatch(batch)
batch.reset()
case msg, more := <-l.messages:
if !more {
// Flush event buffer and release resources
events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBuffer = eventBuffer[:0]
l.publishBatch(events)
events = events[:0]
l.publishBatch(batch)
batch.reset()
return
}
if eventBufferTimestamp == 0 {
@@ -425,7 +436,7 @@ func (l *logStream) collectBatch() {
if l.multilinePattern.Match(unprocessedLine) || len(eventBuffer)+len(unprocessedLine) > maximumBytesPerEvent {
// This is a new log event or we will exceed max bytes per event
// so flush the current eventBuffer to events and reset timestamp
events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
eventBuffer = eventBuffer[:0]
}
@@ -434,7 +445,7 @@ func (l *logStream) collectBatch() {
eventBuffer = append(eventBuffer, processedLine...)
logger.PutMessage(msg)
} else {
events = l.processEvent(events, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond))
l.processEvent(batch, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond))
logger.PutMessage(msg)
}
}
@@ -450,8 +461,7 @@ func (l *logStream) collectBatch() {
// bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event
// byte overhead (defined in perEventBytes) which is accounted for in split- and
// batch-calculations.
func (l *logStream) processEvent(events []wrappedEvent, unprocessedLine []byte, timestamp int64) []wrappedEvent {
bytes := 0
func (l *logStream) processEvent(batch *eventBatch, unprocessedLine []byte, timestamp int64) {
for len(unprocessedLine) > 0 {
// Split line length so it does not exceed the maximum
lineBytes := len(unprocessedLine)
@@ -459,38 +469,33 @@ func (l *logStream) processEvent(events []wrappedEvent, unprocessedLine []byte,
lineBytes = maximumBytesPerEvent
}
line := unprocessedLine[:lineBytes]
unprocessedLine = unprocessedLine[lineBytes:]
if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) {
// Publish an existing batch if it's already over the maximum number of events or if adding this
// event would push it over the maximum number of total bytes.
l.publishBatch(events)
events = events[:0]
bytes = 0
}
events = append(events, wrappedEvent{
event := wrappedEvent{
inputLogEvent: &cloudwatchlogs.InputLogEvent{
Message: aws.String(string(line)),
Timestamp: aws.Int64(timestamp),
},
insertOrder: len(events),
})
bytes += (lineBytes + perEventBytes)
insertOrder: batch.count(),
}
added := batch.add(event, lineBytes)
if added {
unprocessedLine = unprocessedLine[lineBytes:]
} else {
l.publishBatch(batch)
batch.reset()
}
}
return events
}
// publishBatch calls PutLogEvents for a given set of InputLogEvents,
// accounting for sequencing requirements (each request must reference the
// sequence token returned by the previous request).
func (l *logStream) publishBatch(events []wrappedEvent) {
if len(events) == 0 {
func (l *logStream) publishBatch(batch *eventBatch) {
if batch.isEmpty() {
return
}
// events in a batch must be sorted by timestamp
// see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
sort.Sort(byTimestamp(events))
cwEvents := unwrapEvents(events)
cwEvents := unwrapEvents(batch.events())
nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken)
@@ -615,3 +620,70 @@ func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent {
}
return cwEvents
}
func newEventBatch() *eventBatch {
return &eventBatch{
batch: make([]wrappedEvent, 0),
bytes: 0,
}
}
// events returns a slice of wrappedEvents sorted in order of their
// timestamps and then by their insertion order (see `byTimestamp`).
//
// Warning: this method is not threadsafe and must not be used
// concurrently.
func (b *eventBatch) events() []wrappedEvent {
sort.Sort(byTimestamp(b.batch))
return b.batch
}
// add adds an event to the batch of events, accounting for the
// necessary overhead for an event to be logged. It returns false
// if the event cannot be added to the batch due to service limits.
//
// Warning: this method is not threadsafe and must not be used
// concurrently.
func (b *eventBatch) add(event wrappedEvent, size int) bool {
addBytes := size + perEventBytes
// verify we are still within service limits
switch {
case len(b.batch)+1 > maximumLogEventsPerPut:
return false
case b.bytes+addBytes > maximumBytesPerPut:
return false
}
b.bytes += addBytes
b.batch = append(b.batch, event)
return true
}
// count is the number of batched events. Warning: this method
// is not threadsafe and must not be used concurrently.
func (b *eventBatch) count() int {
return len(b.batch)
}
// size is the total number of bytes that the batch represents.
//
// Warning: this method is not threadsafe and must not be used
// concurrently.
func (b *eventBatch) size() int {
return b.bytes
}
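// isEmpty reports whether the batch currently holds no events and no bytes.
//
// Warning: this method is not threadsafe and must not be used
// concurrently.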
func (b *eventBatch) isEmpty() bool {
zeroEvents := b.count() == 0
zeroSize := b.size() == 0
return zeroEvents && zeroSize
}
// reset prepares the batch for reuse.
func (b *eventBatch) reset() {
b.bytes = 0
b.batch = b.batch[:0]
}
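For readers skimming the diff, the intended consumption pattern of this type (keep calling add until it reports the batch is full, publish, reset, retry) is easier to see in a small sketch. Everything below is a simplified illustration with toy names and values, not the driver's real wrappedEvent/PutLogEvents plumbing:

```go
// Minimal, self-contained sketch of the batching pattern above. The real
// driver batches wrappedEvent values and publishes them with PutLogEvents;
// the event type, constants, and publish step below are toy stand-ins so
// the snippet runs on its own.
package main

import "fmt"

const (
	maxEventsPerPut = 3  // toy stand-in for maximumLogEventsPerPut
	maxBytesPerPut  = 16 // toy stand-in for maximumBytesPerPut
	perEventBytes   = 2  // toy stand-in for the fixed per-event overhead
)

type eventBatch struct {
	batch []string
	bytes int
}

// add appends an event only if it fits within the put limits; it returns
// false when the caller should publish the current batch and retry.
func (b *eventBatch) add(event string, size int) bool {
	addBytes := size + perEventBytes
	if len(b.batch)+1 > maxEventsPerPut || b.bytes+addBytes > maxBytesPerPut {
		return false
	}
	b.bytes += addBytes
	b.batch = append(b.batch, event)
	return true
}

// reset prepares the batch for reuse, as in the driver.
func (b *eventBatch) reset() {
	b.batch = b.batch[:0]
	b.bytes = 0
}

// publish stands in for logStream.publishBatch.
func publish(b *eventBatch) {
	fmt.Printf("put %d events, %d bytes\n", len(b.batch), b.bytes)
}

func main() {
	batch := &eventBatch{}
	for _, line := range []string{"alpha", "beta", "gamma", "delta", "epsilon"} {
		// Mirror processEvent: when an event does not fit, publish, reset,
		// and retry the same event against the fresh batch. (The real code
		// first splits lines to maximumBytesPerEvent, so a single event
		// always fits an empty batch.)
		for !batch.add(line, len(line)) {
			publish(batch)
			batch.reset()
		}
	}
	publish(batch) // final flush, as collectBatch does on ticker or close
}
```

The property the change relies on is that add never mutates the batch when an event would breach a limit, so the caller can publish, reset, and retry the same event without losing data.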

View File

@@ -49,6 +49,15 @@ func (l *logStream) logGenerator(lineCount int, multilineCount int) {
}
}
func testEventBatch(events []wrappedEvent) *eventBatch {
batch := newEventBatch()
for _, event := range events {
eventlen := len([]byte(*event.inputLogEvent.Message))
batch.add(event, eventlen)
}
return batch
}
func TestNewAWSLogsClientUserAgentHandler(t *testing.T) {
info := logger.Info{
Config: map[string]string{
@@ -212,7 +221,7 @@ func TestPublishBatchSuccess(t *testing.T) {
},
}
stream.publishBatch(events)
stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -257,7 +266,7 @@ func TestPublishBatchError(t *testing.T) {
},
}
stream.publishBatch(events)
stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -291,7 +300,7 @@ func TestPublishBatchInvalidSeqSuccess(t *testing.T) {
},
}
stream.publishBatch(events)
stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -354,7 +363,7 @@ func TestPublishBatchAlreadyAccepted(t *testing.T) {
},
}
stream.publishBatch(events)
stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -859,7 +868,8 @@ func TestCollectBatchMaxEvents(t *testing.T) {
}
func TestCollectBatchMaxTotalBytes(t *testing.T) {
mockClient := newMockClientBuffered(1)
expectedPuts := 2
mockClient := newMockClientBuffered(expectedPuts)
stream := &logStream{
client: mockClient,
logGroupName: groupName,
@@ -867,11 +877,14 @@ func TestCollectBatchMaxTotalBytes(t *testing.T) {
sequenceToken: aws.String(sequenceToken),
messages: make(chan *logger.Message),
}
mockClient.putLogEventsResult <- &putLogEventsResult{
successResult: &cloudwatchlogs.PutLogEventsOutput{
NextSequenceToken: aws.String(nextSequenceToken),
},
for i := 0; i < expectedPuts; i++ {
mockClient.putLogEventsResult <- &putLogEventsResult{
successResult: &cloudwatchlogs.PutLogEventsOutput{
NextSequenceToken: aws.String(nextSequenceToken),
},
}
}
var ticks = make(chan time.Time)
newTicker = func(_ time.Duration) *time.Ticker {
return &time.Ticker{
@@ -881,32 +894,57 @@ func TestCollectBatchMaxTotalBytes(t *testing.T) {
go stream.collectBatch()
longline := strings.Repeat("A", maximumBytesPerPut)
numPayloads := maximumBytesPerPut / (maximumBytesPerEvent + perEventBytes)
// maxline is the maximum line that could be submitted after
// accounting for its overhead.
maxline := strings.Repeat("A", maximumBytesPerPut-(perEventBytes*numPayloads))
// This will be split and batched up to the `maximumBytesPerPut'
// (+/- `maximumBytesPerEvent'). This /should/ be aligned, but
// should also tolerate an offset within that range.
stream.Log(&logger.Message{
Line: []byte(longline + "B"),
Line: []byte(maxline[:len(maxline)/2]),
Timestamp: time.Time{},
})
stream.Log(&logger.Message{
Line: []byte(maxline[len(maxline)/2:]),
Timestamp: time.Time{},
})
stream.Log(&logger.Message{
Line: []byte("B"),
Timestamp: time.Time{},
})
// no ticks
// no ticks, guarantee batch by size (and chan close)
stream.Close()
argument := <-mockClient.putLogEventsArgument
if argument == nil {
t.Fatal("Expected non-nil PutLogEventsInput")
}
bytes := 0
// Should total to the maximum allowed bytes.
eventBytes := 0
for _, event := range argument.LogEvents {
bytes += len(*event.Message)
eventBytes += len(*event.Message)
}
if bytes > maximumBytesPerPut {
t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes)
eventsOverhead := len(argument.LogEvents) * perEventBytes
payloadTotal := eventBytes + eventsOverhead
// lowestMaxBatch allows the payload to be offset if the messages
// don't align exactly with the maximum event size.
lowestMaxBatch := maximumBytesPerPut - maximumBytesPerEvent
if payloadTotal > maximumBytesPerPut {
t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, payloadTotal)
}
if payloadTotal < lowestMaxBatch {
t.Errorf("Batch to be no less than %d but was %d", lowestMaxBatch, payloadTotal)
}
argument = <-mockClient.putLogEventsArgument
if len(argument.LogEvents) != 1 {
t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents))
}
message := *argument.LogEvents[0].Message
message := *argument.LogEvents[len(argument.LogEvents)-1].Message
if message[len(message)-1:] != "B" {
t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:])
}
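The size assertions in this test are easier to follow with concrete numbers. A minimal sketch of the arithmetic, assuming the constants the awslogs driver conventionally uses (1,048,576 bytes per PutLogEvents request, 26 bytes of per-event overhead, and a per-event message cap of 262,144 bytes minus that overhead); these values are an assumption for illustration and are not defined in this diff:

```go
// Worked numbers behind the maxline / lowestMaxBatch calculations above,
// using assumed CloudWatch limits (not taken from this diff).
package main

import "fmt"

const (
	maximumBytesPerPut   = 1048576                  // assumed max payload per PutLogEvents request
	perEventBytes        = 26                       // assumed fixed overhead charged per event
	maximumBytesPerEvent = 262144 - perEventBytes   // assumed max message bytes per event
)

func main() {
	// How many maximum-sized events (message + overhead) fit in one put.
	numPayloads := maximumBytesPerPut / (maximumBytesPerEvent + perEventBytes) // 4
	// The long test line: the put limit minus the overhead those events pay,
	// so its chunks plus overhead can fill one put exactly.
	maxline := maximumBytesPerPut - perEventBytes*numPayloads // 1048472
	// A batch may fall short of the put limit by at most one event.
	lowestMaxBatch := maximumBytesPerPut - maximumBytesPerEvent // 786458
	fmt.Println(numPayloads, maxline, lowestMaxBatch)
}
```

Under those assumed values, maxline splits into exactly numPayloads full-size events, the first put fills to the limit within the allowed tolerance, and the trailing "B" arrives alone in the second put, matching the final assertion.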

View File

@@ -1,6 +1,10 @@
package awslogs
import "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
import (
"fmt"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)
type mockcwlogsclient struct {
createLogGroupArgument chan *cloudwatchlogs.CreateLogGroupInput
@@ -67,7 +71,30 @@ func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput)
LogGroupName: input.LogGroupName,
LogStreamName: input.LogStreamName,
}
// Intended mock output
output := <-m.putLogEventsResult
// Check the enforced service limits in the mock
totalBytes := 0
for _, evt := range events {
if evt.Message == nil {
continue
}
eventBytes := len([]byte(*evt.Message))
if eventBytes > maximumBytesPerEvent {
// exceeded per event message size limits
return nil, fmt.Errorf("maximum bytes per event exceeded: Event too large %d, max allowed: %d", eventBytes, maximumBytesPerEvent)
}
// total event bytes including overhead
totalBytes += eventBytes + perEventBytes
}
if totalBytes > maximumBytesPerPut {
// exceeded per put maximum size limit
return nil, fmt.Errorf("maximum bytes per put exceeded: Upload too large %d, max allowed: %d", totalBytes, maximumBytesPerPut)
}
return output.successResult, output.errorResult
}

View File

@@ -1451,7 +1451,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *chec
// kill the container
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
"--namespace", moby_daemon.MainNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
"--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
// restart daemon.
d.Restart(c)
@@ -2011,7 +2011,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check
// kill the container
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
"--namespace", moby_daemon.MainNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
"--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
// Give time to containerd to process the command if we don't
// the exit event might be received after we do the inspect
@@ -2106,7 +2106,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *che
result := icmd.RunCommand(
ctrBinary,
"--address", "/var/run/docker/containerd/docker-containerd.sock",
"--namespace", moby_daemon.MainNamespace,
"--namespace", moby_daemon.ContainersNamespace,
"tasks", "resume", cid)
result.Assert(t, icmd.Success)

View File

@@ -0,0 +1,112 @@
package container
import (
"context"
"fmt"
"testing"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/integration-cli/daemon"
)
func TestDaemonRestartKillContainers(t *testing.T) {
type testCase struct {
desc string
config *container.Config
hostConfig *container.HostConfig
xRunning bool
xRunningLiveRestore bool
}
for _, c := range []testCase{
{
desc: "container without restart policy",
config: &container.Config{Image: "busybox", Cmd: []string{"top"}},
xRunningLiveRestore: true,
},
{
desc: "container with restart=always",
config: &container.Config{Image: "busybox", Cmd: []string{"top"}},
hostConfig: &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
xRunning: true,
xRunningLiveRestore: true,
},
} {
for _, liveRestoreEnabled := range []bool{false, true} {
for fnName, stopDaemon := range map[string]func(*testing.T, *daemon.Daemon){
"kill-daemon": func(t *testing.T, d *daemon.Daemon) {
if err := d.Kill(); err != nil {
t.Fatal(err)
}
},
"stop-daemon": func(t *testing.T, d *daemon.Daemon) {
d.Stop(t)
},
} {
t.Run(fmt.Sprintf("live-restore=%v/%s/%s", liveRestoreEnabled, c.desc, fnName), func(t *testing.T) {
c := c
liveRestoreEnabled := liveRestoreEnabled
stopDaemon := stopDaemon
t.Parallel()
d := daemon.New(t, "", "dockerd", daemon.Config{})
client, err := d.NewClient()
if err != nil {
t.Fatal(err)
}
var args []string
if liveRestoreEnabled {
args = []string{"--live-restore"}
}
d.StartWithBusybox(t, args...)
defer d.Stop(t)
ctx := context.Background()
resp, err := client.ContainerCreate(ctx, c.config, c.hostConfig, nil, "")
if err != nil {
t.Fatal(err)
}
defer client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
if err := client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
t.Fatal(err)
}
stopDaemon(t, d)
d.Start(t, args...)
expected := c.xRunning
if liveRestoreEnabled {
expected = c.xRunningLiveRestore
}
var running bool
for i := 0; i < 30; i++ {
inspect, err := client.ContainerInspect(ctx, resp.ID)
if err != nil {
t.Fatal(err)
}
running = inspect.State.Running
if running == expected {
break
}
time.Sleep(2 * time.Second)
}
if running != expected {
t.Fatalf("got unexpected running state, expected %v, got: %v", expected, running)
}
// TODO(cpuguy83): test pause states... this seems to be rather undefined currently
})
}
}
}
}

View File

@@ -16,7 +16,7 @@ import (
)
// PluginNamespace is the name used for the plugins namespace
var PluginNamespace = "moby-plugins"
var PluginNamespace = "plugins.moby"
// ExitHandler represents an object that is called when the exit event is received from containerd
type ExitHandler interface {

View File

@@ -1,17 +1,120 @@
# Docker Governance Advisory Board Meetings
# Moby project governance
In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public.
All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership.
Moby projects are governed by the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc).
See the Moby TSC [charter](https://github.com/moby/tsc/blob/master/README.md) for
further information on the role of the TSC and procedures for escalation
of technical issues or concerns.
The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at
[Google Docs Folder](https://goo.gl/Alfj8r)
Contact [any Moby TSC member](https://github.com/moby/tsc/blob/master/MEMBERS.md) with your questions/concerns about the governance or a specific technical
issue that you feel requires escalation.
These include:
## Project maintainers
* First Meeting Notes
* DGAB Charter
* Presentation 1: Introductory Presentation, including State of The Project
* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal
* Presentation 3: Long Term Roadmap/Statement of Direction
The current maintainers of the moby/moby repository are listed in the
[MAINTAINERS](/MAINTAINERS) file.
There are different types of maintainers, with different responsibilities, but
all maintainers have 3 things in common:
1. They share responsibility in the project's success.
2. They have made a long-term, recurring time investment to improve the project.
3. They spend that time doing whatever needs to be done, not necessarily what is the most interesting or fun.
Maintainers are often under-appreciated, because their work is less visible.
It's easy to recognize a really cool and technically advanced feature. It's harder
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.
### Adding maintainers
Maintainers are first and foremost contributors who have shown their
commitment to the long term success of a project. Contributors who want to
become maintainers first demonstrate commitment to the project by contributing
code, reviewing others' work, and triaging issues on a regular basis for at
least three months.
The contributions alone don't make you a maintainer. You need to earn the
trust of the current maintainers and other project contributors that your
decisions and actions are in the best interest of the project.
Periodically, the existing maintainers curate a list of contributors who have
shown regular activity on the project over the prior months. From this
list, maintainer candidates are selected and proposed on the maintainers
mailing list.
After a candidate is announced on the maintainers mailing list, the
existing maintainers discuss the candidate over the next 5 business days,
provide feedback, and vote. At least 66% of the current maintainers must
vote in the affirmative.
If a candidate is approved, a maintainer contacts the candidate to
invite them to open a pull request that adds the contributor to
the MAINTAINERS file. The candidate becomes a maintainer once the pull
request is merged.
### Removing maintainers
Maintainers can be removed from the project, either at their own request
or due to [project inactivity](#inactive-maintainer-policy).
#### How to step down
Life priorities, interests, and passions can change. If you're a maintainer but
feel you must remove yourself from the list, inform other maintainers that you
intend to step down, and if possible, help find someone to pick up your work.
At the very least, ensure your work can be continued where you left off.
After you've informed other maintainers, create a pull request to remove
yourself from the MAINTAINERS file.
#### Inactive maintainer policy
An existing maintainer can be removed if they do not show significant activity
on the project. Periodically, the maintainers review the list of maintainers
and their activity over the last three months.
If a maintainer has shown insufficient activity over this period, a project
representative will contact the maintainer to ask if they want to continue
being a maintainer. If the maintainer decides to step down as a maintainer,
they open a pull request to be removed from the MAINTAINERS file.
If the maintainer wants to continue in this role, but is unable to perform the
required duties, they can be removed with a vote by at least 66% of the current
maintainers. The maintainer under discussion will not be allowed to vote. An
e-mail is sent to the mailing list, inviting maintainers of the project to
vote. The voting period is five business days. Issues related to a maintainer's
performance should be discussed with them among the other maintainers so that
they are not surprised by a pull request removing them. This discussion should
be handled objectively with no ad hominem attacks.
## Project decision making
Short answer: **Everything is a pull request**.
The Moby core engine project is an open-source project with an open design
philosophy. This means that the repository is the source of truth for **every**
aspect of the project, including its philosophy, design, road map, and APIs.
*If it's part of the project, it's in the repo. If it's in the repo, it's part
of the project.*
As a result, each decision can be expressed as a change to the repository. An
implementation change is expressed as a change to the source code. An API
change is a change to the API specification. A philosophy change is a change
to the philosophy manifesto, and so on.
All decisions affecting the moby/moby repository, both big and small, follow
the same steps:
* **Step 1**: Open a pull request. Anyone can do this.
* **Step 2**: Discuss the pull request. Anyone can do this.
* **Step 3**: Maintainers merge, close or reject the pull request.
Pull requests are reviewed by the current maintainers of the moby/moby
repository. Weekly meetings are organized to synchronously
discuss tricky PRs, as well as design and architecture decisions. When
technical agreement cannot be reached among the maintainers of the project,
escalation or concerns can be raised by opening an issue to be handled
by the [Moby Technical Steering Committee](https://github.com/moby/tsc).