diff --git a/components/engine/.mailmap b/components/engine/.mailmap index 683758650e..47860de4c3 100644 --- a/components/engine/.mailmap +++ b/components/engine/.mailmap @@ -1,4 +1,9 @@ -# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf + @@ -47,8 +52,9 @@ Shih-Yuan Lee Daniel Mizyrycki root Jean-Baptiste Dalido - - + + + @@ -78,3 +84,16 @@ Sridhar Ratnakumar Liang-Chi Hsieh Aleksa Sarai Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + +Francisco Carriedo + diff --git a/components/engine/.travis.yml b/components/engine/.travis.yml index ae03d6cde5..55fa9044c9 100644 --- a/components/engine/.travis.yml +++ b/components/engine/.travis.yml @@ -3,10 +3,20 @@ language: go -go: 1.2 +go: +# This should match the version in the Dockerfile. + - 1.3.1 +# Test against older versions too, just for a little extra retrocompat. + - 1.2 + +# Let us have pretty experimental Docker-based Travis workers. +# (These spin up much faster than the VM-based ones.) +sudo: false # Disable the normal go build. -install: true +install: + - export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' # btrfs and devicemapper fail to compile thanks to a couple missing headers (which we can't install thanks to "sudo: false") + - export AUTO_GOPATH=1 before_script: - env | sort @@ -14,5 +24,7 @@ before_script: script: - hack/make.sh validate-dco - hack/make.sh validate-gofmt + - ./hack/make.sh dynbinary + - DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary # vim:set sw=2 ts=2: diff --git a/components/engine/AUTHORS b/components/engine/AUTHORS index 10f01fb589..43904e9e34 100644 --- a/components/engine/AUTHORS +++ b/components/engine/AUTHORS @@ -1,5 +1,5 @@ # This file lists all individuals having contributed content to the repository. -# For how it is generated, see `.mailmap`. +# For how it is generated, see `hack/generate-authors.sh`. 
Aanand Prasad Aaron Feng @@ -9,33 +9,43 @@ Adam Miller Adam Singer Aditya Adrian Mouat +Adrien Folie +AJ Bowen +Al Tobey alambike +Albert Zhang Aleksa Sarai +Alex Gaynor +Alex Warhawk Alexander Larsson +Alexander Shopov Alexandr Morozov Alexey Kotlyarov Alexey Shamrin -Alex Gaynor Alexis THOMAS almoehi -Al Tobey amangoel +AnandkumarPatel +Andre Dublin <81dublin@gmail.com> Andrea Luzzardi +Andrea Turli Andreas Savvides Andreas Tiefenthaler -Andrea Turli Andrew Duckworth +Andrew France Andrew Macgregor Andrew Munsell -Andrews Medina +Andrew Weiss Andrew Williams +Andrews Medina Andy Chambers andy diller Andy Goldstein Andy Kipp -Andy Rothfusz +Andy Rothfusz Andy Smith Anthony Bishopric +Anton Löfgren Anton Nikitin Antony Messerli apocas @@ -44,25 +54,34 @@ Asbjørn Enge Barnaby Gray Barry Allard Bartłomiej Piotrowski -Benjamin Atkin -Benoit Chesneau +bdevloed +Ben Firshman Ben Sargent Ben Toews Ben Wiklund +Benjamin Atkin +Benoit Chesneau Bernerd Schaefer Bhiraj Butala bin liu Bouke Haarsma +Boyd Hemphill Brandon Liu Brandon Philips +Brandon Rhodes +Brett Kochendorfer +Brian (bex) Exelbierd Brian Dorsey Brian Flad Brian Goff Brian McCallister Brian Olsen Brian Shumate +Brice Jaglin Briehan Lombaard Bruno Bigras +Bruno Renié +Bryan Bess Bryan Matsuo Bryan Murphy Caleb Spare @@ -73,19 +92,35 @@ Charles Hooper Charles Lindsay Charles Merriam Charlie Lewis +Chewey Chia-liang Kao +Chris Alfonso +Chris Snow Chris St. Pierre +chrismckinnel +Christian Berendt +ChristoperBiscardi +Christophe Troestler Christopher Currie Christopher Rigor -Christophe Troestler +Ciro S. Costa Clayton Coleman Colin Dunklau Colin Rice +Colin Walters Cory Forsyth +cpuguy83 cressie176 +Cruceru Calin-Cristian +Daan van Berkel Dafydd Crosby Dan Buch Dan Hirsch +Dan Keder +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams Daniel Exner Daniel Garcia Daniel Gasienica @@ -95,22 +130,21 @@ Daniel Nordberg Daniel Robinson Daniel Von Fange Daniel YC Lin -Dan Keder -Dan McPherson +Daniel, Dao Quang Minh Danny Berger Danny Yates -Dan Stine -Dan Walsh -Dan Williams Darren Coxall Darren Shepherd David Anderson David Calavera +David Corking David Gageot David Mcanulty David Röthlisberger David Sissitka Deni Bertovic +Derek +Deric Crago Dinesh Subhraveti Djibril Koné dkumor @@ -118,8 +152,10 @@ Dmitry Demeshchuk Dolph Mathews Dominik Honnef Don Spaulding -Dražen Lučanin +Doug Davis +doug tangren Dr Nic Williams +Dražen Lučanin Dustin Sallings Edmund Wagner Eiichi Tsukata @@ -130,13 +166,17 @@ Emily Rose Eric Hanchrow Eric Lee Eric Myhre -Erik Hollensbe +Eric Windisch +Eric Windisch +Erik Hollensbe +Erik Inge Bolsø Erno Hopearuoho eugenkrizo Evan Hazlett Evan Krall Evan Phoenix Evan Wies +evanderkoogh Eystein Måløy Stenberg ezbercih Fabio Falci @@ -147,12 +187,16 @@ Fareed Dudhia Felix Rabe Fernando Flavio Castelli +FLGMwt +Francisco Carriedo Francisco Souza Frank Macreery +Fred Lifton Frederick F. Kautz IV Frederik Loeffert Freek Kalter Gabe Rosenhouse +Gabor Nagy Gabriel Monroy Galen Sampson Gareth Rushgrove @@ -160,75 +204,106 @@ Geoffrey Bachelet Gereon Frey German DZ Gert van Valkenhoef +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Glyn Normington Goffert van Gool Graydon Hoare Greg Thornton grunny +Guilherme Salgado Guillaume J. 
Charmes Gurjeet Singh Guruprasad +Harald Albers Harley Laue Hector Castro +Henning Sprang Hobofan +Hollie Teal +Hollie Teal +hollietealok Hunter Blanks +hyeongkyu.lee +Ian Babrou +Ian Bull +Ian Main Ian Truslove ILYA Khlopotov inglesp Isaac Dupree Isabel Jimenez Isao Jonas +Ivan Fraixedes Jack Danger Canty -jakedt Jake Moshenko +jakedt James Allen James Carr James DeFelice James Harrison Fisher +James Kyle James Mills James Turnbull +Jan Pazdziora +Jan Toebes +Jaroslaw Zabiello jaseg +Jason Giedymin +Jason Hall +Jason Livesay Jason McVetta Jason Plum Jean-Baptiste Barth Jean-Baptiste Dalido Jeff Lindsay +Jeff Welch +Jeffrey Bolle Jeremy Grosser -Jérôme Petazzoni Jesse Dubay +Jezeniel Zapanta Jilles Oldenbeuving Jim Alateras +Jim Perrin Jimmy Cuadra +Jiří Župka Joe Beda -Joel Handwell Joe Shaw Joe Van Dyk +Joel Handwell Joffrey F Johan Euphrosine -Johannes 'fish' Ziemke Johan Rydberg +Johannes 'fish' Ziemke John Costa John Feminella John Gardiner Myers +John OBrien III John Warwick +Jon Wedaman Jonas Pfenniger +Jonathan Boulle +Jonathan Camp Jonathan McCrohan Jonathan Mueller Jonathan Pares Jonathan Rudenberg -Jon Wedaman Joost Cassee Jordan Arentsen Jordan Sissel Joseph Anthony Pasquale Holsten Joseph Hager +Josh Josh Hawn Josh Poimboeuf JP Julien Barbier +Julien Bordellier Julien Dubois Justin Force Justin Plock Justin Simonelis +Jérôme Petazzoni Karan Lyons Karl Grzeszczak Kato Kazuyoshi @@ -236,36 +311,49 @@ Kawsar Saiyeed Keli Hu Ken Cochrane Ken ICHIKAWA +Kevin "qwazerty" Houdebert Kevin Clark Kevin J. Lynagh Kevin Menard Kevin Wallace Keyvan Fatehi -kim0 +kies Kim BKC Carlbacker +kim0 Kimbro Staken Kiran Gangadharan +knappe +Kohei Tsuruta Konstantin Pelykh Kyle Conroy +kyu +Lachlan Coote lalyos Lance Chen Lars R. Damerow Laurie Voss +leeplay +Len Weincier +Levi Gross Lewis Peckover Liang-Chi Hsieh -Lokesh Mandvekar +Lokesh Mandvekar Louis Opter lukaspustina lukemarsden Mahesh Tiyyagura +Manfred Zabarauskas Manuel Meurer Manuel Woelker Marc Abramowitz Marc Kuo +Marc Tamsky Marco Hennings Marcus Farkas Marcus Ramberg +marcuslinke Marek Goldmann +Marius Voila Mark Allen Mark McGranaghan Marko Mikulicic @@ -278,30 +366,40 @@ Mathieu Le Marec - Pasquet Matt Apperson Matt Bachmann Matt Haggard +Matthew Heon Matthew Mueller Matthias Klumpp Matthias Kühnle mattymo -Maxime Petazzoni -Maxim Treskin +mattyw Max Shytikov +Maxim Treskin +Maxime Petazzoni meejah Michael Brown -Michael Crosby +Michael Crosby Michael Gorsuch Michael Neale +Michael Prokop Michael Stapelberg +Michaël Pailloncy +Michiel@unhosted Miguel Angel Fernández +Mike Chelen Mike Gaffney Mike MacCana Mike Naberezny +Mike Snitzer Mikhail Sobolev Mohit Soni Morgante Pell Morten Siebuhr +Mrunal Patel Nan Monnand Deng +Naoki Orii Nate Jones Nathan Kleyn +Nathan LeClaire Nelson Chen Niall O'Higgins Nick Payne @@ -309,15 +407,20 @@ Nick Stenning Nick Stinemates Nicolas Dudebout Nicolas Kaiser +NikolaMandic noducks Nolan Darilek +O.S. Tezer +OddBloke odk- Oguz Bilgic Ole Reifschneider -O.S. 
Tezer +Olivier Gambier pandrew Pascal Borreli +Patrick Hemmer pattichen +Paul Paul Annesley Paul Bowsher Paul Hammond @@ -325,49 +428,71 @@ Paul Jimenez Paul Lietar Paul Morie Paul Nasrat -Paul +Paul Weaver +Peter Bourgon Peter Braden -Peter Waller -Phillip Alexander +Peter Waller +Phil Phil Spitler +Phillip Alexander Piergiuliano Bossi Pierre-Alain RIVIERE Piotr Bogdan pysqz Quentin Brossard +r0n22 Rafal Jeczalik Rajat Pandit +Rajdeep Dua Ralph Bean Ramkumar Ramachandra Ramon van Alteren Renato Riccieri Santos Zannon rgstephens Rhys Hiltner +Richard Harvey Richo Healey Rick Bradley +Rick van de Loo +Robert Bachmann Robert Obryk Roberto G. Hashioka +Robin Speekenbrink robpc Rodrigo Vaz Roel Van Nyen Roger Peppe Rohit Jnagal +Roland Huß Roland Moriz +Ron Smits Rovanion Luckey +Rudolph Gottesheim +Ryan Anderson Ryan Aslett Ryan Fowler Ryan O'Donnell Ryan Seto Ryan Thomas +s-ko Sam Alba +Sam Bailey Sam J Sharpe +Sam Reis Sam Rijs Samuel Andaya +satoru +Satoshi Amemiya Scott Bessler Scott Collier Sean Cronin Sean P. Kane -Sébastien Stormacq +Sebastiaan van Stijn +Sebastiaan van Stijn +Senthil Kumar Selvaraj +SeongJae Park +Shane Canon +shaunol Shawn Landden Shawn Siefkas Shih-Yuan Lee @@ -378,14 +503,19 @@ Sjoerd Langkemper Solomon Hykes Song Gao Soulou +soulshake Sridatta Thatipamala Sridhar Ratnakumar Steeve Morin Stefan Praszalowicz +Stephen Crosby Steven Burgess sudosurootdev -Sven Dowideit +Sven Dowideit Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq tang0th Tatsuki Sugiura Tehmasp Chaudhri @@ -400,19 +530,24 @@ Thomas Schroeter Tianon Gravi Tibor Vass Tim Bosse -Timothy Hobbs Tim Ruffles +Tim Ruffles Tim Terhorst +Timothy Hobbs tjmehta Tobias Bieniek +Tobias Gesellchen Tobias Schmidt Tobias Schwab Todd Lunter Tom Fotherby Tom Hulihan +Tom Maaswinkel Tommaso Visconti Tony Daws +tpng Travis Cline +Trent Ogren Tyler Brock Tzu-Jung Lee Ulysse Carion @@ -434,21 +569,29 @@ Vivek Agarwal Vladimir Bulyga Vladimir Kirillov Vladimir Rutsky +waitingkuo Walter Leibbrandt Walter Stanish WarheadsSE Wes Morgan Will Dietz -William Delanoue -William Henry Will Rouesnel Will Weaver +William Delanoue +William Henry +William Riancho +William Thurston +wyc Xiuming Chen Yang Bai Yasunori Mahata Yurii Rashkovskii +Zac Dover Zain Memon Zaiste! +Zane DeGraffenried Zilin Du zimbatm +Zoltan Tombol zqh +Álvaro Lázaro diff --git a/components/engine/CHANGELOG.md b/components/engine/CHANGELOG.md index 8ec9ce3df0..9b89ea46b3 100644 --- a/components/engine/CHANGELOG.md +++ b/components/engine/CHANGELOG.md @@ -313,7 +313,7 @@ - Add newlines to the JSON stream functions. #### Runtime -* Do not ping the registry from the CLI. All requests to registres flow through the daemon. +* Do not ping the registry from the CLI. All requests to registries flow through the daemon. - Check for nil information return in the lxc driver. This fixes panics with older lxc versions. - Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. - Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. 
@@ -905,7 +905,7 @@ With the ongoing changes to the networking and execution subsystems of docker te + Add domainname support + Implement image filtering with path.Match -* Remove unnecesasry warnings +* Remove unnecessary warnings * Remove os/user dependency * Only mount the hostname file when the config exists * Handle signals within the `docker login` command @@ -928,7 +928,7 @@ With the ongoing changes to the networking and execution subsystems of docker te + Hack: Vendor all dependencies * Remote API: Bump to v1.5 * Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. -* Documentation: General improvments +* Documentation: General improvements ## 0.6.1 (2013-08-23) @@ -1198,7 +1198,7 @@ With the ongoing changes to the networking and execution subsystems of docker te * Prevent rm of running containers * Use go1.1 cookiejar - Fix issue detaching from running TTY container -- Forbid parralel push/pull for a single image/repo. Fixes #311 +- Forbid parallel push/pull for a single image/repo. Fixes #311 - Fix race condition within Run command when attaching. #### Client @@ -1314,7 +1314,7 @@ With the ongoing changes to the networking and execution subsystems of docker te + Add caching to docker builder + Add support for docker builder with native API as top level command + Implement ENV within docker builder -- Check the command existance prior create and add Unit tests for the case +- Check the command existence prior create and add Unit tests for the case * use any whitespaces instead of tabs #### Runtime @@ -1353,13 +1353,13 @@ With the ongoing changes to the networking and execution subsystems of docker te #### Runtime -- Fix the command existance check +- Fix the command existence check - strings.Split may return an empty string on no match - Fix an index out of range crash if cgroup memory is not #### Documentation -* Various improvments +* Various improvements + New example: sharing data between 2 couchdb databases #### Other @@ -1389,7 +1389,7 @@ With the ongoing changes to the networking and execution subsystems of docker te ## 0.2.0 (2013-04-23) - Runtime: ghost containers can be killed and waited for -* Documentation: update install intructions +* Documentation: update install instructions - Packaging: fix Vagrantfile - Development: automate releasing binaries and ubuntu packages + Add a changelog diff --git a/components/engine/CONTRIBUTING.md b/components/engine/CONTRIBUTING.md index d07b972eb7..3ed8bf9d43 100644 --- a/components/engine/CONTRIBUTING.md +++ b/components/engine/CONTRIBUTING.md @@ -4,14 +4,52 @@ Want to hack on Docker? Awesome! Here are instructions to get you started. They are probably not perfect, please let us know if anything feels wrong or incomplete. +## Topics + +* [Security Reports](#security-reports) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-issues) +* [Build Environment](#build-environment) +* [Contribution Guidelines](#contribution-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Security Reports + +Please **DO NOT** file an issue for security related issues. Please send your +reports to [security@docker.com](mailto:security@docker.com) instead. 
+ +## Design and Cleanup Proposals + +When considering a design proposal, we are looking for: + +* A description of the problem this design proposal solves +* An issue -- not a pull request -- that describes what you will take action on + * Please prefix your issue with `Proposal:` in the title +* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open) + before reporting a new issue. You can always pair with someone if you both + have the same idea. + +When considering a cleanup task, we are looking for: + +* A description of the refactors made + * Please note any logic changes if necessary +* A pull request with the code + * Please prefix your PR's title with `Cleanup:` so we can quickly address it. + * Your pull request must remain up to date with master, so rebase as necessary. + ## Reporting Issues -When reporting [issues](https://github.com/dotcloud/docker/issues) -on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), -the output of `uname -a` and the output of `docker version` along with -the output of `docker -D info`. Please include the steps required to reproduce -the problem if possible and applicable. -This information will help us review and fix your issue faster. +When reporting [issues](https://github.com/docker/docker/issues) on +GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc). +Please include: + +* The output of `uname -a`. +* The output of `docker version`. +* The output of `docker -D info`. + +Please also include the steps required to reproduce the problem if +possible and applicable. This information will help us review and fix +your issue faster. ## Build Environment @@ -34,7 +72,7 @@ received feedback on what to improve. We're trying very hard to keep Docker lean and focused. We don't want it to do everything for everybody. This means that we might decide against incorporating a new feature. However, there might be a way to implement -that feature *on top of* docker. +that feature *on top of* Docker. ### Discuss your design on the mailing list @@ -48,7 +86,7 @@ else is working on the same thing. ### Create issues... Any significant improvement should be documented as [a GitHub -issue](https://github.com/dotcloud/docker/issues) before anybody +issue](https://github.com/docker/docker/issues) before anybody starts working on it. ### ...but check for existing issues first! @@ -60,12 +98,12 @@ help prioritize the most common problems and requests. ### Conventions -Fork the repo and make changes on your fork in a feature branch: +Fork the repository and make changes on your fork in a feature branch: -- If it's a bugfix branch, name it XXX-something where XXX is the number of the - issue +- If it's a bug fix branch, name it XXXX-something where XXXX is the number of the + issue. - If it's a feature branch, create an enhancement issue to announce your - intentions, and name it XXX-something where XXX is the number of the issue. + intentions, and name it XXXX-something where XXXX is the number of the issue. Submit unit tests for your changes. Go has a great test framework built in; use it! Take a look at existing tests for inspiration. Run the full test suite on @@ -73,18 +111,16 @@ your branch before submitting a pull request. Update the documentation when creating or modifying features. Test your documentation changes for clarity, concision, and correctness, as -well as a clean documentation build. 
See ``docs/README.md`` for more -information on building the docs and how docs get released. +well as a clean documentation build. See `docs/README.md` for more +information on building the docs and how they get released. Write clean code. Universally formatted code promotes ease of writing, reading, and maintenance. Always run `gofmt -s -w file.go` on each changed file before -committing your changes. Most editors have plugins that do this automatically. +committing your changes. Most editors have plug-ins that do this automatically. Pull requests descriptions should be as clear as possible and include a reference to all the issues that they address. -Pull requests must not contain commits from other users or branches. - Commit messages must start with a capitalized and short summary (max. 50 chars) written in the imperative, followed by an optional, more detailed explanatory text which is separated from the summary by an empty line. @@ -95,26 +131,33 @@ sure to post a comment after pushing. The new commits will show up in the pull request automatically, but the reviewers will not be notified unless you comment. +Pull requests must be cleanly rebased ontop of master without multiple branches +mixed into the PR. + +**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your +feature branch to update your pull request rather than `merge master`. + Before the pull request is merged, make sure that you squash your commits into logical units of work using `git rebase -i` and `git push -f`. After every commit the test suite should be passing. Include documentation changes in the same commit so that a revert would remove all traces of the feature or fix. -Commits that fix or close an issue should include a reference like `Closes #XXX` -or `Fixes #XXX`, which will automatically close the issue when merged. +Commits that fix or close an issue should include a reference like +`Closes #XXXX` or `Fixes #XXXX`, which will automatically close the +issue when merged. -Please do not add yourself to the AUTHORS file, as it is regenerated +Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly from the Git history. ### Merge approval -Docker maintainers use LGTM (looks good to me) in comments on the code review +Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to indicate acceptance. A change requires LGTMs from an absolute majority of the maintainers of each -component affected. For example, if a change affects docs/ and registry/, it -needs an absolute majority from the maintainers of docs/ AND, separately, an -absolute majority of the maintainers of registry. +component affected. For example, if a change affects `docs/` and `registry/`, it +needs an absolute majority from the maintainers of `docs/` AND, separately, an +absolute majority of the maintainers of `registry/`. For more details see [MAINTAINERS.md](hack/MAINTAINERS.md) @@ -137,7 +180,6 @@ San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: @@ -165,20 +207,18 @@ By making a contribution to this project, I certify that: this project or the open source license(s) involved. 
``` -then you just add a line to every git commit message: +Then you just add a line to every git commit message: - Docker-DCO-1.1-Signed-off-by: Joe Smith (github: github_handle) + Signed-off-by: Joe Smith -using your real name (sorry, no pseudonyms or anonymous contributions.) +Using your real name (sorry, no pseudonyms or anonymous contributions.) -One way to automate this, is customise your get ``commit.template`` by adding -a ``prepare-commit-msg`` hook to your docker checkout: +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. -``` -curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg -``` - -* Note: the above script expects to find your GitHub user name in ``git config --get github.user`` +Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still +accepted, so there is no need to update outstanding pull requests to the new +format right away, but please do adjust your processes for future contributions. #### Small patch exception @@ -194,11 +234,83 @@ If you have any questions, please refer to the FAQ in the [docs](http://docs.doc ### How can I become a maintainer? -* Step 1: learn the component inside out -* Step 2: make yourself useful by contributing code, bugfixes, support etc. -* Step 3: volunteer on the irc channel (#docker@freenode) -* Step 4: propose yourself at a scheduled docker meeting in #docker-dev +* Step 1: Learn the component inside out +* Step 2: Make yourself useful by contributing code, bug fixes, support etc. +* Step 3: Volunteer on the IRC channel (#docker at Freenode) +* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev -Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. -You don't have to be a maintainer to make a difference on the project! +Don't forget: being a maintainer is a time investment. Make sure you +will have time to make yourself available. You don't have to be a +maintainer to make a difference on the project! + +### IRC Meetings + +There are two monthly meetings taking place on #docker-dev IRC to accommodate all time zones. +Anybody can ask for a topic to be discussed prior to the meeting. + +If you feel the conversation is going off-topic, feel free to point it out. + +For the exact dates and times, have a look at [the irc-minutes repo](https://github.com/docker/irc-minutes). +They also contain all the notes from previous meetings. + +## Docker Community Guidelines + +We want to keep the Docker community awesome, growing and collaborative. We +need your help to keep it that way. To help with this we've come up with some +general guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members: no + regional, racial, gender, or other abuse will be tolerated. We like nice people + way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community + feel welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. Share only content that + you own, do not share private or sensitive information, and don't break the + law. + +* Stay on topic: Make sure that you are posting to the correct channel + and avoid off-topic discussions.
Remember when you update an issue or + respond to an email you are potentially sending to a large number of + people. Please consider this before you update. Also remember that + nobody likes spam. + +### Guideline Violations — 3 Strikes Method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't + hold a grudge. + +* People who commit minor infractions will get some education, + rather than hammering them in the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how + much you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or + forgiveness. + +* Contact james@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with + a fair solution if there has been a misunderstanding. diff --git a/components/engine/Dockerfile b/components/engine/Dockerfile index 283e0a3262..8f47b0de75 100644 --- a/components/engine/Dockerfile +++ b/components/engine/Dockerfile @@ -6,7 +6,7 @@ # docker build -t docker . 
# # # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/dotcloud/docker --privileged -i -t docker bash +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash # # # Run the test suite: # docker run --privileged docker hack/make.sh test @@ -28,8 +28,7 @@ FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) # Packaged dependencies -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ - apt-utils \ +RUN apt-get update && apt-get install -y \ aufs-tools \ automake \ btrfs-tools \ @@ -43,7 +42,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ libsqlite3-dev \ lxc=1.0* \ mercurial \ - pandoc \ + parallel \ reprepro \ ruby1.9.1 \ ruby1.9.1-dev \ @@ -60,9 +59,10 @@ RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go -RUN curl -s https://go.googlecode.com/files/go1.2.1.src.tar.gz | tar -v -C /usr/local -xz +RUN curl -sSL https://golang.org/dl/go1.3.1.src.tar.gz | tar -v -C /usr/local -xz ENV PATH /usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +ENV PATH /go/bin:$PATH RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 # Compile Go for cross compilation @@ -80,6 +80,12 @@ RUN go get code.google.com/p/go.tools/cmd/cover # TODO replace FPM with some very minimal debhelper stuff RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 +# Install man page generator +RUN mkdir -p /go/src/github.com/cpuguy83 \ + && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ + && cd /go/src/github.com/cpuguy83/go-md2man \ + && go get -v ./... + # Get the "busybox" image source so we can build locally instead of pulling RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox @@ -94,11 +100,11 @@ RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker -WORKDIR /go/src/github.com/dotcloud/docker +WORKDIR /go/src/github.com/docker/docker ENV DOCKER_BUILDTAGS apparmor selinux # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source -ADD . /go/src/github.com/dotcloud/docker +COPY . /go/src/github.com/docker/docker diff --git a/components/engine/FIXME b/components/engine/FIXME deleted file mode 100644 index 4f27d36856..0000000000 --- a/components/engine/FIXME +++ /dev/null @@ -1,24 +0,0 @@ - -## FIXME - -This file is a loose collection of things to improve in the codebase, for the internal -use of the maintainers. - -They are not big enough to be in the roadmap, not user-facing enough to be github issues, -and not important enough to be discussed in the mailing list. - -They are just like FIXME comments in the source code, except we're not sure where in the source -to put them - so we put them here :) - - -* Run linter on codebase -* Unify build commands and regular commands -* Move source code into src/ subdir for clarity -* docker build: on non-existent local path for ADD, don't show full absolute path on the host -* use size header for progress bar in pull -* Clean up context upload in build!!! 
-* Parallel pull -* Upgrade dockerd without stopping containers -* Simple command to remove all untagged images (`docker rmi $(docker images | awk '/^/ { print $3 }')`) -* Simple command to clean up containers for disk space -* Clean up the ProgressReader api, it's a PITA to use diff --git a/components/engine/Makefile b/components/engine/Makefile index 2d07b39c3b..40c623a47a 100644 --- a/components/engine/Makefile +++ b/components/engine/Makefile @@ -1,7 +1,8 @@ .PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate # to allow `make BINDDIR=. shell` or `make BINDDIR= test` -BINDDIR := bundles +# (default to no bind mount if DOCKER_HOST is set) +BINDDIR := $(if $(DOCKER_HOST),,bundles) # to allow `make DOCSPORT=9000 docs` DOCSPORT := 8000 @@ -9,7 +10,7 @@ GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)") +DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)") DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" # to allow `make DOCSDIR=docs docs-shell` @@ -33,7 +34,7 @@ docs-shell: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash docs-release: docs-build - $(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./release.sh + $(DOCKER_RUN_DOCS) -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh test: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli diff --git a/components/engine/README.md b/components/engine/README.md index 3c378de6f4..857cd3c70a 100644 --- a/components/engine/README.md +++ b/components/engine/README.md @@ -131,9 +131,8 @@ Here's a typical Docker build process: ```bash FROM ubuntu:12.04 -RUN apt-get update -RUN apt-get install -q -y python python-pip curl -RUN curl -L https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv +RUN apt-get update && apt-get install -y python python-pip curl +RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv RUN cd helloflask-master && pip install -r requirements.txt ``` @@ -178,6 +177,9 @@ Under the hood, Docker is built on the following components: Contributing to Docker ====================== +[![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker) +[![Travis](https://travis-ci.org/docker/docker.svg?branch=master)](https://travis-ci.org/docker/docker) + Want to hack on Docker? Awesome! There are instructions to get you started [here](CONTRIBUTING.md). 
diff --git a/components/engine/VERSION b/components/engine/VERSION index 45a1b3f445..fe00fb1348 100644 --- a/components/engine/VERSION +++ b/components/engine/VERSION @@ -1 +1 @@ -1.1.2 +1.1.2-dev \ No newline at end of file diff --git a/components/engine/api/README.md b/components/engine/api/README.md index 3ef33f8c29..453f61a1a1 100644 --- a/components/engine/api/README.md +++ b/components/engine/api/README.md @@ -1,5 +1,5 @@ This directory contains code pertaining to the Docker API: - - Used by the docker client when comunicating with the docker deamon + - Used by the docker client when communicating with the docker daemon - - Used by third party tools wishing to interface with the docker deamon + - Used by third party tools wishing to interface with the docker daemon diff --git a/components/engine/api/client/cli.go b/components/engine/api/client/cli.go index bb5d191e16..d80f9cc32c 100644 --- a/components/engine/api/client/cli.go +++ b/components/engine/api/client/cli.go @@ -10,11 +10,24 @@ import ( "strings" "text/template" - flag "github.com/dotcloud/docker/pkg/mflag" - "github.com/dotcloud/docker/pkg/term" - "github.com/dotcloud/docker/registry" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" ) +type DockerCli struct { + proto string + addr string + configFile *registry.ConfigFile + in io.ReadCloser + out io.Writer + err io.Writer + isTerminal bool + terminalFd uintptr + tlsConfig *tls.Config + scheme string +} + var funcMap = template.FuncMap{ "json": func(v interface{}) string { a, _ := json.Marshal(v) @@ -34,7 +47,8 @@ func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) { return method.Interface().(func(...string) error), true } -func (cli *DockerCli) ParseCommands(args ...string) error { +// Cmd executes the specified command +func (cli *DockerCli) Cmd(args ...string) error { if len(args) > 0 { method, exists := cli.getMethod(args[0]) if !exists { @@ -97,16 +111,3 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsC scheme: scheme, } } - -type DockerCli struct { - proto string - addr string - configFile *registry.ConfigFile - in io.ReadCloser - out io.Writer - err io.Writer - isTerminal bool - terminalFd uintptr - tlsConfig *tls.Config - scheme string -} diff --git a/components/engine/api/client/commands.go b/components/engine/api/client/commands.go index df2125f5f3..81b0668cda 100644 --- a/components/engine/api/client/commands.go +++ b/components/engine/api/client/commands.go @@ -22,19 +22,21 @@ import ( "text/template" "time" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/opts" - "github.com/dotcloud/docker/pkg/signal" - "github.com/dotcloud/docker/pkg/term" - "github.com/dotcloud/docker/pkg/units" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/utils/filters" + "github.com/docker/docker/api" + "github.com/docker/docker/archive" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/nat" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + 
"github.com/docker/docker/pkg/units" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" ) const ( @@ -67,7 +69,8 @@ func (cli *DockerCli) CmdHelp(args ...string) error { {"inspect", "Return low-level information on a container"}, {"kill", "Kill a running container"}, {"load", "Load an image from a tar archive"}, - {"login", "Register or log in to the Docker registry server"}, + {"login", "Register or log in to a Docker registry server"}, + {"logout", "Log out from a Docker registry server"}, {"logs", "Fetch the logs of a container"}, {"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"}, {"pause", "Pause all processes within a container"}, @@ -161,28 +164,32 @@ func (cli *DockerCli) CmdBuild(args ...string) error { if _, err = os.Stat(filename); os.IsNotExist(err) { return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) } - if err = utils.ValidateContextDirectory(root); err != nil { + var excludes []string + ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("Error reading .dockerignore: '%s'", err) + } + for _, pattern := range strings.Split(string(ignore), "\n") { + ok, err := filepath.Match(pattern, "Dockerfile") + if err != nil { + return fmt.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err) + } + if ok { + return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern) + } + excludes = append(excludes, pattern) + } + if err = utils.ValidateContextDirectory(root, excludes); err != nil { return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err) } options := &archive.TarOptions{ Compression: archive.Uncompressed, - } - if ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("Error reading .dockerignore: '%s'", err) - } else if err == nil { - for _, pattern := range strings.Split(string(ignore), "\n") { - ok, err := filepath.Match(pattern, "Dockerfile") - if err != nil { - utils.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err) - continue - } - if ok { - return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern) - } - options.Excludes = append(options.Excludes, pattern) - } + Excludes: excludes, } context, err = archive.TarWithOptions(root, options) + if err != nil { + return err + } } var body io.Reader // Setup an upload progress bar @@ -196,7 +203,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { //Check if the given image name can be resolved if *tag != "" { - repository, _ := utils.ParseRepositoryTag(*tag) + repository, _ := parsers.ParseRepositoryTag(*tag) if _, _, err := registry.ResolveRepositoryName(repository); err != nil { return err } @@ -349,6 +356,32 @@ func (cli *DockerCli) CmdLogin(args ...string) error { return nil } +// log out from a Docker registry +func (cli *DockerCli) CmdLogout(args ...string) error { + cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.") + + if err := cmd.Parse(args); err != nil { + return nil + } + serverAddress := registry.IndexServerAddress() + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + cli.LoadConfigFile() + if _, ok := cli.configFile.Configs[serverAddress]; !ok { + fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) + } else { + fmt.Fprintf(cli.out, 
"Remove login credentials for %s\n", serverAddress) + delete(cli.configFile.Configs, serverAddress) + + if err := registry.SaveConfig(cli.configFile); err != nil { + return fmt.Errorf("Failed to save docker config: %v", err) + } + } + return nil +} + // 'docker wait': block until a container stops func (cli *DockerCli) CmdWait(args ...string) error { cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.") @@ -391,6 +424,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error { if dockerversion.GITCOMMIT != "" { fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) } + fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH) body, _, err := readBody(cli.call("GET", "/version", nil, false)) if err != nil { @@ -400,11 +434,11 @@ func (cli *DockerCli) CmdVersion(args ...string) error { out := engine.NewOutput() remoteVersion, err := out.AddEnv() if err != nil { - utils.Errorf("Error reading remote version: %s\n", err) + log.Errorf("Error reading remote version: %s", err) return err } if _, err := out.Write(body); err != nil { - utils.Errorf("Error reading remote version: %s\n", err) + log.Errorf("Error reading remote version: %s", err) return err } out.Close() @@ -440,7 +474,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error { } if _, err := out.Write(body); err != nil { - utils.Errorf("Error reading remote info: %s\n", err) + log.Errorf("Error reading remote info: %s", err) return err } out.Close() @@ -457,6 +491,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error { } fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) + fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) @@ -471,9 +506,6 @@ func (cli *DockerCli) CmdInfo(args ...string) error { if initPath := remoteInfo.Get("InitPath"); initPath != "" { fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) } - if len(remoteInfo.GetList("Sockets")) != 0 { - fmt.Fprintf(cli.out, "Sockets: %v\n", remoteInfo.GetList("Sockets")) - } } if len(remoteInfo.GetList("IndexServerAddress")) != 0 { @@ -551,7 +583,7 @@ func (cli *DockerCli) CmdRestart(args ...string) error { } func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { - sigc := make(chan os.Signal, 1) + sigc := make(chan os.Signal, 128) signal.CatchAll(sigc) go func() { for s := range sigc { @@ -566,10 +598,10 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { } } if sig == "" { - utils.Errorf("Unsupported signal: %d. Discarding.", s) + log.Errorf("Unsupported signal: %d. 
Discarding.", s) } if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { - utils.Debugf("Error sending signal: %s", err) + log.Debugf("Error sending signal: %s", err) } } }() @@ -659,7 +691,7 @@ func (cli *DockerCli) CmdStart(args ...string) error { if *openStdin || *attach { if tty && cli.isTerminal { if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { - utils.Errorf("Error monitoring TTY size: %s\n", err) + log.Errorf("Error monitoring TTY size: %s", err) } } return <-cErr @@ -982,7 +1014,7 @@ func (cli *DockerCli) CmdRm(args ...string) error { cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers") v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container") - force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container") + force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") if err := cmd.Parse(args); err != nil { return nil @@ -991,6 +1023,7 @@ func (cli *DockerCli) CmdRm(args ...string) error { cmd.Usage() return nil } + val := url.Values{} if *v { val.Set("v", "1") @@ -998,6 +1031,7 @@ func (cli *DockerCli) CmdRm(args ...string) error { if *link { val.Set("link", "1") } + if *force { val.Set("force", "1") } @@ -1051,16 +1085,19 @@ func (cli *DockerCli) CmdImport(args ...string) error { return nil } - var src, repository, tag string + var ( + v = url.Values{} + src = cmd.Arg(0) + repository = cmd.Arg(1) + ) + + v.Set("fromSrc", src) + v.Set("repo", repository) if cmd.NArg() == 3 { fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. 
Please use URL|- [REPOSITORY[:TAG]]\n") - src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) - } else { - src = cmd.Arg(0) - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) + v.Set("tag", cmd.Arg(2)) } - v := url.Values{} if repository != "" { //Check if the given image name can be resolved @@ -1069,10 +1106,6 @@ func (cli *DockerCli) CmdImport(args ...string) error { } } - v.Set("repo", repository) - v.Set("tag", tag) - v.Set("fromSrc", src) - var in io.Reader if src == "-" { @@ -1096,7 +1129,7 @@ func (cli *DockerCli) CmdPush(args ...string) error { cli.LoadConfigFile() - remote, tag := utils.ParseRepositoryTag(name) + remote, tag := parsers.ParseRepositoryTag(name) // Resolve the Repository name from fqn to hostname + name hostname, _, err := registry.ResolveRepositoryName(remote) @@ -1158,12 +1191,18 @@ func (cli *DockerCli) CmdPull(args ...string) error { cmd.Usage() return nil } + var ( + v = url.Values{} + remote = cmd.Arg(0) + ) + + v.Set("fromImage", remote) - remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0)) if *tag == "" { - *tag = parsedTag + v.Set("tag", *tag) } + remote, _ = parsers.ParseRepositoryTag(remote) // Resolve the Repository name from fqn to hostname + name hostname, _, err := registry.ResolveRepositoryName(remote) if err != nil { @@ -1174,9 +1213,6 @@ func (cli *DockerCli) CmdPull(args ...string) error { // Resolve the Auth config relevant for this server authConfig := cli.configFile.ResolveAuthConfig(hostname) - v := url.Values{} - v.Set("fromImage", remote) - v.Set("tag", *tag) pull := func(authConfig registry.AuthConfig) error { buf, err := json.Marshal(authConfig) @@ -1216,7 +1252,7 @@ func (cli *DockerCli) CmdImages(args ...string) error { flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format") flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format") - var flFilter opts.ListOpts + flFilter := opts.NewListOpts(nil) cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')") if err := cmd.Parse(args); err != nil { @@ -1349,7 +1385,7 @@ func (cli *DockerCli) CmdImages(args ...string) error { for _, out := range outs.Data { for _, repotag := range out.GetList("RepoTags") { - repo, tag := utils.ParseRepositoryTag(repotag) + repo, tag := parsers.ParseRepositoryTag(repotag) outID := out.Get("Id") if !*noTrunc { outID = utils.TruncateID(outID) @@ -1449,6 +1485,9 @@ func (cli *DockerCli) CmdPs(args ...string) error { before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.") last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values. Valid filters:\nexited= - containers with exit code of ") + if err := cmd.Parse(args); err != nil { return nil } @@ -1472,6 +1511,24 @@ func (cli *DockerCli) CmdPs(args ...string) error { v.Set("size", "1") } + // Consolidate all filter flags, and sanity check them. + // They'll get processed in the daemon/server. 
+ psFilterArgs := filters.Args{} + for _, f := range flFilter.GetAll() { + var err error + psFilterArgs, err = filters.ParseFlag(f, psFilterArgs) + if err != nil { + return err + } + } + if len(psFilterArgs) > 0 { + filterJson, err := filters.ToParam(psFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJson) + } + body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false)) if err != nil { return err @@ -1511,6 +1568,7 @@ func (cli *DockerCli) CmdPs(args ...string) error { outCommand = out.Get("Command") ports = engine.NewTable("", 0) ) + outCommand = strconv.Quote(outCommand) if !*noTrunc { outCommand = utils.Trunc(outCommand, 20) } @@ -1549,7 +1607,7 @@ func (cli *DockerCli) CmdCommit(args ...string) error { var ( name = cmd.Arg(0) - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) + repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) ) if name == "" || len(cmd.Args()) > 2 { @@ -1614,7 +1672,7 @@ func (cli *DockerCli) CmdEvents(args ...string) error { loc = time.FixedZone(time.Now().Zone()) ) var setTime = func(key, value string) { - format := "2006-01-02 15:04:05 -0700 MST" + format := time.RFC3339Nano if len(value) < len(format) { format = format[:len(value)] } @@ -1736,7 +1794,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error { var ( cmd = cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container") noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN") - proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied.") + proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode). 
SIGCHLD, SIGKILL, and SIGSTOP are not proxied.") ) if err := cmd.Parse(args); err != nil { @@ -1770,7 +1828,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error { if tty && cli.isTerminal { if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { - utils.Debugf("Error monitoring TTY size: %s", err) + log.Debugf("Error monitoring TTY size: %s", err) } } @@ -1862,7 +1920,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error { type ports []int func (cli *DockerCli) CmdTag(args ...string) error { - cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository") + cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository") force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") if err := cmd.Parse(args); err != nil { return nil @@ -1873,7 +1931,7 @@ func (cli *DockerCli) CmdTag(args ...string) error { } var ( - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) + repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) v = url.Values{} ) @@ -1894,6 +1952,41 @@ func (cli *DockerCli) CmdTag(args ...string) error { return nil } +func (cli *DockerCli) pullImage(image string) error { + v := url.Values{} + repos, tag := parsers.ParseRepositoryTag(image) + // pull only the image tagged 'latest' if no tag was specified + if tag == "" { + tag = "latest" + } + v.Set("fromImage", repos) + v.Set("tag", tag) + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(repos) + if err != nil { + return err + } + + // Load the auth config file, to be able to pull the image + cli.LoadConfigFile() + + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { + return err + } + return nil +} + func (cli *DockerCli) CmdRun(args ...string) error { // FIXME: just use runconfig.Parse already config, hostConfig, cmd, err := runconfig.ParseSubcommand(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil) @@ -1955,37 +2048,10 @@ func (cli *DockerCli) CmdRun(args ...string) error { if statusCode == 404 { fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) - v := url.Values{} - repos, tag := utils.ParseRepositoryTag(config.Image) - // pull only the image tagged 'latest' if no tag was specified - if tag == "" { - tag = "latest" - } - v.Set("fromImage", repos) - v.Set("tag", tag) - - // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(repos) - if err != nil { - return err - } - - // Load the auth config file, to be able to pull the image - cli.LoadConfigFile() - - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(hostname) - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { + if err = cli.pullImage(config.Image); err != nil { return err } + // Retry if 
stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil { return err } @@ -2033,9 +2099,9 @@ func (cli *DockerCli) CmdRun(args ...string) error { // Block the return until the chan gets closed defer func() { - utils.Debugf("End of CmdRun(), Waiting for hijack to finish.") + log.Debugf("End of CmdRun(), Waiting for hijack to finish.") if _, ok := <-hijacked; ok { - utils.Errorf("Hijack did not finish (chan still open)") + log.Errorf("Hijack did not finish (chan still open)") } }() @@ -2081,7 +2147,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { } case err := <-errCh: if err != nil { - utils.Debugf("Error hijack: %s", err) + log.Debugf("Error hijack: %s", err) return err } } @@ -2093,13 +2159,13 @@ func (cli *DockerCli) CmdRun(args ...string) error { if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal { if err := cli.monitorTtySize(runResult.Get("Id")); err != nil { - utils.Errorf("Error monitoring TTY size: %s\n", err) + log.Errorf("Error monitoring TTY size: %s", err) } } if errCh != nil { if err := <-errCh; err != nil { - utils.Debugf("Error hijack: %s", err) + log.Debugf("Error hijack: %s", err) return err } } diff --git a/components/engine/api/client/hijack.go b/components/engine/api/client/hijack.go index 0a9d5d8ef2..ba6ebfb0d8 100644 --- a/components/engine/api/client/hijack.go +++ b/components/engine/api/client/hijack.go @@ -11,10 +11,11 @@ import ( "runtime" "strings" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/pkg/term" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/api" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" ) func (cli *DockerCli) dial() (net.Conn, error) { @@ -88,12 +89,12 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea }() // When TTY is ON, use regular copy - if setRawTerminal { + if setRawTerminal && stdout != nil { _, err = io.Copy(stdout, br) } else { _, err = utils.StdCopy(stdout, stderr, br) } - utils.Debugf("[hijack] End of stdout") + log.Debugf("[hijack] End of stdout") return err }) } @@ -101,15 +102,15 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea sendStdin := utils.Go(func() error { if in != nil { io.Copy(rwc, in) - utils.Debugf("[hijack] End of stdin") + log.Debugf("[hijack] End of stdin") } if tcpc, ok := rwc.(*net.TCPConn); ok { if err := tcpc.CloseWrite(); err != nil { - utils.Debugf("Couldn't send EOF: %s\n", err) + log.Debugf("Couldn't send EOF: %s", err) } } else if unixc, ok := rwc.(*net.UnixConn); ok { if err := unixc.CloseWrite(); err != nil { - utils.Debugf("Couldn't send EOF: %s\n", err) + log.Debugf("Couldn't send EOF: %s", err) } } // Discard errors due to pipe interruption @@ -118,14 +119,14 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea if stdout != nil || stderr != nil { if err := <-receiveStdout; err != nil { - utils.Debugf("Error receiveStdout: %s", err) + log.Debugf("Error receiveStdout: %s", err) return err } } if !cli.isTerminal { if err := <-sendStdin; err != nil { - utils.Debugf("Error sendStdin: %s", err) + log.Debugf("Error sendStdin: %s", err) return err } } diff --git a/components/engine/api/client/utils.go b/components/engine/api/client/utils.go index 13b5241d15..e4ef8d3875 100644 --- a/components/engine/api/client/utils.go 
+++ b/components/engine/api/client/utils.go @@ -17,12 +17,13 @@ import ( "strings" "syscall" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/pkg/term" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/api" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" ) var ( @@ -165,7 +166,7 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in } else { _, err = utils.StdCopy(stdout, stderr, resp.Body) } - utils.Debugf("[stream] End of stdout") + log.Debugf("[stream] End of stdout") return err } return nil @@ -180,7 +181,7 @@ func (cli *DockerCli) resizeTty(id string) { v.Set("h", strconv.Itoa(height)) v.Set("w", strconv.Itoa(width)) if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil { - utils.Debugf("Error resize: %s", err) + log.Debugf("Error resize: %s", err) } } @@ -237,7 +238,7 @@ func (cli *DockerCli) getTtySize() (int, int) { } ws, err := term.GetWinsize(cli.terminalFd) if err != nil { - utils.Debugf("Error getting size: %s", err) + log.Debugf("Error getting size: %s", err) if ws == nil { return 0, 0 } diff --git a/components/engine/api/common.go b/components/engine/api/common.go index e73705000c..5cc33a9e1a 100644 --- a/components/engine/api/common.go +++ b/components/engine/api/common.go @@ -5,19 +5,20 @@ import ( "mime" "strings" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/pkg/version" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/version" ) const ( - APIVERSION version.Version = "1.13" + APIVERSION version.Version = "1.14" DEFAULTHTTPHOST = "127.0.0.1" DEFAULTUNIXSOCKET = "/var/run/docker.sock" ) func ValidateHost(val string) (string, error) { - host, err := utils.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val) + host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val) if err != nil { return val, err } @@ -42,7 +43,7 @@ func DisplayablePorts(ports *engine.Table) string { func MatchesContentType(contentType, expectedType string) bool { mimetype, _, err := mime.ParseMediaType(contentType) if err != nil { - utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error()) + log.Errorf("Error parsing media type: %s error: %s", contentType, err.Error()) } return err == nil && mimetype == expectedType } diff --git a/components/engine/api/server/MAINTAINERS b/components/engine/api/server/MAINTAINERS index c92a061143..310a159716 100644 --- a/components/engine/api/server/MAINTAINERS +++ b/components/engine/api/server/MAINTAINERS @@ -1,2 +1,3 @@ Victor Vieux (@vieux) -Johan Euphrosine (@proppy) +# off the grid until september +# Johan Euphrosine (@proppy) diff --git a/components/engine/api/server/server.go b/components/engine/api/server/server.go index b3a0590fda..96f5bca6a6 100644 --- a/components/engine/api/server/server.go +++ b/components/engine/api/server/server.go @@ -11,7 +11,6 @@ import ( "fmt" "io" "io/ioutil" - "log" "net" "net/http" "net/http/pprof" @@ -21,16 +20,18 @@ import ( "syscall" "code.google.com/p/go.net/websocket" - - "github.com/dotcloud/docker/api" - 
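A quick aside on why MatchesContentType (in the api/common.go hunk above) goes through mime.ParseMediaType rather than comparing strings: clients routinely append parameters such as "; charset=utf-8" to the media type. A self-contained sketch of the same check:

package main

import (
	"fmt"
	"mime"
)

// matchesContentType mirrors api.MatchesContentType above: parse the media
// type so parameters after the type don't break the comparison.
func matchesContentType(contentType, expectedType string) bool {
	mimetype, _, err := mime.ParseMediaType(contentType)
	return err == nil && mimetype == expectedType
}

func main() {
	fmt.Println(matchesContentType("application/json; charset=utf-8", "application/json")) // true
	fmt.Println(matchesContentType("text/plain", "application/json"))                      // false
}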
"github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/pkg/listenbuffer" - "github.com/dotcloud/docker/pkg/systemd" - "github.com/dotcloud/docker/pkg/user" - "github.com/dotcloud/docker/pkg/version" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/utils" + "github.com/docker/libcontainer/user" "github.com/gorilla/mux" + + "github.com/docker/docker/api" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/listenbuffer" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/systemd" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" ) var ( @@ -87,7 +88,7 @@ func httpError(w http.ResponseWriter, err error) { } if err != nil { - utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error()) + log.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error()) http.Error(w, err.Error(), statusCode) } } @@ -237,10 +238,10 @@ func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseW outsLegacy := engine.NewTable("Created", 0) for _, out := range outs.Data { for _, repoTag := range out.GetList("RepoTags") { - parts := strings.Split(repoTag, ":") + repo, tag := parsers.ParseRepositoryTag(repoTag) outLegacy := &engine.Env{} - outLegacy.Set("Repository", parts[0]) - outLegacy.Set("Tag", parts[1]) + outLegacy.Set("Repository", repo) + outLegacy.SetJson("Tag", tag) outLegacy.Set("Id", out.Get("Id")) outLegacy.SetInt64("Created", out.GetInt64("Created")) outLegacy.SetInt64("Size", out.GetInt64("Size")) @@ -301,7 +302,7 @@ func getContainersChanges(eng *engine.Engine, version version.Version, w http.Re if vars == nil { return fmt.Errorf("Missing parameter") } - var job = eng.Job("changes", vars["name"]) + var job = eng.Job("container_changes", vars["name"]) streamJSON(job, w, false) return job.Run() @@ -338,6 +339,7 @@ func getContainersJSON(eng *engine.Engine, version version.Version, w http.Respo job.Setenv("since", r.Form.Get("since")) job.Setenv("before", r.Form.Get("before")) job.Setenv("limit", r.Form.Get("limit")) + job.Setenv("filters", r.Form.Get("filters")) if version.GreaterThanOrEqualTo("1.5") { streamJSON(job, w, false) @@ -437,7 +439,7 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit stdoutBuffer = bytes.NewBuffer(nil) ) if err := config.Decode(r.Body); err != nil { - utils.Errorf("%s", err) + log.Errorf("%s", err) } if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { @@ -468,6 +470,7 @@ func postImagesCreate(eng *engine.Engine, version version.Version, w http.Respon var ( image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") tag = r.Form.Get("tag") job *engine.Job ) @@ -482,18 +485,24 @@ func postImagesCreate(eng *engine.Engine, version version.Version, w http.Respon } } if image != "" { //pull + if tag == "" { + image, tag = parsers.ParseRepositoryTag(image) + } metaHeaders := map[string][]string{} for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { metaHeaders[k] = v } } - job = eng.Job("pull", r.Form.Get("fromImage"), tag) + job = eng.Job("pull", image, tag) job.SetenvBool("parallel", version.GreaterThan("1.3")) job.SetenvJson("metaHeaders", metaHeaders) job.SetenvJson("authConfig", authConfig) } else { //import - job = eng.Job("import", r.Form.Get("fromSrc"), r.Form.Get("repo"), tag) + if tag == "" { + repo, tag = parsers.ParseRepositoryTag(repo) + } + job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag) 
job.Stdin.Add(r.Body) } @@ -670,10 +679,12 @@ func deleteContainers(eng *engine.Engine, version version.Version, w http.Respon if vars == nil { return fmt.Errorf("Missing parameter") } - job := eng.Job("container_delete", vars["name"]) + job := eng.Job("delete", vars["name"]) + + job.Setenv("forceRemove", r.Form.Get("force")) + job.Setenv("removeVolume", r.Form.Get("v")) job.Setenv("removeLink", r.Form.Get("link")) - job.Setenv("forceRemove", r.Form.Get("force")) if err := job.Run(); err != nil { return err } @@ -706,13 +717,16 @@ func postContainersStart(eng *engine.Engine, version version.Version, w http.Res ) // allow a nil body for backwards compatibility - if r.Body != nil { - if api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") { - if err := job.DecodeEnv(r.Body); err != nil { - return err - } + if r.Body != nil && r.ContentLength > 0 { + if !api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") { + return fmt.Errorf("Content-Type of application/json is required") + } + + if err := job.DecodeEnv(r.Body); err != nil { + return err } } + if err := job.Run(); err != nil { if err.Error() == "Container already started" { w.WriteHeader(http.StatusNotModified) @@ -864,7 +878,7 @@ func wsContainersAttach(eng *engine.Engine, version version.Version, w http.Resp job.Stdout.Add(ws) job.Stderr.Set(ws) if err := job.Run(); err != nil { - utils.Errorf("Error attaching websocket: %s", err) + log.Errorf("Error attaching websocket: %s", err) } }) h.ServeHTTP(w, r) @@ -991,7 +1005,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) job.Stdout.Add(w) if err := job.Run(); err != nil { - utils.Errorf("%s", err.Error()) + log.Errorf("%s", err.Error()) if strings.Contains(err.Error(), "No such container") { w.WriteHeader(http.StatusNotFound) } else if strings.Contains(err.Error(), "no such file or directory") { @@ -1019,16 +1033,16 @@ func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { // log the request - utils.Debugf("Calling %s %s", localMethod, localRoute) + log.Debugf("Calling %s %s", localMethod, localRoute) if logging { - log.Println(r.Method, r.RequestURI) + log.Infof("%s %s", r.Method, r.RequestURI) } if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { userAgent := strings.Split(r.Header.Get("User-Agent"), "/") if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) { - utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) + log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) } } version := version.Version(mux.Vars(r)["version"]) @@ -1045,7 +1059,7 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local } if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { - utils.Errorf("Error making handler: %s", err) + log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err) httpError(w, err) } } @@ -1134,7 +1148,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st for method, routes := range m { for route, fct := range 
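The postContainersStart change above tightens the old "allow a nil body" branch: a body is decoded only when one was actually sent (ContentLength > 0), and anything that isn't JSON is rejected instead of silently ignored. A hypothetical stand-alone handler showing the same guard (names here are illustrative, not the daemon's):

package main

import (
	"encoding/json"
	"fmt"
	"mime"
	"net/http"
)

func startHandler(w http.ResponseWriter, r *http.Request) {
	if r.Body != nil && r.ContentLength > 0 {
		mt, _, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
		if err != nil || mt != "application/json" {
			http.Error(w, "Content-Type of application/json is required", http.StatusBadRequest)
			return
		}
		var hostConfig map[string]interface{}
		if err := json.NewDecoder(r.Body).Decode(&hostConfig); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/containers/start", startHandler)
	fmt.Println(http.ListenAndServe("127.0.0.1:8080", nil))
}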
routes { - utils.Debugf("Registering %s, %s", method, route) + log.Debugf("Registering %s, %s", method, route) // NOTE: scope issue, make sure the variables are local and won't be changed localRoute := route localFct := fct @@ -1181,7 +1195,7 @@ func ServeFd(addr string, handle http.Handler) error { chErrors := make(chan error, len(ls)) // We don't want to start serving on these sockets until the - // "initserver" job has completed. Otherwise required handlers + // daemon is initialized and installed. Otherwise required handlers // won't be ready. <-activationLock @@ -1224,7 +1238,7 @@ func changeGroup(addr string, nameOrGid string) error { return err } - utils.Debugf("%s group found. gid: %d", nameOrGid, gid) + log.Debugf("%s group found. gid: %d", nameOrGid, gid) return os.Chown(addr, 0, gid) } @@ -1295,7 +1309,7 @@ func ListenAndServe(proto, addr string, job *engine.Job) error { switch proto { case "tcp": if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { - log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") } case "unix": socketGroup := job.Getenv("SocketGroup") @@ -1303,7 +1317,7 @@ func ListenAndServe(proto, addr string, job *engine.Job) error { if err := changeGroup(addr, socketGroup); err != nil { if socketGroup == "docker" { // if the user hasn't explicitly specified the group ownership, don't fail on errors. - utils.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) + log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) } else { return err } @@ -1338,7 +1352,7 @@ func ServeApi(job *engine.Job) engine.Status { return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) } go func() { - log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1]) + log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job) }() } diff --git a/components/engine/api/server/server_unit_test.go b/components/engine/api/server/server_unit_test.go index 2d14f89551..950fea51d4 100644 --- a/components/engine/api/server/server_unit_test.go +++ b/components/engine/api/server/server_unit_test.go @@ -7,11 +7,13 @@ import ( "io" "net/http" "net/http/httptest" + "reflect" "strings" "testing" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/engine" + "github.com/docker/docker/api" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/version" ) func TestGetBoolParam(t *testing.T) { @@ -111,8 +113,105 @@ func TestGetInfo(t *testing.T) { if v.GetInt("Containers") != 1 { t.Fatalf("%#v\n", v) } - if r.HeaderMap.Get("Content-Type") != "application/json" { - t.Fatalf("%#v\n", r) + assertContentType(r, "application/json", t) +} + +func TestGetImagesJSON(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("images", func(job *engine.Job) engine.Status { + called = true + v := createEnvFromGetImagesJSONStruct(sampleImage) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertHttpNotError(r, t) + assertContentType(r, "application/json", t) + var observed getImagesJSONStruct + if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil { + t.Fatal(err) 
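These new unit tests all share one shape: register a fake job on a fresh engine, drive a request through an httptest.ResponseRecorder, then assert on the recorder. Stripped of the engine plumbing, the core pattern is just standard net/http testing:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	called := false
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		called = true
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"Containers":1}`)
	})
	r := httptest.NewRecorder()
	req, _ := http.NewRequest("GET", "/info", nil)
	h.ServeHTTP(r, req)
	// Assertions run against the recorder, as assertContentType does below.
	fmt.Println(called, r.Code, r.HeaderMap.Get("Content-Type"), r.Body.String())
}

The parallel switch from t.Fatalf to t.Errorf inside the registered handlers is presumably deliberate: reporting a mismatch without aborting lets the job still return StatusOK, so the request path under test completes and later assertions get a chance to run.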
+ } + if !reflect.DeepEqual(observed, sampleImage) { + t.Errorf("Expected %#v but got %#v", sampleImage, observed) + } +} + +func TestGetImagesJSONFilter(t *testing.T) { + eng := engine.New() + filter := "nothing" + eng.Register("images", func(job *engine.Job) engine.Status { + filter = job.Getenv("filter") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t) + if filter != "aaaa" { + t.Errorf("%#v", filter) + } +} + +func TestGetImagesJSONFilters(t *testing.T) { + eng := engine.New() + filter := "nothing" + eng.Register("images", func(job *engine.Job) engine.Status { + filter = job.Getenv("filters") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t) + if filter != "nnnn" { + t.Errorf("%#v", filter) + } +} + +func TestGetImagesJSONAll(t *testing.T) { + eng := engine.New() + allFilter := "-1" + eng.Register("images", func(job *engine.Job) engine.Status { + allFilter = job.Getenv("all") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?all=1", nil, eng, t) + if allFilter != "1" { + t.Errorf("%#v", allFilter) + } +} + +func TestGetImagesJSONLegacyFormat(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("images", func(job *engine.Job) engine.Status { + called = true + outsLegacy := engine.NewTable("Created", 0) + outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage)) + if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertHttpNotError(r, t) + assertContentType(r, "application/json", t) + images := engine.NewTable("Created", 0) + if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil { + t.Fatal(err) + } + if images.Len() != 1 { + t.Fatalf("Expected 1 image, %d found", images.Len()) + } + image := images.Data[0] + if image.Get("Tag") != "test-tag" { + t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag")) + } + if image.Get("Repository") != "test-name" { + t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository")) } } @@ -123,12 +222,12 @@ func TestGetContainersByName(t *testing.T) { eng.Register("container_inspect", func(job *engine.Job) engine.Status { called = true if job.Args[0] != name { - t.Fatalf("name != '%s': %#v", name, job.Args[0]) + t.Errorf("name != '%s': %#v", name, job.Args[0]) } if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { - t.Fatal("dirty env variable not set") + t.Errorf("dirty env variable not set") } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { - t.Fatal("dirty env variable set when it shouldn't") + t.Errorf("dirty env variable set when it shouldn't") } v := &engine.Env{} v.SetBool("dirty", true) @@ -141,9 +240,7 @@ func TestGetContainersByName(t *testing.T) { if !called { t.Fatal("handler was not called") } - if r.HeaderMap.Get("Content-Type") != "application/json" { - t.Fatalf("%#v\n", r) - } + assertContentType(r, "application/json", t) var stdoutJson interface{} if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { t.Fatalf("%#v", err) @@ -178,21 +275,19 @@ func TestGetEvents(t *testing.T) { if !called { t.Fatal("handler was not called") } - if r.HeaderMap.Get("Content-Type") != "application/json" { - t.Fatalf("%#v\n", r) - } + assertContentType(r, "application/json", t) var stdout_json struct { Since int Until int } if err := 
json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil { - t.Fatalf("%#v", err) + t.Fatal(err) } if stdout_json.Since != 1 { - t.Fatalf("since != 1: %#v", stdout_json.Since) + t.Errorf("since != 1: %#v", stdout_json.Since) } if stdout_json.Until != 0 { - t.Fatalf("until != 0: %#v", stdout_json.Until) + t.Errorf("until != 0: %#v", stdout_json.Until) } } @@ -319,13 +414,77 @@ func TestGetImagesHistory(t *testing.T) { } } +func TestGetImagesByName(t *testing.T) { + eng := engine.New() + name := "image_name" + var called bool + eng.Register("image_inspect", func(job *engine.Job) engine.Status { + called = true + if job.Args[0] != name { + t.Fatalf("name != '%s': %#v", name, job.Args[0]) + } + if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { + t.Fatal("dirty env variable not set") + } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { + t.Fatal("dirty env variable set when it shouldn't") + } + v := &engine.Env{} + v.SetBool("dirty", true) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + if r.HeaderMap.Get("Content-Type") != "application/json" { + t.Fatalf("%#v\n", r) + } + var stdoutJson interface{} + if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { + t.Fatalf("%#v", err) + } + if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { + t.Fatalf("%#v", stdoutJson) + } +} + +func TestDeleteContainers(t *testing.T) { + eng := engine.New() + name := "foo" + var called bool + eng.Register("delete", func(job *engine.Job) engine.Status { + called = true + if len(job.Args) == 0 { + t.Fatalf("Job arguments is empty") + } + if job.Args[0] != name { + t.Fatalf("name != '%s': %#v", name, job.Args[0]) + } + return engine.StatusOK + }) + r := serveRequest("DELETE", "/containers/"+name, nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + if r.Code != http.StatusNoContent { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent) + } +} + func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { + return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t) +} + +func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { r := httptest.NewRecorder() req, err := http.NewRequest(method, target, body) if err != nil { t.Fatal(err) } - if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := ServeRequest(eng, version, r, req); err != nil { t.Fatal(err) } return r @@ -351,3 +510,46 @@ func toJson(data interface{}, t *testing.T) io.Reader { } return &buf } + +func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) { + if recorder.HeaderMap.Get("Content-Type") != content_type { + t.Fatalf("%#v\n", recorder) + } +} + +// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that +// should die as soon as we converted all integration tests? +// assertHttpNotError expect the given response to not have an error. +// Otherwise the it causes the test to fail. 
+func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) { + // Non-error http status are [200, 400) + if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { + t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) + } +} + +func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env { + v := &engine.Env{} + v.SetList("RepoTags", data.RepoTags) + v.Set("Id", data.Id) + v.SetInt64("Created", data.Created) + v.SetInt64("Size", data.Size) + v.SetInt64("VirtualSize", data.VirtualSize) + return v +} + +type getImagesJSONStruct struct { + RepoTags []string + Id string + Created int64 + Size int64 + VirtualSize int64 +} + +var sampleImage getImagesJSONStruct = getImagesJSONStruct{ + RepoTags: []string{"test-name:test-tag"}, + Id: "ID", + Created: 999, + Size: 777, + VirtualSize: 666, +} diff --git a/components/engine/archive/MAINTAINERS b/components/engine/archive/MAINTAINERS index 1e998f8ac1..2aac7265d2 100644 --- a/components/engine/archive/MAINTAINERS +++ b/components/engine/archive/MAINTAINERS @@ -1 +1,2 @@ -Michael Crosby (@crosbymichael) +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) diff --git a/components/engine/archive/archive.go b/components/engine/archive/archive.go index 2ba62f5363..7d9f7fb974 100644 --- a/components/engine/archive/archive.go +++ b/components/engine/archive/archive.go @@ -16,9 +16,11 @@ import ( "strings" "syscall" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/utils" ) type ( @@ -61,7 +63,7 @@ func DetectCompression(source []byte) Compression { Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, } { if len(source) < len(m) { - utils.Debugf("Len too short") + log.Debugf("Len too short") continue } if bytes.Compare(m, source[:len(m)]) == 0 { @@ -83,7 +85,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { if err != nil { return nil, err } - utils.Debugf("[tar autodetect] n: %v", bs) + log.Debugf("[tar autodetect] n: %v", bs) compression := DetectCompression(bs) @@ -131,7 +133,7 @@ func (compression *Compression) Extension() string { return "" } -func addTarFile(path, name string, tw *tar.Writer) error { +func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { fi, err := os.Lstat(path) if err != nil { return err @@ -177,15 +179,22 @@ func addTarFile(path, name string, tw *tar.Writer) error { } if hdr.Typeflag == tar.TypeReg { - if file, err := os.Open(path); err != nil { + file, err := os.Open(path) + if err != nil { return err - } else { - _, err := io.Copy(tw, file) - if err != nil { - return err - } - file.Close() } + + twBuf.Reset(tw) + _, err = io.Copy(twBuf, file) + file.Close() + if err != nil { + return err + } + err = twBuf.Flush() + if err != nil { + return err + } + twBuf.Reset(nil) } return nil @@ -245,7 +254,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeXGlobalHeader: - utils.Debugf("PAX Global Extended Headers found and ignored") + log.Debugf("PAX Global Extended Headers found and ignored") return nil default: @@ -328,10 +337,12 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) options.Includes = []string{"."} } + twBuf := bufio.NewWriterSize(nil, twBufSize) + for _, include := range 
options.Includes { filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { if err != nil { - utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err) + log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } @@ -340,23 +351,21 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) return nil } - for _, exclude := range options.Excludes { - matched, err := filepath.Match(exclude, relFilePath) - if err != nil { - utils.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude) - return err - } - if matched { - utils.Debugf("Skipping excluded path: %s", relFilePath) - if f.IsDir() { - return filepath.SkipDir - } - return nil - } + skip, err := utils.Matches(relFilePath, options.Excludes) + if err != nil { + log.Debugf("Error matching %s", relFilePath, err) + return err } - if err := addTarFile(filePath, relFilePath, tw); err != nil { - utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err) + if skip { + if f.IsDir() { + return filepath.SkipDir + } + return nil + } + + if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil { + log.Debugf("Can't add file %s to tar: %s", srcPath, err) } return nil }) @@ -364,13 +373,13 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // Make sure to check the error on Close. if err := tw.Close(); err != nil { - utils.Debugf("Can't close tar writer: %s\n", err) + log.Debugf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { - utils.Debugf("Can't close compress writer: %s\n", err) + log.Debugf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { - utils.Debugf("Can't close pipe writer: %s\n", err) + log.Debugf("Can't close pipe writer: %s", err) } }() @@ -383,10 +392,18 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. func Untar(archive io.Reader, dest string, options *TarOptions) error { + if options == nil { + options = &TarOptions{} + } + if archive == nil { return fmt.Errorf("Empty archive") } + if options.Excludes == nil { + options.Excludes = []string{} + } + decompressedArchive, err := DecompressStream(archive) if err != nil { return err @@ -394,10 +411,12 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error { defer decompressedArchive.Close() tr := tar.NewReader(decompressedArchive) + trBuf := bufio.NewReaderSize(nil, trBufSize) var dirs []*tar.Header // Iterate through the files in the archive. 
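One nit visible in the hunk above: log.Debugf("Error matching %s", relFilePath, err) passes two arguments for a single verb; it presumably wants "Error matching %s: %s". The matching logic itself, now hoisted out of the walk callback into utils.Matches (whose source is not in this diff), amounts to:

package main

import (
	"fmt"
	"path/filepath"
)

// matches sketches what this call site needs utils.Matches to do: report
// whether relFilePath matches any exclude pattern. (Assumed behavior.)
func matches(relFilePath string, patterns []string) (bool, error) {
	for _, pattern := range patterns {
		matched, err := filepath.Match(pattern, relFilePath)
		if err != nil {
			return false, err
		}
		if matched {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	skip, err := matches("3", []string{"3", "*.tmp"})
	fmt.Println(skip, err) // true <nil>
}

The filepath.SkipDir return for matched directories is what keeps the walk from descending into an excluded tree.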
+loop: for { hdr, err := tr.Next() if err == io.EOF { @@ -411,6 +430,12 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error { // Normalize name, for safety and for a simple is-root check hdr.Name = filepath.Clean(hdr.Name) + for _, exclude := range options.Excludes { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + if !strings.HasSuffix(hdr.Name, "/") { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) @@ -439,7 +464,8 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error { } } } - if err := createTarFile(path, dest, hdr, tr, options == nil || !options.NoLchown); err != nil { + trBuf.Reset(tr) + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil { return err } @@ -465,7 +491,7 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error { // the output of one piped into the other. If either Tar or Untar fails, // TarUntar aborts and returns the error. func TarUntar(src string, dst string) error { - utils.Debugf("TarUntar(%s %s)", src, dst) + log.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err @@ -502,11 +528,11 @@ func CopyWithTar(src, dst string) error { return CopyFileWithTar(src, dst) } // Create dst, copy src's content into it - utils.Debugf("Creating dest directory: %s", dst) + log.Debugf("Creating dest directory: %s", dst) if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { return err } - utils.Debugf("Calling TarUntar(%s, %s)", src, dst) + log.Debugf("Calling TarUntar(%s, %s)", src, dst) return TarUntar(src, dst) } @@ -517,7 +543,7 @@ func CopyWithTar(src, dst string) error { // If `dst` ends with a trailing slash '/', the final destination path // will be `dst/base(src)`. 
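The `loop:` label introduced above is the idiomatic way to skip an archive entry from inside the inner excludes scan; a bare `continue` would only advance the inner loop. In isolation:

package main

import (
	"fmt"
	"strings"
)

func main() {
	excludes := []string{".wh.", "dev/"}
	entries := []string{"etc/hostname", "dev/null", "usr/bin/docker"}
loop:
	for _, name := range entries {
		for _, exclude := range excludes {
			if strings.HasPrefix(name, exclude) {
				// Skip this archive entry entirely, as Untar now does.
				continue loop
			}
		}
		fmt.Println("extracting", name)
	}
}

Note that Untar's excludes are plain prefix matches, unlike the glob patterns TarWithOptions now routes through utils.Matches.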
func CopyFileWithTar(src, dst string) (err error) { - utils.Debugf("CopyFileWithTar(%s, %s)", src, dst) + log.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) if err != nil { return err @@ -544,19 +570,19 @@ func CopyFileWithTar(src, dst string) (err error) { } defer srcF.Close() - tw := tar.NewWriter(w) hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Name = filepath.Base(dst) + tw := tar.NewWriter(w) + defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } - tw.Close() return nil }) defer func() { diff --git a/components/engine/archive/archive_test.go b/components/engine/archive/archive_test.go index 61ee0af8e7..b46f953228 100644 --- a/components/engine/archive/archive_test.go +++ b/components/engine/archive/archive_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) func TestCmdStreamLargeStderr(t *testing.T) { @@ -109,6 +109,9 @@ func TestTarUntar(t *testing.T) { if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } + if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } for _, c := range []Compression{ Uncompressed, @@ -116,13 +119,14 @@ func TestTarUntar(t *testing.T) { } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, + Excludes: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } - if len(changes) != 0 { + if len(changes) != 1 || changes[0].Path != "/3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } @@ -199,3 +203,42 @@ func TestUntarUstarGnuConflict(t *testing.T) { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func BenchmarkTarUntar(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := path.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + b.SetBytes(int64(n)) + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} diff --git a/components/engine/archive/changes.go b/components/engine/archive/changes.go index 1e588b8eb5..a591e8ae11 100644 --- a/components/engine/archive/changes.go +++ b/components/engine/archive/changes.go @@ -1,6 +1,7 @@ package archive import ( + "bufio" "bytes" "fmt" "io" @@ -10,9 +11,10 @@ import ( "syscall" "time" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + 
"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/system" ) type ChangeType int @@ -343,6 +345,7 @@ func ExportChanges(dir string, changes []Change) (Archive, error) { tw := tar.NewWriter(writer) go func() { + twBuf := bufio.NewWriterSize(nil, twBufSize) // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors @@ -361,19 +364,19 @@ func ExportChanges(dir string, changes []Change) (Archive, error) { ChangeTime: timestamp, } if err := tw.WriteHeader(hdr); err != nil { - utils.Debugf("Can't write whiteout header: %s\n", err) + log.Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) - if err := addTarFile(path, change.Path[1:], tw); err != nil { - utils.Debugf("Can't add file %s to tar: %s\n", path, err) + if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil { + log.Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. if err := tw.Close(); err != nil { - utils.Debugf("Can't close layer: %s\n", err) + log.Debugf("Can't close layer: %s", err) } writer.Close() }() diff --git a/components/engine/archive/common.go b/components/engine/archive/common.go new file mode 100644 index 0000000000..2aac34e840 --- /dev/null +++ b/components/engine/archive/common.go @@ -0,0 +1,4 @@ +package archive + +const twBufSize = 32 * 1024 +const trBufSize = 32 * 1024 diff --git a/components/engine/archive/diff.go b/components/engine/archive/diff.go index d169669126..a805f2c0a1 100644 --- a/components/engine/archive/diff.go +++ b/components/engine/archive/diff.go @@ -1,6 +1,7 @@ package archive import ( + "bufio" "fmt" "io" "io/ioutil" @@ -9,7 +10,7 @@ import ( "strings" "syscall" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. 
@@ -32,6 +33,7 @@ func ApplyLayer(dest string, layer ArchiveReader) error { } tr := tar.NewReader(layer) + trBuf := bufio.NewReaderSize(nil, trBufSize) var dirs []*tar.Header @@ -108,7 +110,8 @@ func ApplyLayer(dest string, layer ArchiveReader) error { } } - srcData := io.Reader(tr) + trBuf.Reset(tr) + srcData := io.Reader(trBuf) srcHdr := hdr // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so diff --git a/components/engine/archive/wrap.go b/components/engine/archive/wrap.go index 03ea5083ac..b8b60197a3 100644 --- a/components/engine/archive/wrap.go +++ b/components/engine/archive/wrap.go @@ -2,7 +2,7 @@ package archive import ( "bytes" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io/ioutil" ) diff --git a/components/engine/builtins/builtins.go b/components/engine/builtins/builtins.go index 3fa06510d0..0aa2f43c16 100644 --- a/components/engine/builtins/builtins.go +++ b/components/engine/builtins/builtins.go @@ -3,14 +3,14 @@ package builtins import ( "runtime" - "github.com/dotcloud/docker/api" - apiserver "github.com/dotcloud/docker/api/server" - "github.com/dotcloud/docker/daemon/networkdriver/bridge" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/server" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/api" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/daemon/networkdriver/bridge" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/events" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/registry" ) func Register(eng *engine.Engine) error { @@ -20,6 +20,9 @@ func Register(eng *engine.Engine) error { if err := remote(eng); err != nil { return err } + if err := events.New().Install(eng); err != nil { + return err + } if err := eng.Register("version", dockerVersion); err != nil { return err } @@ -50,9 +53,6 @@ func remote(eng *engine.Engine) error { // These components should be broken off into plugins of their own. 
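In the builtins.go hunk below, events.New().Install(eng) replaces direct registration with an installer convention: the subsystem owns its own job names rather than having builtins reach inside it. A toy sketch of that shape (the real engine.Engine API is richer; this only illustrates the convention):

package main

import "fmt"

// Engine is a stand-in for the engine's job registry.
type Engine struct {
	handlers map[string]func() error
}

func (e *Engine) Register(name string, h func() error) error {
	if _, ok := e.handlers[name]; ok {
		return fmt.Errorf("%s already registered", name)
	}
	e.handlers[name] = h
	return nil
}

// Events mimics events.New(): a component that installs its own handlers.
type Events struct{}

func (ev *Events) Install(eng *Engine) error {
	return eng.Register("events", func() error { return nil })
}

func main() {
	eng := &Engine{handlers: map[string]func() error{}}
	if err := (&Events{}).Install(eng); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("installed jobs:", len(eng.handlers))
}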
// func daemon(eng *engine.Engine) error { - if err := eng.Register("initserver", server.InitServer); err != nil { - return err - } return eng.Register("init_networkdriver", bridge.InitDriver) } @@ -65,7 +65,7 @@ func dockerVersion(job *engine.Job) engine.Status { v.Set("GoVersion", runtime.Version()) v.Set("Os", runtime.GOOS) v.Set("Arch", runtime.GOARCH) - if kernelVersion, err := utils.GetKernelVersion(); err == nil { + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { v.Set("KernelVersion", kernelVersion.String()) } if _, err := v.WriteTo(job.Stdout); err != nil { diff --git a/components/engine/contrib/check-config.sh b/components/engine/contrib/check-config.sh index fe4b9f1b9b..cb6a4f2b50 100755 --- a/components/engine/contrib/check-config.sh +++ b/components/engine/contrib/check-config.sh @@ -113,6 +113,23 @@ else echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" fi +if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + echo -n '- ' + if command -v apparmor_parser &> /dev/null; then + echo "$(wrap_good 'apparmor' 'enabled and tools installed')" + else + echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" + echo -n ' ' + if command -v apt-get &> /dev/null; then + echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" + elif command -v yum &> /dev/null; then + echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" + else + echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" + fi + fi +fi + flags=( NAMESPACES {NET,PID,IPC,UTS}_NS DEVPTS_MULTIPLE_INSTANCES diff --git a/components/engine/contrib/completion/bash/docker b/components/engine/contrib/completion/bash/docker index 89395560f9..d6d622ff33 100755 --- a/components/engine/contrib/completion/bash/docker +++ b/components/engine/contrib/completion/bash/docker @@ -156,7 +156,7 @@ _docker_build() *) local counter="$(__docker_pos_first_nonflag '-t|--tag')" if [ $cword -eq $counter ]; then - _filedir + _filedir -d fi ;; esac @@ -485,21 +485,52 @@ _docker_rmi() _docker_run() { case "$prev" in - --cidfile) + -a|--attach) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cidfile|--env-file) _filedir + return ;; --volumes-from) __docker_containers_all + return ;; -v|--volume) - # TODO something magical with colons and _filedir ? 
+ case "$cur" in + *:*) + # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) + ;; + '') + COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) + compopt -o nospace + ;; + /*) + _filedir + compopt -o nospace + ;; + esac return ;; -e|--env) COMPREPLY=( $( compgen -e -- "$cur" ) ) + compopt -o nospace return ;; - --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf) + --link) + case "$cur" in + *:*) + ;; + *) + __docker_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + compopt -o nospace + ;; + esac + return + ;; + --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) return ;; *) diff --git a/components/engine/contrib/completion/fish/docker.fish b/components/engine/contrib/completion/fish/docker.fish index a4a9365f92..ba83526c75 100644 --- a/components/engine/contrib/completion/fish/docker.fish +++ b/components/engine/contrib/completion/fish/docker.fish @@ -85,7 +85,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Conf complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" # cp -complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d 'Copy files/folders from a container's filesystem to the host path' +complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path" # diff complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" diff --git a/components/engine/contrib/completion/zsh/_docker b/components/engine/contrib/completion/zsh/_docker index 3f96f00ef7..faf17b2bea 100644 --- a/components/engine/contrib/completion/zsh/_docker +++ b/components/engine/contrib/completion/zsh/_docker @@ -1,58 +1,118 @@ -#compdef docker +#compdef docker # # zsh completion for docker (http://docker.com) # -# version: 0.2.2 -# author: Felix Riedel -# license: BSD License +# version: 0.3.0 # github: https://github.com/felixr/docker-zsh-completion # +# contributers: +# - Felix Riedel +# - Vincent Bernat +# +# license: +# +# Copyright (c) 2013, Felix Riedel +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# __parse_docker_list() { - sed -e '/^ID/d' -e 's/[ ]\{2,\}/|/g' -e 's/ \([hdwm]\)\(inutes\|ays\|ours\|eeks\)/\1/' | awk ' BEGIN {FS="|"} { printf("%s:%7s, %s\n", $1, $4, $2)}' + awk ' +NR == 1 { + idx=1;i=0;f[i]=0 + header=$0 + while ( match(header, / ([A-Z]+|[A-Z]+ [A-Z]+)/) ) { + idx += RSTART+1 + f[++i]=idx + header = substr($0,idx) + } + f[++i]=999 +} + +NR > 1 '"$1"' { + for(j=0;j 1){printf("%s\\:%s\n", $1,$2)}')"}) - images=($images ${(f)"$(docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"}) + images=(${(f)"$(_call_program commands docker images | awk '(NR > 1 && $1 != ""){printf("%s", $1);if ($2 != "") printf("\\:%s", $2); printf("\n")}')"}) + images=($images ${(f)"$(_call_program commands docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"}) _describe -t docker-images "Images" images } __docker_tags() { local expl declare -a tags - tags=(${(f)"$(docker images | awk '(NR>1){print $2}'| sort | uniq)"}) + tags=(${(f)"$(_call_program commands docker images | awk '(NR>1){print $2}'| sort | uniq)"}) _describe -t docker-tags "tags" tags } +__docker_repositories_with_tags() { + if compset -P '*:'; then + __docker_tags + else + __docker_repositories -qS ":" + fi +} + __docker_search() { # declare -a dockersearch local cache_policy zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then - zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy fi local searchterm cachename @@ -60,14 +120,14 @@ __docker_search() { cachename=_docker-search-$searchterm local expl - local -a result + local -a result if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ && ! _retrieve_cache ${cachename#_}; then _message "Searching for ${searchterm}..." - result=(${(f)"$(docker search ${searchterm} | awk '(NR>2){print $1}')"}) + result=(${(f)"$(_call_program commands docker search ${searchterm} | awk '(NR>2){print $1}')"}) _store_cache ${cachename#_} result - fi - _wanted dockersearch expl 'Available images' compadd -a result + fi + _wanted dockersearch expl 'Available images' compadd -a result } __docker_caching_policy() @@ -81,8 +141,8 @@ __docker_caching_policy() __docker_repositories () { local expl declare -a repos - repos=(${(f)"$(docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"}) - _describe -t docker-repos "Repositories" repos + repos=(${(f)"$(_call_program commands docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"}) + _describe -t docker-repos "Repositories" repos "$@" } __docker_commands () { @@ -91,15 +151,15 @@ __docker_commands () { zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then - zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy fi if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ - && ! 
_retrieve_cache docker_subcommands; + && ! _retrieve_cache docker_subcommands; then - _docker_subcommands=(${${(f)"$(_call_program commands + _docker_subcommands=(${${(f)"$(_call_program commands docker 2>&1 | sed -e '1,6d' -e '/^[ ]*$/d' -e 's/[ ]*\([^ ]\+\)\s*\([^ ].*\)/\1:\2/' )"}}) - _docker_subcommands=($_docker_subcommands 'help:Show help for a command') + _docker_subcommands=($_docker_subcommands 'help:Show help for a command') _store_cache docker_subcommands _docker_subcommands fi _describe -t docker-commands "docker command" _docker_subcommands @@ -108,100 +168,206 @@ __docker_commands () { __docker_subcommand () { local -a _command_args case "$words[1]" in - (attach|wait) - _arguments ':containers:__docker_runningcontainers' + (attach) + _arguments \ + '--no-stdin[Do not attach stdin]' \ + '--sig-proxy[Proxify all received signal]' \ + ':containers:__docker_runningcontainers' ;; (build) _arguments \ - '-t=-:repository:__docker_repositories' \ + '--no-cache[Do not use cache when building the image]' \ + '-q[Suppress verbose build output]' \ + '--rm[Remove intermediate containers after a successful build]' \ + '-t=-:repository:__docker_repositories_with_tags' \ ':path or URL:_directories' ;; (commit) _arguments \ + '--author=-[Author]:author: ' \ + '-m=-[Commit message]:message: ' \ + '--run=-[Configuration automatically applied when the image is run]:configuration: ' \ ':container:__docker_containers' \ - ':repository:__docker_repositories' \ - ':tag: ' + ':repository:__docker_repositories_with_tags' ;; - (diff|export|logs) + (cp) + _arguments \ + ':container:->container' \ + ':hostpath:_files' + case $state in + (container) + if compset -P '*:'; then + _files + else + __docker_containers -qS ":" + fi + ;; + esac + ;; + (diff|export) _arguments '*:containers:__docker_containers' ;; (history) - _arguments '*:images:__docker_images' + _arguments \ + '--no-trunc[Do not truncate output]' \ + '-q[Only show numeric IDs]' \ + '*:images:__docker_images' ;; (images) _arguments \ '-a[Show all images]' \ + '--no-trunc[Do not truncate output]' \ + '-q[Only show numeric IDs]' \ + '--tree[Output graph in tree format]' \ + '--viz[Output graph in graphviz format]' \ ':repository:__docker_repositories' ;; (inspect) - _arguments '*:containers:__docker_containers' + _arguments \ + '--format=-[Format the output using the given go template]:template: ' \ + '*:containers:__docker_containers' ;; - (history) - _arguments ':images:__docker_images' + (import) + _arguments \ + ':URL:(- http:// file://)' \ + ':repository:__docker_repositories_with_tags' + ;; + (info) + ;; + (import) + _arguments \ + ':URL:(- http:// file://)' \ + ':repository:__docker_repositories_with_tags' + ;; + (insert) + _arguments '1:containers:__docker_containers' \ + '2:URL:(http:// file://)' \ + '3:file:_files' ;; (kill) _arguments '*:containers:__docker_runningcontainers' ;; + (load) + ;; + (login) + _arguments \ + '-e=-[Email]:email: ' \ + '-p=-[Password]:password: ' \ + '-u=-[Username]:username: ' \ + ':server: ' + ;; + (logs) + _arguments \ + '-f[Follow log output]' \ + '*:containers:__docker_containers' + ;; (port) - _arguments '1:containers:__docker_runningcontainers' + _arguments \ + '1:containers:__docker_runningcontainers' \ + '2:port:_ports' ;; (start) - _arguments '*:containers:__docker_stoppedcontainers' + _arguments \ + '-a[Attach container'"'"'s stdout/stderr and forward all signals]' \ + '-i[Attach container'"'"'s stding]' \ + '*:containers:__docker_stoppedcontainers' ;; (rm) - _arguments '-v[Remove the volumes 
associated to the container]' \ + _arguments \ + '--link[Remove the specified link and not the underlying container]' \ + '-v[Remove the volumes associated to the container]' \ '*:containers:__docker_stoppedcontainers' ;; (rmi) - _arguments '-v[Remove the volumes associated to the container]' \ + _arguments \ '*:images:__docker_images' ;; - (top) - _arguments '1:containers:__docker_runningcontainers' - ;; (restart|stop) _arguments '-t=-[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \ '*:containers:__docker_runningcontainers' ;; (top) - _arguments ':containers:__docker_runningcontainers' + _arguments \ + '1:containers:__docker_runningcontainers' \ + '(-)*:: :->ps-arguments' + case $state in + (ps-arguments) + _ps + ;; + esac + ;; (ps) - _arguments '-a[Show all containers. Only running containers are shown by default]' \ - '-h[Show help]' \ - '--before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \ - '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' + _arguments \ + '-a[Show all containers]' \ + '--before=-[Show only container created before...]:containers:__docker_containers' \ + '-l[Show only the latest created container]' \ + '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' \ + '--no-trunc[Do not truncate output]' \ + '-q[Only show numeric IDs]' \ + '-s[Display sizes]' \ + '--since=-[Show only containers created since...]:containers:__docker_containers' ;; (tag) _arguments \ '-f[force]'\ ':image:__docker_images'\ - ':repository:__docker_repositories' \ - ':tag:__docker_tags' + ':repository:__docker_repositories_with_tags' ;; (run) _arguments \ - '-a=-[Attach to stdin, stdout or stderr]:toggle:(true false)' \ - '-c=-[CPU shares (relative weight)]:CPU shares: ' \ + '-P[Publish all exposed ports to the host]' \ + '-a[Attach to stdin, stdout or stderr]' \ + '-c=-[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ + '--cidfile=-[Write the container ID to the file]:CID file:_files' \ '-d[Detached mode: leave the container running in the background]' \ - '*--dns=[Set custom dns servers]:dns server: ' \ - '*-e=[Set environment variables]:environment variable: ' \ + '*--dns=-[Set custom dns servers]:dns server: ' \ + '*-e=-[Set environment variables]:environment variable: ' \ '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ + '*--expose=-[Expose a port from the container without publishing it]: ' \ '-h=-[Container host name]:hostname:_hosts' \ '-i[Keep stdin open even if not attached]' \ + '--link=-[Add link to another container]:link:->link' \ + '--lxc-conf=-[Add custom lxc options]:lxc options: ' \ '-m=-[Memory limit (in bytes)]:limit: ' \ - '*-p=-[Expose a container''s port to the host]:port:_ports' \ - '-t=-[Allocate a pseudo-tty]:toggle:(true false)' \ + '--name=-[Container name]:name: ' \ + '*-p=-[Expose a container'"'"'s port to the host]:port:_ports' \ + '--privileged[Give extended privileges to this container]' \ + '--rm[Remove intermediate containers when it exits]' \ + '--sig-proxy[Proxify all received signal]' \ + '-t[Allocate a pseudo-tty]' \ '-u=-[Username or UID]:user:_users' \ '*-v=-[Bind mount a volume (e.g. 
from the host: -v /host:/container, from docker: -v /container)]:volume: '\ '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ + '-w=-[Working directory inside the container]:directory:_directories' \ '(-):images:__docker_images' \ '(-):command: _command_names -e' \ '*::arguments: _normal' - ;; + + case $state in + (link) + if compset -P '*:'; then + _wanted alias expl 'Alias' compadd -E "" + else + __docker_runningcontainers -qS ":" + fi + ;; + esac + + ;; (pull|search) _arguments ':name:__docker_search' ;; + (push) + _arguments ':repository:__docker_repositories_with_tags' + ;; + (save) + _arguments \ + ':images:__docker_images' + ;; + (wait) + _arguments ':containers:__docker_runningcontainers' + ;; (help) _arguments ':subcommand:__docker_commands' ;; @@ -212,24 +378,31 @@ __docker_subcommand () { } _docker () { + # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. + # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. + if [[ $service != docker ]]; then + _call_function - _$service + return + fi + local curcontext="$curcontext" state line typeset -A opt_args _arguments -C \ '-H=-[tcp://host:port to bind/connect to]:socket: ' \ '(-): :->command' \ - '(-)*:: :->option-or-argument' + '(-)*:: :->option-or-argument' if (( CURRENT == 1 )); then fi - case $state in + case $state in (command) __docker_commands ;; (option-or-argument) curcontext=${curcontext%:*:*}:docker-$words[1]: - __docker_subcommand + __docker_subcommand ;; esac } diff --git a/components/engine/contrib/crashTest.go b/components/engine/contrib/crashTest.go deleted file mode 100644 index 6da89bf887..0000000000 --- a/components/engine/contrib/crashTest.go +++ /dev/null @@ -1,125 +0,0 @@ -package main - -import ( - "fmt" - "io" - "log" - "net" - "os" - "os/exec" - "path" - "time" -) - -var DOCKERPATH = path.Join(os.Getenv("DOCKERPATH"), "docker") - -// WARNING: this crashTest will 1) crash your host, 2) remove all containers -func runDaemon() (*exec.Cmd, error) { - os.Remove("/var/run/docker.pid") - exec.Command("rm", "-rf", "/var/lib/docker/containers").Run() - cmd := exec.Command(DOCKERPATH, "-d") - outPipe, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - errPipe, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - if err := cmd.Start(); err != nil { - return nil, err - } - go func() { - io.Copy(os.Stdout, outPipe) - }() - go func() { - io.Copy(os.Stderr, errPipe) - }() - return cmd, nil -} - -func crashTest() error { - if err := exec.Command("/bin/bash", "-c", "while true; do true; done").Start(); err != nil { - return err - } - - var endpoint string - if ep := os.Getenv("TEST_ENDPOINT"); ep == "" { - endpoint = "192.168.56.1:7979" - } else { - endpoint = ep - } - - c := make(chan bool) - var conn io.Writer - - go func() { - conn, _ = net.Dial("tcp", endpoint) - c <- false - }() - go func() { - time.Sleep(2 * time.Second) - c <- true - }() - <-c - - restartCount := 0 - totalTestCount := 1 - for { - daemon, err := runDaemon() - if err != nil { - return err - } - restartCount++ - // time.Sleep(5000 * time.Millisecond) - var stop bool - go func() error { - stop = false - for i := 0; i < 100 && !stop; { - func() error { - cmd := exec.Command(DOCKERPATH, "run", "ubuntu", "echo", fmt.Sprintf("%d", totalTestCount)) - i++ - totalTestCount++ - outPipe, err := cmd.StdoutPipe() - if err != nil { - return err - } - inPipe, err := cmd.StdinPipe() - if err != nil { - return err - } - if err := cmd.Start(); err 
!= nil { - return err - } - if conn != nil { - go io.Copy(conn, outPipe) - } - - // Expecting error, do not check - inPipe.Write([]byte("hello world!!!!!\n")) - go inPipe.Write([]byte("hello world!!!!!\n")) - go inPipe.Write([]byte("hello world!!!!!\n")) - inPipe.Close() - - if err := cmd.Wait(); err != nil { - return err - } - outPipe.Close() - return nil - }() - } - return nil - }() - time.Sleep(20 * time.Second) - stop = true - if err := daemon.Process.Kill(); err != nil { - return err - } - } -} - -func main() { - if err := crashTest(); err != nil { - log.Println(err) - } -} diff --git a/components/engine/contrib/desktop-integration/iceweasel/Dockerfile b/components/engine/contrib/desktop-integration/iceweasel/Dockerfile index 80d6a55e4a..0f3e8f2527 100644 --- a/components/engine/contrib/desktop-integration/iceweasel/Dockerfile +++ b/components/engine/contrib/desktop-integration/iceweasel/Dockerfile @@ -29,7 +29,7 @@ FROM debian:wheezy MAINTAINER Daniel Mizyrycki # Install Iceweasel and "sudo" -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq iceweasel sudo +RUN apt-get update && apt-get install -y iceweasel sudo # create sysadmin account RUN useradd -m -d /data -p saIVpsc0EVTwA sysadmin diff --git a/components/engine/contrib/docker-device-tool/device_tool.go b/components/engine/contrib/docker-device-tool/device_tool.go index a9327f9de1..23d19f0237 100644 --- a/components/engine/contrib/docker-device-tool/device_tool.go +++ b/components/engine/contrib/docker-device-tool/device_tool.go @@ -3,7 +3,7 @@ package main import ( "flag" "fmt" - "github.com/dotcloud/docker/daemon/graphdriver/devmapper" + "github.com/docker/docker/daemon/graphdriver/devmapper" "os" "path" "sort" diff --git a/components/engine/contrib/host-integration/Dockerfile.dev b/components/engine/contrib/host-integration/Dockerfile.dev index 800216532f..1c0fbd8323 100644 --- a/components/engine/contrib/host-integration/Dockerfile.dev +++ b/components/engine/contrib/host-integration/Dockerfile.dev @@ -19,7 +19,7 @@ ENV GOROOT /goroot ENV GOPATH /go ENV PATH $GOROOT/bin:$PATH -RUN go get github.com/dotcloud/docker && cd /go/src/github.com/dotcloud/docker && git checkout v0.6.3 +RUN go get github.com/docker/docker && cd /go/src/github.com/docker/docker && git checkout v0.6.3 ADD manager.go /manager/ RUN cd /manager && go build -o /usr/bin/manager diff --git a/components/engine/contrib/host-integration/manager.go b/components/engine/contrib/host-integration/manager.go index 2798a5d06f..c0b488b2f1 100644 --- a/components/engine/contrib/host-integration/manager.go +++ b/components/engine/contrib/host-integration/manager.go @@ -5,7 +5,7 @@ import ( "encoding/json" "flag" "fmt" - "github.com/dotcloud/docker" + "github.com/docker/docker" "os" "strings" "text/template" diff --git a/components/engine/contrib/host-integration/manager.sh b/components/engine/contrib/host-integration/manager.sh index fecf4bf64b..8ea296f5a5 100755 --- a/components/engine/contrib/host-integration/manager.sh +++ b/components/engine/contrib/host-integration/manager.sh @@ -37,7 +37,7 @@ if [ ! 
-e "manager/$script" ]; then exit 1 fi -# TODO https://github.com/dotcloud/docker/issues/734 (docker inspect formatting) +# TODO https://github.com/docker/docker/issues/734 (docker inspect formatting) #if command -v docker > /dev/null 2>&1; then # image="$(docker inspect -f '{{.Image}}' "$cid")" # if [ "$image" ]; then diff --git a/components/engine/contrib/init/systemd/MAINTAINERS b/components/engine/contrib/init/systemd/MAINTAINERS new file mode 100644 index 0000000000..760a76d6fe --- /dev/null +++ b/components/engine/contrib/init/systemd/MAINTAINERS @@ -0,0 +1,2 @@ +Lokesh Mandvekar (@lsm5) +Brandon Philips (@philips) diff --git a/components/engine/contrib/init/systemd/docker.service b/components/engine/contrib/init/systemd/docker.service index 6f3cc33c36..0cb31e32c0 100644 --- a/components/engine/contrib/init/systemd/docker.service +++ b/components/engine/contrib/init/systemd/docker.service @@ -1,13 +1,13 @@ [Unit] Description=Docker Application Container Engine Documentation=http://docs.docker.com -After=network.target +After=network.target docker.socket +Requires=docker.socket [Service] -ExecStart=/usr/bin/docker -d -Restart=on-failure +ExecStart=/usr/bin/docker -d -H fd:// LimitNOFILE=1048576 LimitNPROC=1048576 [Install] -WantedBy=multi-user.target +Also=docker.socket diff --git a/components/engine/contrib/init/systemd/socket-activation/docker.socket b/components/engine/contrib/init/systemd/docker.socket similarity index 70% rename from components/engine/contrib/init/systemd/socket-activation/docker.socket rename to components/engine/contrib/init/systemd/docker.socket index 3635c89385..9db5049150 100644 --- a/components/engine/contrib/init/systemd/socket-activation/docker.socket +++ b/components/engine/contrib/init/systemd/docker.socket @@ -3,6 +3,9 @@ Description=Docker Socket for the API [Socket] ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker [Install] WantedBy=sockets.target diff --git a/components/engine/contrib/init/systemd/socket-activation/docker.service b/components/engine/contrib/init/systemd/socket-activation/docker.service deleted file mode 100644 index 4af71378c8..0000000000 --- a/components/engine/contrib/init/systemd/socket-activation/docker.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Docker Application Container Engine -Documentation=http://docs.docker.com -After=network.target - -[Service] -ExecStart=/usr/bin/docker -d -H fd:// -Restart=on-failure -LimitNOFILE=1048576 -LimitNPROC=1048576 - -[Install] -WantedBy=multi-user.target diff --git a/components/engine/contrib/init/sysvinit-debian/docker b/components/engine/contrib/init/sysvinit-debian/docker index d79d9c6c07..cf33c83779 100755 --- a/components/engine/contrib/init/sysvinit-debian/docker +++ b/components/engine/contrib/init/sysvinit-debian/docker @@ -1,4 +1,5 @@ #!/bin/sh +set -e ### BEGIN INIT INFO # Provides: docker @@ -130,7 +131,7 @@ case "$1" in ;; status) - status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" docker + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC" ;; *) @@ -138,5 +139,3 @@ case "$1" in exit 1 ;; esac - -exit 0 diff --git a/components/engine/contrib/init/sysvinit-redhat/docker b/components/engine/contrib/init/sysvinit-redhat/docker index aa94c04811..0c985094e6 100755 --- a/components/engine/contrib/init/sysvinit-redhat/docker +++ b/components/engine/contrib/init/sysvinit-redhat/docker @@ -50,7 +50,7 @@ start() { pid=$! touch $lockfile # wait up to 10 seconds for the pidfile to exist. 
see - # https://github.com/dotcloud/docker/issues/5359 + # https://github.com/docker/docker/issues/5359 tries=0 while [ ! -f $pidfile -a $tries -lt 10 ]; do sleep 1 diff --git a/components/engine/contrib/mkimage-alpine.sh b/components/engine/contrib/mkimage-alpine.sh index 0bf328efa9..b9869ae61e 100755 --- a/components/engine/contrib/mkimage-alpine.sh +++ b/components/engine/contrib/mkimage-alpine.sh @@ -19,12 +19,12 @@ tmp() { } apkv() { - curl -s $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | + curl -sSL $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | grep '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 } getapk() { - curl -s $REPO/$ARCH/apk-tools-static-$(apkv).apk | + curl -sSL $REPO/$ARCH/apk-tools-static-$(apkv).apk | tar -xz -C $TMP sbin/apk.static } diff --git a/components/engine/contrib/mkimage-arch.sh b/components/engine/contrib/mkimage-arch.sh index 1f52cbc1a1..e83b2b6731 100755 --- a/components/engine/contrib/mkimage-arch.sh +++ b/components/engine/contrib/mkimage-arch.sh @@ -5,8 +5,13 @@ set -e hash pacstrap &>/dev/null || { - echo "Could not find pacstrap. Run pacman -S arch-install-scripts" - exit 1 + echo "Could not find pacstrap. Run pacman -S arch-install-scripts" + exit 1 +} + +hash expect &>/dev/null || { + echo "Could not find expect. Run pacman -S expect" + exit 1 } ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) @@ -15,7 +20,21 @@ chmod 755 $ROOTFS # packages to ignore for space savings PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs -pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE +expect <&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' + # Since Docker users using "RUN apt-get update && apt-get install -y ..." in + # their Dockerfiles don't go delete the lists files afterwards, we want them to + # be as small as possible on-disk, so we explicitly request "gz" versions and + # tell Apt to keep them gzipped on-disk. + + # For comparison, an "apt-get update" layer without this on a pristine + # "debian:wheezy" base image was "29.88 MB", where with this it was only + # "8.273 MB". + + Acquire::GzipIndexes "true"; + Acquire::CompressionTypes::Order:: "gz"; + EOF fi if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then @@ -123,9 +138,9 @@ if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then " "$rootfsDir/etc/apt/sources.list" echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" # LTS - if [ "$suite" = 'squeeze' ]; then + if [ "$suite" = 'squeeze' -o "$suite" = 'oldstable' ]; then head -1 "$rootfsDir/etc/apt/sources.list" \ - | sed "s/ $suite / ${suite}-lts /" \ + | sed "s/ $suite / squeeze-lts /" \ >> "$rootfsDir/etc/apt/sources.list" fi ) @@ -173,4 +188,6 @@ fi # delete all the apt list files since they're big and get stale quickly rm -rf "$rootfsDir/var/lib/apt/lists"/* # this forces "apt-get update" in dependent images, which is also good + + mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." 
) diff --git a/components/engine/contrib/nuke-graph-directory.sh b/components/engine/contrib/nuke-graph-directory.sh new file mode 100755 index 0000000000..f44c45a174 --- /dev/null +++ b/components/engine/contrib/nuke-graph-directory.sh @@ -0,0 +1,64 @@ +#!/bin/sh +set -e + +dir="$1" + +if [ -z "$dir" ]; then + { + echo 'This script is for destroying old /var/lib/docker directories more safely than' + echo ' "rm -rf", which can cause data loss or other serious issues.' + echo + echo "usage: $0 directory" + echo " ie: $0 /var/lib/docker" + } >&2 + exit 1 +fi + +if [ "$(id -u)" != 0 ]; then + echo >&2 "error: $0 must be run as root" + exit 1 +fi + +if [ ! -d "$dir" ]; then + echo >&2 "error: $dir is not a directory" + exit 1 +fi + +dir="$(readlink -f "$dir")" + +echo +echo "Nuking $dir ..." +echo ' (if this is wrong, press Ctrl+C NOW!)' +echo + +( set -x; sleep 10 ) +echo + +dir_in_dir() { + inner="$1" + outer="$2" + [ "${inner#$outer}" != "$inner" ] +} + +# let's start by unmounting any submounts in $dir +# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) +for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do + mount="$(readlink -f "$mount" || true)" + if dir_in_dir "$mount" "$dir"; then + ( set -x; umount -f "$mount" ) + fi +done + +# now, let's go destroy individual btrfs subvolumes, if any exist +if command -v btrfs &> /dev/null; then + root="$(df "$dir" | awk 'NR>1 { print $NF }')" + for subvol in $(btrfs subvolume list -o "$root" 2>/dev/null | awk -F' path ' '{ print $2 }'); do + subvolDir="$root/$subvol" + if dir_in_dir "$subvolDir" "$dir"; then + ( set -x; btrfs subvolume delete "$subvolDir" ) + fi + done +fi + +# finally, DESTROY ALL THINGS +( set -x; rm -rf "$dir" ) diff --git a/components/engine/contrib/prepare-commit-msg.hook b/components/engine/contrib/prepare-commit-msg.hook deleted file mode 100644 index b0fe0bf675..0000000000 --- a/components/engine/contrib/prepare-commit-msg.hook +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh -# Auto sign all commits to allow them to be used by the Docker project. 
-# see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work -# -GH_USER=$(git config --get github.user) -SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p") -grep -qs "^$SOB" "$1" || { - echo - echo "$SOB" -} >> "$1" diff --git a/components/engine/daemon/MAINTAINERS b/components/engine/daemon/MAINTAINERS new file mode 100644 index 0000000000..434aad9d57 --- /dev/null +++ b/components/engine/daemon/MAINTAINERS @@ -0,0 +1,6 @@ +Solomon Hykes (@shykes) +Victor Vieux (@vieux) +Michael Crosby (@crosbymichael) +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) +volumes.go: Brian Goff (@cpuguy83) diff --git a/components/engine/daemon/attach.go b/components/engine/daemon/attach.go index 0e3b8b8a9d..b1b06e2765 100644 --- a/components/engine/daemon/attach.go +++ b/components/engine/daemon/attach.go @@ -1,11 +1,124 @@ package daemon import ( + "encoding/json" + "fmt" "io" + "os" + "time" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/utils" ) +func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + + var ( + name = job.Args[0] + logs = job.GetenvBool("logs") + stream = job.GetenvBool("stream") + stdin = job.GetenvBool("stdin") + stdout = job.GetenvBool("stdout") + stderr = job.GetenvBool("stderr") + ) + + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + + //logs + if logs { + cLog, err := container.ReadLog("json") + if err != nil && os.IsNotExist(err) { + // Legacy logs + log.Debugf("Old logs format") + if stdout { + cLog, err := container.ReadLog("stdout") + if err != nil { + log.Errorf("Error reading logs (stdout): %s", err) + } else if _, err := io.Copy(job.Stdout, cLog); err != nil { + log.Errorf("Error streaming logs (stdout): %s", err) + } + } + if stderr { + cLog, err := container.ReadLog("stderr") + if err != nil { + log.Errorf("Error reading logs (stderr): %s", err) + } else if _, err := io.Copy(job.Stderr, cLog); err != nil { + log.Errorf("Error streaming logs (stderr): %s", err) + } + } + } else if err != nil { + log.Errorf("Error reading logs (json): %s", err) + } else { + dec := json.NewDecoder(cLog) + for { + l := &jsonlog.JSONLog{} + + if err := dec.Decode(l); err == io.EOF { + break + } else if err != nil { + log.Errorf("Error streaming logs: %s", err) + break + } + if l.Stream == "stdout" && stdout { + fmt.Fprintf(job.Stdout, "%s", l.Log) + } + if l.Stream == "stderr" && stderr { + fmt.Fprintf(job.Stderr, "%s", l.Log) + } + } + } + } + + //stream + if stream { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + cStdinCloser io.Closer + ) + + if stdin { + r, w := io.Pipe() + go func() { + defer w.Close() + defer log.Debugf("Closing buffered stdin pipe") + io.Copy(w, job.Stdin) + }() + cStdin = r + cStdinCloser = job.Stdin + } + if stdout { + cStdout = job.Stdout + } + if stderr { + cStderr = job.Stderr + } + + <-daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr) + + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if container.Config.StdinOnce && !container.Config.Tty { + container.State.WaitStop(-1 * time.Second) + } + } + return engine.StatusOK +} + +// FIXME: this should be private, and every outside subsystem +// should go through the 
"container_attach" job. But that would require +// that job to be properly documented, as well as the relationship betweem +// Attach and ContainerAttach. +// +// This method is in use by builder/builder.go. func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { var ( cStdout, cStderr io.ReadCloser @@ -19,8 +132,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo errors <- err } else { go func() { - utils.Debugf("attach: stdin: begin") - defer utils.Debugf("attach: stdin: end") + log.Debugf("attach: stdin: begin") + defer log.Debugf("attach: stdin: end") // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr if container.Config.StdinOnce && !container.Config.Tty { defer cStdin.Close() @@ -43,7 +156,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo err = nil } if err != nil { - utils.Errorf("attach: stdin: %s", err) + log.Errorf("attach: stdin: %s", err) } errors <- err }() @@ -56,8 +169,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo } else { cStdout = p go func() { - utils.Debugf("attach: stdout: begin") - defer utils.Debugf("attach: stdout: end") + log.Debugf("attach: stdout: begin") + defer log.Debugf("attach: stdout: end") // If we are in StdinOnce mode, then close stdin if container.Config.StdinOnce && stdin != nil { defer stdin.Close() @@ -70,7 +183,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo err = nil } if err != nil { - utils.Errorf("attach: stdout: %s", err) + log.Errorf("attach: stdout: %s", err) } errors <- err }() @@ -81,7 +194,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo defer stdinCloser.Close() } if cStdout, err := container.StdoutPipe(); err != nil { - utils.Errorf("attach: stdout pipe: %s", err) + log.Errorf("attach: stdout pipe: %s", err) } else { io.Copy(&utils.NopWriter{}, cStdout) } @@ -94,8 +207,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo } else { cStderr = p go func() { - utils.Debugf("attach: stderr: begin") - defer utils.Debugf("attach: stderr: end") + log.Debugf("attach: stderr: begin") + defer log.Debugf("attach: stderr: end") // If we are in StdinOnce mode, then close stdin if container.Config.StdinOnce && stdin != nil { defer stdin.Close() @@ -108,7 +221,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo err = nil } if err != nil { - utils.Errorf("attach: stderr: %s", err) + log.Errorf("attach: stderr: %s", err) } errors <- err }() @@ -120,7 +233,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo } if cStderr, err := container.StderrPipe(); err != nil { - utils.Errorf("attach: stdout pipe: %s", err) + log.Errorf("attach: stdout pipe: %s", err) } else { io.Copy(&utils.NopWriter{}, cStderr) } @@ -140,14 +253,14 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo // FIXME: how to clean up the stdin goroutine without the unwanted side effect // of closing the passed stdin? Add an intermediary io.Pipe? 
for i := 0; i < nJobs; i += 1 { - utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs) + log.Debugf("attach: waiting for job %d/%d", i+1, nJobs) if err := <-errors; err != nil { - utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err) + log.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err) return err } - utils.Debugf("attach: job %d completed successfully", i+1) + log.Debugf("attach: job %d completed successfully", i+1) } - utils.Debugf("attach: all jobs completed successfully") + log.Debugf("attach: all jobs completed successfully") return nil }) } diff --git a/components/engine/server/buildfile.go b/components/engine/daemon/build.go similarity index 83% rename from components/engine/server/buildfile.go rename to components/engine/daemon/build.go index 71fed660b2..a572dc2ec2 100644 --- a/components/engine/server/buildfile.go +++ b/components/engine/daemon/build.go @@ -1,4 +1,4 @@ -package server +package daemon import ( "crypto/sha256" @@ -10,6 +10,7 @@ import ( "io/ioutil" "net/url" "os" + "os/exec" "path" "path/filepath" "reflect" @@ -19,16 +20,99 @@ import ( "syscall" "time" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/pkg/symlink" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/archive" + "github.com/docker/docker/engine" + "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" ) +func (daemon *Daemon) CmdBuild(job *engine.Job) engine.Status { + if len(job.Args) != 0 { + return job.Errorf("Usage: %s\n", job.Name) + } + var ( + remoteURL = job.Getenv("remote") + repoName = job.Getenv("t") + suppressOutput = job.GetenvBool("q") + noCache = job.GetenvBool("nocache") + rm = job.GetenvBool("rm") + forceRm = job.GetenvBool("forcerm") + authConfig = ®istry.AuthConfig{} + configFile = ®istry.ConfigFile{} + tag string + context io.ReadCloser + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("configFile", configFile) + repoName, tag = parsers.ParseRepositoryTag(repoName) + + if remoteURL == "" { + context = ioutil.NopCloser(job.Stdin) + } else if utils.IsGIT(remoteURL) { + if !strings.HasPrefix(remoteURL, "git://") { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { + return job.Errorf("Error trying to use git: %s (%s)", err, output) + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return job.Error(err) + } + context = c + } else if utils.IsURL(remoteURL) { + f, err := utils.Download(remoteURL) + if err != nil { + return job.Error(err) + } + defer f.Body.Close() + dockerFile, err := ioutil.ReadAll(f.Body) + if err != nil { + return job.Error(err) + } + c, err := archive.Generate("Dockerfile", string(dockerFile)) + if err != nil { + return job.Error(err) + } + context = c + } + defer context.Close() + + sf := utils.NewStreamFormatter(job.GetenvBool("json")) + b 
:= NewBuildFile(daemon, daemon.eng, + &utils.StdoutFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + &utils.StderrFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + !suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile) + id, err := b.Build(context) + if err != nil { + return job.Error(err) + } + if repoName != "" { + daemon.Repositories().Set(repoName, tag, id, false) + } + return engine.StatusOK +} + var ( ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") ) @@ -40,15 +124,15 @@ type BuildFile interface { } type buildFile struct { - daemon *daemon.Daemon - srv *Server + daemon *Daemon + eng *engine.Engine image string maintainer string config *runconfig.Config contextPath string - context *utils.TarSum + context *tarsum.TarSum verbose bool utilizeCache bool @@ -67,6 +151,9 @@ type buildFile struct { // Deprecated, original writer used for ImagePull. To be removed. outOld io.Writer sf *utils.StreamFormatter + + // cmdSet indicates whether CMD was set in the current Dockerfile + cmdSet bool } func (b *buildFile) clearTmp(containers map[string]struct{}) { @@ -85,7 +172,7 @@ func (b *buildFile) CmdFrom(name string) error { image, err := b.daemon.Repositories().LookupImage(name) if err != nil { if b.daemon.Graph().IsNotExist(err) { - remote, tag := utils.ParseRepositoryTag(name) + remote, tag := parsers.ParseRepositoryTag(name) pullRegistryAuth := b.authConfig if len(b.configFile.Configs) > 0 { // The request came with a full auth config file, we prefer to use that @@ -96,7 +183,7 @@ func (b *buildFile) CmdFrom(name string) error { resolvedAuth := b.configFile.ResolveAuthConfig(endpoint) pullRegistryAuth = &resolvedAuth } - job := b.srv.Eng.Job("pull", remote, tag) + job := b.eng.Job("pull", remote, tag) job.SetenvBool("json", b.sf.Json()) job.SetenvBool("parallel", true) job.SetenvJson("authConfig", pullRegistryAuth) @@ -118,7 +205,7 @@ func (b *buildFile) CmdFrom(name string) error { b.config = image.Config } if b.config.Env == nil || len(b.config.Env) == 0 { - b.config.Env = append(b.config.Env, "HOME=/", "PATH="+daemon.DefaultPathEnv) + b.config.Env = append(b.config.Env, "PATH="+DefaultPathEnv) } // Process ONBUILD triggers if they exist if nTriggers := len(b.config.OnBuild); nTriggers != 0 { @@ -167,20 +254,20 @@ func (b *buildFile) CmdMaintainer(name string) error { // probeCache checks to see if image-caching is enabled (`b.utilizeCache`) // and if so attempts to look up the current `b.image` and `b.config` pair -// in the current server `b.srv`. If an image is found, probeCache returns +// in the current daemon `b.daemon`. If an image is found, probeCache returns // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there // is any error, it returns `(false, err)`.
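//
// A minimal usage sketch (mirroring the CmdRun caller below; illustrative
// only, not part of this patch):
//
//	hit, err := b.probeCache()
//	if err != nil {
//		return err
//	}
//	if hit {
//		// a cached image matched this step; reuse it instead of re-running
//		return nil
//	}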
func (b *buildFile) probeCache() (bool, error) { if b.utilizeCache { - if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil { + if cache, err := b.daemon.ImageGetCached(b.image, b.config); err != nil { return false, err } else if cache != nil { fmt.Fprintf(b.outStream, " ---> Using cache\n") - utils.Debugf("[BUILDER] Use cached version") + log.Debugf("[BUILDER] Use cached version") b.image = cache.ID return true, nil } else { - utils.Debugf("[BUILDER] Cache miss") + log.Debugf("[BUILDER] Cache miss") } } return false, nil @@ -196,12 +283,13 @@ func (b *buildFile) CmdRun(args string) error { } cmd := b.config.Cmd - b.config.Cmd = nil + // set Cmd manually, this is a special case only for Dockerfiles + b.config.Cmd = config.Cmd runconfig.Merge(b.config, config) defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - utils.Debugf("Command to be executed: %v", b.config.Cmd) + log.Debugf("Command to be executed: %v", b.config.Cmd) hit, err := b.probeCache() if err != nil { @@ -291,7 +379,7 @@ func (b *buildFile) CmdEnv(args string) error { func (b *buildFile) buildCmdFromJson(args string) []string { var cmd []string if err := json.Unmarshal([]byte(args), &cmd); err != nil { - utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err) + log.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err) cmd = []string{"/bin/sh", "-c", args} } return cmd @@ -303,12 +391,17 @@ func (b *buildFile) CmdCmd(args string) error { if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { return err } + b.cmdSet = true return nil } func (b *buildFile) CmdEntrypoint(args string) error { entrypoint := b.buildCmdFromJson(args) b.config.Entrypoint = entrypoint + // if no CMD was set in the current Dockerfile, clean up the inherited cmd + if !b.cmdSet { + b.config.Cmd = nil + } if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { return err } @@ -404,7 +497,7 @@ func (b *buildFile) checkPathForAddition(orig string) error { return nil } -func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error { +func (b *buildFile) addContext(container *Container, orig, dest string, decompress bool) error { var ( err error destExists = true @@ -459,7 +552,7 @@ func (b *buildFile) addContext(container *daemon.Container, orig, dest string, d if err := archive.UntarPath(origPath, tarDest); err == nil { return nil } else if err != io.EOF { - utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) + log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) } } @@ -553,7 +646,7 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecomp if err != nil { return err } - tarSum := &utils.TarSum{Reader: r, DisableCompression: true} + tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true} if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { return err } @@ -656,7 +749,7 @@ func (b *buildFile) CmdAdd(args string) error { return b.runContextCommand(args, true, true, "ADD") } -func (b *buildFile) create() (*daemon.Container, error) { +func (b *buildFile) create() (*Container, error) { if b.image == "" { return nil, fmt.Errorf("Please provide a source image with `from` prior to run") } @@ -677,10 +770,15 @@ func (b *buildFile) create() (*daemon.Container, error) { return c, nil } -func (b *buildFile) run(c *daemon.Container) error { +func (b *buildFile) run(c *Container) error { var errCh chan error if b.verbose { errCh = utils.Go(func() error { + // FIXME: call the 'attach' job
so that daemon.Attach can be made private + // + // FIXME (LK4D4): it might also make sense to call the "logs" job; it is like attach + // but without hijacking for stdin. Also, with attach there can be a race + // condition because some output may already have been printed before the attach. return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream) }) } @@ -775,7 +873,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) { return "", err } - b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true} + b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true} if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { return "", err } @@ -889,10 +987,10 @@ func fixPermissions(destination string, uid, gid int) error { }) } -func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { +func NewBuildFile(d *Daemon, eng *engine.Engine, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { return &buildFile{ - daemon: srv.daemon, - srv: srv, + daemon: d, + eng: eng, config: &runconfig.Config{}, outStream: outStream, errStream: errStream, diff --git a/components/engine/daemon/changes.go b/components/engine/daemon/changes.go new file mode 100644 index 0000000000..1e5726eda8 --- /dev/null +++ b/components/engine/daemon/changes.go @@ -0,0 +1,32 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + outs := engine.NewTable("", 0) + changes, err := container.Changes() + if err != nil { + return job.Error(err) + } + for _, change := range changes { + out := &engine.Env{} + if err := out.Import(change); err != nil { + return job.Error(err) + } + outs.Add(out) + } + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + } else { + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +} diff --git a/components/engine/daemon/commit.go b/components/engine/daemon/commit.go new file mode 100644 index 0000000000..950925ade3 --- /dev/null +++ b/components/engine/daemon/commit.go @@ -0,0 +1,84 @@ +package daemon + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name) + } + name := job.Args[0] + + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + + var ( + config = container.Config + newConfig runconfig.Config + ) + + if err := job.GetenvJson("config", &newConfig); err != nil { + return job.Error(err) + } + + if err := runconfig.Merge(&newConfig, config); err != nil { + return job.Error(err) + } + + img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig) + if err != nil { + return job.Error(err) + } + job.Printf("%s\n", img.ID) + return engine.StatusOK +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository +func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) { + if pause { + container.Pause() + defer container.Unpause() + } + + if err := container.Mount(); err != nil { + return nil, err + } + defer container.Unmount() + + rwTar, err := container.ExportRw() + if err != nil { + return nil, err + } + defer rwTar.Close() + + // Create a new image from the container's base layers + a new layer from container changes + var ( + containerID, containerImage string + containerConfig *runconfig.Config + ) + + if container != nil { + containerID = container.ID + containerImage = container.Image + containerConfig = container.Config + } + + img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config) + if err != nil { + return nil, err + } + + // Register the image if needed + if repository != "" { + if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil { + return img, err + } + } + return img, nil +} diff --git a/components/engine/daemon/config.go b/components/engine/daemon/config.go new file mode 100644 index 0000000000..a396bd0232 --- /dev/null +++ b/components/engine/daemon/config.go @@ -0,0 +1,70 @@ +package daemon + +import ( + "net" + + "github.com/docker/docker/daemon/networkdriver" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" +) + +const ( + defaultNetworkMtu = 1500 + DisableNetworkBridge = "none" +) + +// Config defines the configuration of a docker daemon +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +// FIXME: separate runtime configuration from http api configuration +type Config struct { + Pidfile string + Root string + AutoRestart bool + Dns []string + DnsSearch []string + EnableIptables bool + EnableIpForward bool + DefaultIp net.IP + BridgeIface string + BridgeIP string + InterContainerCommunication bool + GraphDriver string + GraphOptions []string + ExecDriver string + Mtu int + DisableNetwork bool + EnableSelinuxSupport bool + Context map[string][]string +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line.
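//
// A hypothetical daemon entrypoint would drive it roughly like this (sketch
// only; the surrounding names are assumed, not part of this patch):
//
//	config := &Config{}
//	config.InstallFlags()
//	flag.Parse()
//	// config.Pidfile, config.Root, etc. now hold the parsed values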
+func (config *Config) InstallFlags() { + flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file") + flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime") + flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") + flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules") + flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward") + flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") + flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") + flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication") + flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") + flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") + flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver") + flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available") + opts.IPVar(&config.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports") + opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options") + // FIXME: why the inconsistency between "hosts" and "sockets"?
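	// Note on the flag-name slices used above (my reading of pkg/mflag's
	// conventions, stated here as an assumption): a leading "-" produces a
	// long option (so "-pidfile" registers --pidfile) and a leading "#" marks
	// a deprecated, hidden alias (so "#r" keeps -r working), meaning
	// []string{"p", "-pidfile"} registers both -p and --pidfile.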
+ opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers") + opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") +} + +func GetDefaultNetworkMtu() int { + if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { + return iface.MTU + } + return defaultNetworkMtu +} diff --git a/components/engine/daemon/container.go b/components/engine/daemon/container.go index 30337de6b5..df6bd66190 100644 --- a/components/engine/daemon/container.go +++ b/components/engine/daemon/container.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "io/ioutil" - "log" "os" "path" "path/filepath" @@ -17,18 +16,21 @@ import ( "github.com/docker/libcontainer/devices" "github.com/docker/libcontainer/label" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/daemon/graphdriver" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/links" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/pkg/networkfs/etchosts" - "github.com/dotcloud/docker/pkg/networkfs/resolvconf" - "github.com/dotcloud/docker/pkg/symlink" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" + + "github.com/docker/docker/archive" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/links" + "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/networkfs/etchosts" + "github.com/docker/docker/pkg/networkfs/resolvconf" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" ) const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" @@ -66,13 +68,14 @@ type Container struct { ExecDriver string command *execdriver.Command - stdout *utils.WriteBroadcaster - stderr *utils.WriteBroadcaster + stdout *broadcastwriter.BroadcastWriter + stderr *broadcastwriter.BroadcastWriter stdin io.ReadCloser stdinPipe io.WriteCloser daemon *Daemon MountLabel, ProcessLabel string + RestartCount int Volumes map[string]string // Store rw/ro in a separate structure to preserve reverse-compatibility on-disk. @@ -81,6 +84,7 @@ type Container struct { hostConfig *runconfig.HostConfig activeLinks map[string]*links.Link + monitor *containerMonitor } func (container *Container) FromDisk() error { @@ -105,7 +109,7 @@ func (container *Container) FromDisk() error { return container.readHostConfig() } -func (container *Container) ToDisk() error { +func (container *Container) toDisk() error { data, err := json.Marshal(container) if err != nil { return err @@ -124,6 +128,13 @@ func (container *Container) ToDisk() error { return container.WriteHostConfig() } +func (container *Container) ToDisk() error { + container.Lock() + err := container.toDisk() + container.Unlock() + return err +} + func (container *Container) readHostConfig() error { container.hostConfig = &runconfig.HostConfig{} // If the hostconfig file does not exist, do not read it. 
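// The ToDisk/toDisk split introduced above follows the usual Go locking
// convention: the exported method takes the container lock and delegates to
// the unexported variant, which assumes the lock is already held, so internal
// callers that already hold the lock can persist state without deadlocking.
// Generic shape of the pattern (illustrative names, not from this patch):
//
//	func (s *state) Save() error {
//		s.Lock()
//		defer s.Unlock()
//		return s.save()
//	}
//
//	// save assumes the caller already holds s's lock.
//	func (s *state) save() error { /* write state */ return nil }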
@@ -160,6 +171,13 @@ func (container *Container) WriteHostConfig() error { return ioutil.WriteFile(pth, data, 0666) } +func (container *Container) LogEvent(action string) { + d := container.daemon + if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.Image)).Run(); err != nil { + log.Errorf("Error logging event %s for %s: %s", action, container.ID, err) + } +} + func (container *Container) getResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs) @@ -208,6 +226,20 @@ func populateCommand(c *Container, env []string) error { return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode) } + // Build lists of devices allowed and created within the container. + userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices)) + for i, deviceMapping := range c.hostConfig.Devices { + device, err := devices.GetDevice(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions) + if err != nil { + return fmt.Errorf("error gathering device information while adding custom device %s", err) + } + device.Path = deviceMapping.PathInContainer + userSpecifiedDevices[i] = device + } + allowedDevices := append(devices.DefaultAllowedDevices, userSpecifiedDevices...) + + autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...) + // TODO: this can be removed after lxc-conf is fully deprecated mergeLxcConfIntoOptions(c.hostConfig, context) @@ -230,8 +262,10 @@ func populateCommand(c *Container, env []string) error { User: c.Config.User, Config: context, Resources: resources, - AllowedDevices: devices.DefaultAllowedDevices, - AutoCreatedDevices: devices.DefaultAutoCreatedDevices, + AllowedDevices: allowedDevices, + AutoCreatedDevices: autoCreatedDevices, + CapAdd: c.hostConfig.CapAdd, + CapDrop: c.hostConfig.CapDrop, } c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} c.command.Env = env @@ -245,6 +279,7 @@ func (container *Container) Start() (err error) { if container.State.IsRunning() { return nil } + + // if we encounter an error during start we need to ensure that any other // setup has been cleaned up properly defer func() { @@ -280,9 +315,6 @@ func (container *Container) Start() (err error) { if err := setupMountsForContainer(container); err != nil { return err } - if err := container.startLoggingToDisk(); err != nil { - return err - } return container.waitForStart() } @@ -463,40 +495,8 @@ func (container *Container) releaseNetwork() { container.NetworkSettings = &NetworkSettings{} } -func (container *Container) monitor(callback execdriver.StartCallback) error { - var ( - err error - exitCode int - ) - - pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin) - exitCode, err = container.daemon.Run(container, pipes, callback) - if err != nil { - utils.Errorf("Error running container: %s", err) - } - container.State.SetStopped(exitCode) - - // Cleanup - container.cleanup() - - // Re-create a brand new stdin pipe once the container exited - if container.Config.OpenStdin { - container.stdin, container.stdinPipe = io.Pipe() - } - if container.daemon != nil && container.daemon.srv != nil { - container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image)) - } - if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() { - // FIXME: here is race condition between two RUN
instructions in Dockerfile - // because they share same runconfig and change image. Must be fixed - // in server/buildfile.go - if err := container.ToDisk(); err != nil { - utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err) - } - } - return err -} - +// cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem. func (container *Container) cleanup() { container.releaseNetwork() @@ -506,30 +506,14 @@ func (container *Container) cleanup() { link.Disable() } } - if container.Config.OpenStdin { - if err := container.stdin.Close(); err != nil { - utils.Errorf("%s: Error close stdin: %s", container.ID, err) - } - } - if err := container.stdout.CloseWriters(); err != nil { - utils.Errorf("%s: Error close stdout: %s", container.ID, err) - } - if err := container.stderr.CloseWriters(); err != nil { - utils.Errorf("%s: Error close stderr: %s", container.ID, err) - } - if container.command != nil && container.command.Terminal != nil { - if err := container.command.Terminal.Close(); err != nil { - utils.Errorf("%s: Error closing terminal: %s", container.ID, err) - } - } if err := container.Unmount(); err != nil { - log.Printf("%v: Failed to umount filesystem: %v", container.ID, err) + log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) } } func (container *Container) KillSig(sig int) error { - utils.Debugf("Sending %d to %s", sig, container.ID) + log.Debugf("Sending %d to %s", sig, container.ID) container.Lock() defer container.Unlock() @@ -541,6 +525,18 @@ func (container *Container) KillSig(sig int) error { if !container.State.IsRunning() { return nil } + + // signal to the monitor that it should not restart the container + // after we send the kill signal + container.monitor.ExitOnNext() + + // if the container is currently restarting we do not need to send the signal + // to the process. Telling the monitor that it should exit on its next event + // loop is enough + if container.State.IsRestarting() { + return nil + } + return container.daemon.Kill(container, sig) } @@ -578,7 +574,7 @@ func (container *Container) Kill() error { if _, err := container.State.WaitStop(10 * time.Second); err != nil { // Ensure that we don't kill ourselves if pid := container.State.GetPid(); pid != 0 { - log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID)) + log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID)) if err := syscall.Kill(pid, 9); err != nil { return err } @@ -596,7 +592,7 @@ func (container *Container) Stop(seconds int) error { // 1. Send a SIGTERM if err := container.KillSig(15); err != nil { - log.Print("Failed to send SIGTERM to the process, force killing") + log.Infof("Failed to send SIGTERM to the process, force killing") if err := container.KillSig(9); err != nil { return err } @@ -604,7 +600,7 @@ func (container *Container) Stop(seconds int) error { // 2. Wait for the process to exit on its own if _, err := container.State.WaitStop(time.Duration(seconds) * time.Second); err != nil { - log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) + log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) // 3.
If it doesn't, then send SIGKILL if err := container.Kill(); err != nil { container.State.WaitStop(-1 * time.Second) @@ -733,7 +729,7 @@ func (container *Container) GetSize() (int64, int64) { ) if err := container.Mount(); err != nil { - utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err) + log.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err) return sizeRw, sizeRootfs } defer container.Unmount() @@ -741,7 +737,7 @@ func (container *Container) GetSize() (int64, int64) { if differ, ok := container.daemon.driver.(graphdriver.Differ); ok { sizeRw, err = differ.DiffSize(container.ID) if err != nil { - utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) + log.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) // FIXME: GetSize should return an error. Not changing it now in case // there is a side-effect. sizeRw = -1 @@ -838,7 +834,7 @@ func (container *Container) DisableLink(name string) { if link, exists := container.activeLinks[name]; exists { link.Disable() } else { - utils.Debugf("Could not find active link for %s", name) + log.Debugf("Could not find active link for %s", name) } } } @@ -853,18 +849,16 @@ func (container *Container) setupContainerDns() error { daemon = container.daemon ) - if config.NetworkMode == "host" { - container.ResolvConfPath = "/etc/resolv.conf" - return nil - } - resolvConf, err := resolvconf.Get() if err != nil { return err } + container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf") + if err != nil { + return err + } - // If custom dns exists, then create a resolv.conf for the container - if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 { + if config.NetworkMode != "host" && (len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0) { var ( dns = resolvconf.GetNameservers(resolvConf) dnsSearch = resolvconf.GetSearchDomains(resolvConf) @@ -879,18 +873,9 @@ func (container *Container) setupContainerDns() error { } else if len(daemon.config.DnsSearch) > 0 { dnsSearch = daemon.config.DnsSearch } - - resolvConfPath, err := container.getRootResourcePath("resolv.conf") - if err != nil { - return err - } - container.ResolvConfPath = resolvConfPath - return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch) - } else { - container.ResolvConfPath = "/etc/resolv.conf" } - return nil + return ioutil.WriteFile(container.ResolvConfPath, resolvConf, 0644) } func (container *Container) initializeNetworking() error { @@ -950,15 +935,15 @@ func (container *Container) initializeNetworking() error { // Make sure the config is compatible with the current kernel func (container *Container) verifyDaemonSettings() { if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit { - log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n") + log.Infof("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.") container.Config.Memory = 0 } if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit { - log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n") + log.Infof("WARNING: Your kernel does not support swap limit capabilities. 
Limitation discarded.") container.Config.MemorySwap = -1 } if container.daemon.sysInfo.IPv4ForwardingDisabled { - log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work") + log.Infof("WARNING: IPv4 forwarding is disabled. Networking will not work") } } @@ -1019,9 +1004,12 @@ func (container *Container) setupLinkedContainers() ([]string, error) { func (container *Container) createDaemonEnvironment(linkedEnv []string) []string { // Setup environment env := []string{ - "HOME=/", "PATH=" + DefaultPathEnv, "HOSTNAME=" + container.Config.Hostname, + // Note: we don't set HOME here because it'll get autoset intelligently + // based on the value of USER inside dockerinit, but only if it isn't + // set already (ie, that can be overridden by setting HOME via -e or ENV + // in a Dockerfile). } if container.Config.Tty { env = append(env, "TERM=xterm") @@ -1080,38 +1068,16 @@ func (container *Container) startLoggingToDisk() error { } func (container *Container) waitForStart() error { - callback := func(command *execdriver.Command) { - if command.Tty { - // The callback is called after the process Start() - // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace - // which we close here. - if c, ok := command.Stdout.(io.Closer); ok { - c.Close() - } - } - container.State.SetRunning(command.Pid()) - if err := container.ToDisk(); err != nil { - utils.Debugf("%s", err) - } - } + container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy) - // We use a callback here instead of a goroutine and an chan for - // syncronization purposes - cErr := utils.Go(func() error { return container.monitor(callback) }) - - waitStart := make(chan struct{}) - - go func() { - container.State.WaitRunning(-1 * time.Second) - close(waitStart) - }() - - // Start should not return until the process is actually running + // block until we either receive an error from the initial start of the container's + // process or until the process is running in the container select { - case <-waitStart: - case err := <-cErr: + case <-container.monitor.startSignal: + case err := <-utils.Go(container.monitor.Start): return err } + return nil } diff --git a/components/engine/daemon/container_unit_test.go b/components/engine/daemon/container_unit_test.go index 0a8e69ab00..1b1b934f42 100644 --- a/components/engine/daemon/container_unit_test.go +++ b/components/engine/daemon/container_unit_test.go @@ -1,7 +1,7 @@ package daemon import ( - "github.com/dotcloud/docker/nat" + "github.com/docker/docker/nat" "testing" ) @@ -89,6 +89,41 @@ func TestParseNetworkOptsPublic(t *testing.T) { } } +func TestParseNetworkOptsPublicNoPort(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100"}) + + if err == nil { + t.Logf("Expected error Invalid containerPort") + t.Fail() + } + if ports != nil { + t.Logf("Expected nil got %s", ports) + t.Fail() + } + if bindings != nil { + t.Logf("Expected nil got %s", bindings) + t.Fail() + } +} + +func TestParseNetworkOptsNegativePorts(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) + + if err == nil { + t.Fail() + } + t.Logf("%v", len(ports)) + t.Logf("%v", bindings) + if len(ports) != 0 { + t.Logf("Expected nil got %s", len(ports)) + t.Fail() + } + if len(bindings) != 0 { + t.Logf("Expected 0 got %s", len(bindings)) + t.Fail() + } +} + func TestParseNetworkOptsUdp(t *testing.T) { ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) if err != nil { diff 
--git a/components/engine/daemon/copy.go b/components/engine/daemon/copy.go new file mode 100644 index 0000000000..9d18b010c0 --- /dev/null +++ b/components/engine/daemon/copy.go @@ -0,0 +1,33 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status { + if len(job.Args) != 2 { + return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) + } + + var ( + name = job.Args[0] + resource = job.Args[1] + ) + + if container := daemon.Get(name); container != nil { + + data, err := container.Copy(resource) + if err != nil { + return job.Error(err) + } + defer data.Close() + + if _, err := io.Copy(job.Stdout, data); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff --git a/components/engine/daemon/create.go b/components/engine/daemon/create.go new file mode 100644 index 0000000000..3c6827eeec --- /dev/null +++ b/components/engine/daemon/create.go @@ -0,0 +1,86 @@ +package daemon + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status { + var name string + if len(job.Args) == 1 { + name = job.Args[0] + } else if len(job.Args) > 1 { + return job.Errorf("Usage: %s", job.Name) + } + config := runconfig.ContainerConfigFromJob(job) + if config.Memory != 0 && config.Memory < 524288 { + return job.Errorf("Minimum memory limit allowed is 512k") + } + if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit { + job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n") + config.Memory = 0 + } + if config.Memory > 0 && !daemon.SystemConfig().SwapLimit { + job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n") + config.MemorySwap = -1 + } + container, buildWarnings, err := daemon.Create(config, name) + if err != nil { + if daemon.Graph().IsNotExist(err) { + _, tag := parsers.ParseRepositoryTag(config.Image) + if tag == "" { + tag = graph.DEFAULTTAG + } + return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) + } + return job.Error(err) + } + if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled { + job.Errorf("IPv4 forwarding is disabled.\n") + } + container.LogEvent("create") + // FIXME: this is necessary because daemon.Create might return a nil container + // with a non-nil error. This should not happen! Once it's fixed we + // can remove this workaround. + if container != nil { + job.Printf("%s\n", container.ID) + } + for _, warning := range buildWarnings { + job.Errorf("%s\n", warning) + } + return engine.StatusOK +} + +// Create creates a new container from the given configuration with a given name. 
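//
// The sequence in the body below is: look up the image, check its layer
// depth, merge and verify the requested config against the image, allocate
// the container, create its root filesystem, persist it to disk, and finally
// register it with the daemon. Any failure aborts the whole sequence.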
+func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) { + var ( + container *Container + warnings []string + ) + + img, err := daemon.repositories.LookupImage(config.Image) + if err != nil { + return nil, nil, err + } + if err := img.CheckDepth(); err != nil { + return nil, nil, err + } + if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil { + return nil, nil, err + } + if container, err = daemon.newContainer(name, config, img); err != nil { + return nil, nil, err + } + if err := daemon.createRootfs(container, img); err != nil { + return nil, nil, err + } + if err := container.ToDisk(); err != nil { + return nil, nil, err + } + if err := daemon.Register(container); err != nil { + return nil, nil, err + } + return container, warnings, nil +} diff --git a/components/engine/daemon/daemon.go b/components/engine/daemon/daemon.go index a94a4458ad..811cb3391e 100644 --- a/components/engine/daemon/daemon.go +++ b/components/engine/daemon/daemon.go @@ -4,42 +4,40 @@ import ( "fmt" "io" "io/ioutil" - "log" "os" "path" "regexp" + "runtime" "strings" "sync" "time" "github.com/docker/libcontainer/label" - "github.com/docker/libcontainer/selinux" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/daemon/execdriver/execdrivers" - "github.com/dotcloud/docker/daemon/execdriver/lxc" - "github.com/dotcloud/docker/daemon/graphdriver" - _ "github.com/dotcloud/docker/daemon/graphdriver/vfs" - _ "github.com/dotcloud/docker/daemon/networkdriver/bridge" - "github.com/dotcloud/docker/daemon/networkdriver/portallocator" - "github.com/dotcloud/docker/daemonconfig" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/graph" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/pkg/graphdb" - "github.com/dotcloud/docker/pkg/namesgenerator" - "github.com/dotcloud/docker/pkg/networkfs/resolvconf" - "github.com/dotcloud/docker/pkg/sysinfo" - "github.com/dotcloud/docker/pkg/truncindex" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" -) -// Set the max depth to the aufs default that most -// kernels are compiled with -// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk -const MaxImageDepth = 127 + "github.com/docker/docker/archive" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/execdrivers" + "github.com/docker/docker/daemon/execdriver/lxc" + "github.com/docker/docker/daemon/graphdriver" + _ "github.com/docker/docker/daemon/graphdriver/vfs" + _ "github.com/docker/docker/daemon/networkdriver/bridge" + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/graphdb" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/networkfs/resolvconf" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" +) var ( DefaultDns = []string{"8.8.8.8", "8.8.4.4"} @@ -91,38 +89,65 @@ type Daemon struct { idIndex *truncindex.TruncIndex 
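	// The idIndex field above lets containers be resolved by any unique
	// prefix of their ID (truncated IDs); Get below consults it before
	// falling back to name lookup.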
sysInfo *sysinfo.SysInfo volumes *graph.Graph - srv Server eng *engine.Engine - config *daemonconfig.Config + config *Config containerGraph *graphdb.Database driver graphdriver.Driver execDriver execdriver.Driver - Sockets []string } // Install installs daemon capabilities to eng. func (daemon *Daemon) Install(eng *engine.Engine) error { - return eng.Register("container_inspect", daemon.ContainerInspect) -} - -// List returns an array of all containers registered in the daemon. -func (daemon *Daemon) List() []*Container { - return daemon.containers.List() + // FIXME: rename "delete" to "rm" for consistency with the CLI command + // FIXME: rename ContainerDestroy to ContainerRm for consistency with the CLI command + // FIXME: remove ImageDelete's dependency on Daemon, then move to graph/ + for name, method := range map[string]engine.Handler{ + "attach": daemon.ContainerAttach, + "build": daemon.CmdBuild, + "commit": daemon.ContainerCommit, + "container_changes": daemon.ContainerChanges, + "container_copy": daemon.ContainerCopy, + "container_inspect": daemon.ContainerInspect, + "containers": daemon.Containers, + "create": daemon.ContainerCreate, + "delete": daemon.ContainerDestroy, + "export": daemon.ContainerExport, + "info": daemon.CmdInfo, + "kill": daemon.ContainerKill, + "logs": daemon.ContainerLogs, + "pause": daemon.ContainerPause, + "resize": daemon.ContainerResize, + "restart": daemon.ContainerRestart, + "start": daemon.ContainerStart, + "stop": daemon.ContainerStop, + "top": daemon.ContainerTop, + "unpause": daemon.ContainerUnpause, + "wait": daemon.ContainerWait, + "image_delete": daemon.ImageDelete, // FIXME: see above + } { + if err := eng.Register(name, method); err != nil { + return err + } + } + if err := daemon.Repositories().Install(eng); err != nil { + return err + } + // FIXME: this hack is necessary for legacy integration tests to access + // the daemon object. + eng.Hack_SetGlobalVar("httpapi.daemon", daemon) + return nil } // Get looks for a container by the specified ID or name, and returns it. // If the container is not found, or if an error occurs, nil is returned. 
func (daemon *Daemon) Get(name string) *Container { + if id, err := daemon.idIndex.Get(name); err == nil { + return daemon.containers.Get(id) + } if c, _ := daemon.GetByName(name); c != nil { return c } - - id, err := daemon.idIndex.Get(name) - if err != nil { - return nil - } - - return daemon.containers.Get(id) + return nil } // Exists returns true if a container of the specified ID or name exists, @@ -142,20 +167,24 @@ func (daemon *Daemon) load(id string) (*Container, error) { if err := container.FromDisk(); err != nil { return nil, err } + if container.ID != id { return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) } + + container.readHostConfig() + return container, nil } // Register makes a container object usable by the daemon as <container.Name> // This is a wrapper for register func (daemon *Daemon) Register(container *Container) error { - return daemon.register(container, true, nil) + return daemon.register(container, true) } // register makes a container object usable by the daemon as <container.Name> -func (daemon *Daemon) register(container *Container, updateSuffixarray bool, containersToStart *[]*Container) error { +func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error { if container.daemon != nil || daemon.Exists(container.ID) { return fmt.Errorf("Container is already loaded") } @@ -169,8 +198,8 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con container.daemon = daemon // Attach to stdout and stderr - container.stderr = utils.NewWriteBroadcaster() - container.stdout = utils.NewWriteBroadcaster() + container.stderr = broadcastwriter.New() + container.stdout = broadcastwriter.New() // Attach to stdin if container.Config.OpenStdin { container.stdin, container.stdinPipe = io.Pipe() @@ -188,7 +217,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con // if so, then we need to restart monitor and init a new lock // If the container is supposed to be running, make sure of it if container.State.IsRunning() { - utils.Debugf("killing old running container %s", container.ID) + log.Debugf("killing old running container %s", container.ID) existingPid := container.State.Pid container.State.SetStopped(0) @@ -205,36 +234,28 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con var err error cmd.Process, err = os.FindProcess(existingPid) if err != nil { - utils.Debugf("cannot find existing process for %d", existingPid) + log.Debugf("cannot find existing process for %d", existingPid) } daemon.execDriver.Terminate(cmd) } if err := container.Unmount(); err != nil { - utils.Debugf("unmount error %s", err) + log.Debugf("unmount error %s", err) } if err := container.ToDisk(); err != nil { - utils.Debugf("saving stopped state to disk %s", err) + log.Debugf("saving stopped state to disk %s", err) } info := daemon.execDriver.Info(container.ID) if !info.IsRunning() { - utils.Debugf("Container %s was supposed to be running but is not.", container.ID) + log.Debugf("Container %s was supposed to be running but is not.", container.ID) - utils.Debugf("Marking as stopped") + log.Debugf("Marking as stopped") container.State.SetStopped(-127) if err := container.ToDisk(); err != nil { return err } - - if daemon.config.AutoRestart { - utils.Debugf("Marking as restarting") - - if containersToStart != nil { - *containersToStart = append(*containersToStart, container) - } - } } } return nil
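A note on the reordered lookup in Daemon.Get above: the id index is now consulted before the name graph, so a container id (or unique id prefix) takes precedence over a name match. A toy model of that precedence, where the data and the resolve helper are illustrative rather than the daemon's actual structures:

package main

import (
	"fmt"
	"strings"
)

// resolve mimics the patched Daemon.Get: try ref as a unique id prefix first
// (roughly what the truncindex does), then fall back to a name lookup.
func resolve(ids []string, names map[string]string, ref string) string {
	match := ""
	for _, id := range ids {
		if strings.HasPrefix(id, ref) {
			if match != "" {
				match = "" // ambiguous prefix: fall through to the name lookup
				break
			}
			match = id
		}
	}
	if match != "" {
		return match
	}
	return names[ref] // empty string if unknown
}

func main() {
	ids := []string{"4fa6e0f0c678", "8d3a6e0b1c9d"}
	names := map[string]string{"web": "8d3a6e0b1c9d"}
	fmt.Println(resolve(ids, names, "4fa6")) // 4fa6e0f0c678 (id prefix wins)
	fmt.Println(resolve(ids, names, "web"))  // 8d3a6e0b1c9d (name fallback)
}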
@@ -249,13 +270,13 @@ func (daemon *Daemon) ensureName(container *Container) error { container.Name = name if err := container.ToDisk(); err != nil { - utils.Debugf("Error saving container name %s", err) + log.Debugf("Error saving container name %s", err) } } return nil } -func (daemon *Daemon) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error { +func (daemon *Daemon) LogToDisk(src *broadcastwriter.BroadcastWriter, dst, stream string) error { log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) if err != nil { return err } @@ -264,56 +285,15 @@ func (daemon *Daemon) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) return nil } -// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem. -func (daemon *Daemon) Destroy(container *Container) error { - if container == nil { - return fmt.Errorf("The given container is <nil>") - } - - element := daemon.containers.Get(container.ID) - if element == nil { - return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID) - } - - if err := container.Stop(3); err != nil { - return err - } - - // Deregister the container before removing its directory, to avoid race conditions - daemon.idIndex.Delete(container.ID) - daemon.containers.Delete(container.ID) - - if _, err := daemon.containerGraph.Purge(container.ID); err != nil { - utils.Debugf("Unable to remove container from link graph: %s", err) - } - - if err := daemon.driver.Remove(container.ID); err != nil { - return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err) - } - - initID := fmt.Sprintf("%s-init", container.ID) - if err := daemon.driver.Remove(initID); err != nil { - return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err) - } - - if err := os.RemoveAll(container.root); err != nil { - return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) - } - selinux.FreeLxcContexts(container.ProcessLabel) - - return nil -} -
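The restore() rewrite below drops the old collect-then-start list in favor of per-container restart policies. The decision it adds reduces to this predicate (a distillation under the same two policy names used in the hunk, not a helper that exists in the patch):

package main

import "fmt"

// shouldRestart captures the policy check added to restore(): with
// AutoRestart enabled daemon-wide, a container is started again on daemon
// boot only if its policy is "always", or "on-failure" with a non-zero exit.
func shouldRestart(policyName string, exitCode int) bool {
	return policyName == "always" || (policyName == "on-failure" && exitCode != 0)
}

func main() {
	fmt.Println(shouldRestart("always", 0))     // true
	fmt.Println(shouldRestart("on-failure", 1)) // true
	fmt.Println(shouldRestart("on-failure", 0)) // false
	fmt.Println(shouldRestart("no", 1))         // false
}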
func (daemon *Daemon) restore() error { var ( - debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "") - containers = make(map[string]*Container) - currentDriver = daemon.driver.String() - containersToStart = []*Container{} + debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "") + containers = make(map[string]*Container) + currentDriver = daemon.driver.String() ) if !debug { - fmt.Printf("Loading containers: ") + log.Infof("Loading containers: ") } dir, err := ioutil.ReadDir(daemon.repository) if err != nil { @@ -327,29 +307,38 @@ func (daemon *Daemon) restore() error { fmt.Print(".") } if err != nil { - utils.Errorf("Failed to load container %v: %v", id, err) + log.Errorf("Failed to load container %v: %v", id, err) continue } // Ignore the container if it does not support the current driver being used by the graph - if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver { - utils.Debugf("Loaded container %v", container.ID) + if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { + log.Debugf("Loaded container %v", container.ID) + containers[container.ID] = container } else { - utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + log.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) } } + registeredContainers := []*Container{} + if entities := daemon.containerGraph.List("/", -1); entities != nil { for _, p := range entities.Paths() { if !debug { fmt.Print(".") } + e := entities[p] + if container, ok := containers[e.ID()]; ok { - if err := daemon.register(container, false, &containersToStart); err != nil { - utils.Debugf("Failed to register container %s: %s", container.ID, err) + if err := daemon.register(container, false); err != nil { + log.Debugf("Failed to register container %s: %s", container.ID, err) } + + registeredContainers = append(registeredContainers, container) + + // delete from the map so that a new name is not automatically generated + delete(containers, e.ID()) } } @@ -360,72 +349,40 @@ func (daemon *Daemon) restore() error { // Try to set the default name for a container if it exists prior to links container.Name, err = daemon.generateNewName(container.ID) if err != nil { - utils.Debugf("Setting default id - %s", err) + log.Debugf("Setting default id - %s", err) } - if err := daemon.register(container, false, &containersToStart); err != nil { - utils.Debugf("Failed to register container %s: %s", container.ID, err) + + if err := daemon.register(container, false); err != nil { + log.Debugf("Failed to register container %s: %s", container.ID, err) } + + registeredContainers = append(registeredContainers, container) } - for _, container := range containersToStart { - utils.Debugf("Starting container %d", container.ID) - if err := container.Start(); err != nil { - utils.Debugf("Failed to start container %s: %s", container.ID, err) + // check the restart policy on the containers and restart any container with + // the restart policy of "always", or "on-failure" with a non-zero exit code + if daemon.config.AutoRestart { + log.Debugf("Restarting containers...") + + for _, container := range registeredContainers { + if container.hostConfig.RestartPolicy.Name == "always" || + (container.hostConfig.RestartPolicy.Name == "on-failure" && container.State.ExitCode != 0) { + log.Debugf("Starting container %s", container.ID) + + if err := container.Start(); err != nil { + log.Debugf("Failed to start container %s: %s", container.ID, err) + } + } } } if !debug { - fmt.Printf(": done.\n") + log.Infof(": done.") } return nil } -// Create creates a new container from the given configuration with a given name.
-func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) { - var ( - container *Container - warnings []string - ) - - img, err := daemon.repositories.LookupImage(config.Image) - if err != nil { - return nil, nil, err - } - if err := daemon.checkImageDepth(img); err != nil { - return nil, nil, err - } - if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil { - return nil, nil, err - } - if container, err = daemon.newContainer(name, config, img); err != nil { - return nil, nil, err - } - if err := daemon.createRootfs(container, img); err != nil { - return nil, nil, err - } - if err := container.ToDisk(); err != nil { - return nil, nil, err - } - if err := daemon.Register(container); err != nil { - return nil, nil, err - } - return container, warnings, nil -} - -func (daemon *Daemon) checkImageDepth(img *image.Image) error { - // We add 2 layers to the depth because the container's rw and - // init layer add to the restriction - depth, err := img.Depth() - if err != nil { - return err - } - if depth+2 >= MaxImageDepth { - return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) - } - return nil -} - func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool { if config != nil { if config.PortSpecs != nil { @@ -618,51 +575,6 @@ func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error return nil } -// Commit creates a new filesystem image from the current state of a container. -// The image can optionally be tagged into a repository -func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) { - if pause { - container.Pause() - defer container.Unpause() - } - - if err := container.Mount(); err != nil { - return nil, err - } - defer container.Unmount() - - rwTar, err := container.ExportRw() - if err != nil { - return nil, err - } - defer rwTar.Close() - - // Create a new image from the container's base layers + a new layer from container changes - var ( - containerID, containerImage string - containerConfig *runconfig.Config - ) - - if container != nil { - containerID = container.ID - containerImage = container.Image - containerConfig = container.Config - } - - img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config) - if err != nil { - return nil, err - } - - // Register the image if needed - if repository != "" { - if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil { - return img, err - } - } - return img, nil -} - func GetFullContainerName(name string) (string, error) { if name == "" { return "", fmt.Errorf("Container name cannot be empty") @@ -723,7 +635,7 @@ func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { if hostConfig != nil && hostConfig.Links != nil { for _, l := range hostConfig.Links { - parts, err := utils.PartParser("name:alias", l) + parts, err := parsers.PartParser("name:alias", l) if err != nil { return err } @@ -750,7 +662,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig. 
} // FIXME: harmonize with NewGraph() -func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) { +func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) { daemon, err := NewDaemonFromDirectory(config, eng) if err != nil { return nil, err @@ -758,11 +670,71 @@ func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) return daemon, nil } -func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) { - if !config.EnableSelinuxSupport { - selinux.SetDisabled() +func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) { + // Apply configuration defaults + if config.Mtu == 0 { + // FIXME: GetDefaultNetworkMtu doesn't need to be public anymore + config.Mtu = GetDefaultNetworkMtu() + } + // Check for mutually incompatible config options + if config.BridgeIface != "" && config.BridgeIP != "" { + return nil, fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.") + } + if !config.EnableIptables && !config.InterContainerCommunication { + return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.") + } + // FIXME: DisableNetworkBridge doesn't need to be public anymore + config.DisableNetwork = config.BridgeIface == DisableNetworkBridge + + // Claim the pidfile first, to avoid any and all unexpected race conditions. + // Some of the init doesn't need a pidfile lock - but let's not try to be smart. + if config.Pidfile != "" { + if err := utils.CreatePidFile(config.Pidfile); err != nil { + return nil, err + } + eng.OnShutdown(func() { + // Always release the pidfile last, just in case + utils.RemovePidFile(config.Pidfile) + }) } + // Check that the system is supported and we have sufficient privileges + // FIXME: return errors instead of calling Fatal + if runtime.GOOS != "linux" { + log.Fatalf("The Docker daemon is only supported on linux") + } + if os.Geteuid() != 0 { + log.Fatalf("The Docker daemon needs to be run as root") + } + if err := checkKernelAndArch(); err != nil { + log.Fatalf(err.Error()) + } + + // set up the TempDir to use a canonical path + tmp, err := utils.TempDir(config.Root) + if err != nil { + log.Fatalf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := utils.ReadSymlinkedDirectory(tmp) + if err != nil { + log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + if !config.EnableSelinuxSupport { + selinuxSetDisabled() + } + + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = utils.ReadSymlinkedDirectory(config.Root) + if err != nil { + log.Fatalf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + config.Root = realRoot // Create the root directory if it doesn't exist if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { return nil, err @@ -776,7 +748,12 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D if err != nil { return nil, err } - utils.Debugf("Using graph driver %s", driver) + log.Debugf("Using graph driver %s", driver) + + // As Docker on btrfs and SELinux are incompatible at present, error on both being enabled + if config.EnableSelinuxSupport && driver.String() == "btrfs" { + return nil, fmt.Errorf("SELinux is not 
supported with the BTRFS graph driver!") + } daemonRepo := path.Join(config.Root, "containers") @@ -789,7 +766,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D return nil, err } - utils.Debugf("Creating images graph") + log.Debugf("Creating images graph") g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver) if err != nil { return nil, err @@ -801,12 +778,12 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D if err != nil { return nil, err } - utils.Debugf("Creating volumes graph") + log.Debugf("Creating volumes graph") volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver) if err != nil { return nil, err } - utils.Debugf("Creating repository list") + log.Debugf("Creating repository list") repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g) if err != nil { return nil, fmt.Errorf("Couldn't create Tag store: %s", err) @@ -873,34 +850,52 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D sysInitPath: sysInitPath, execDriver: ed, eng: eng, - Sockets: config.Sockets, } - if err := daemon.checkLocaldns(); err != nil { return nil, err } if err := daemon.restore(); err != nil { return nil, err } + // Setup shutdown handlers + // FIXME: can these shutdown handlers be registered closer to their source? + eng.OnShutdown(func() { + // FIXME: if these cleanup steps can be called concurrently, register + // them as separate handlers to speed up total shutdown time + // FIXME: use engine logging instead of log.Errorf + if err := daemon.shutdown(); err != nil { + log.Errorf("daemon.shutdown(): %s", err) + } + if err := portallocator.ReleaseAll(); err != nil { + log.Errorf("portallocator.ReleaseAll(): %s", err) + } + if err := daemon.driver.Cleanup(); err != nil { + log.Errorf("daemon.driver.Cleanup(): %s", err.Error()) + } + if err := daemon.containerGraph.Close(); err != nil { + log.Errorf("daemon.containerGraph.Close(): %s", err.Error()) + } + }) + return daemon, nil } func (daemon *Daemon) shutdown() error { group := sync.WaitGroup{} - utils.Debugf("starting clean shutdown of all containers...") + log.Debugf("starting clean shutdown of all containers...") for _, container := range daemon.List() { c := container if c.State.IsRunning() { - utils.Debugf("stopping %s", c.ID) + log.Debugf("stopping %s", c.ID) group.Add(1) go func() { defer group.Done() if err := c.KillSig(15); err != nil { - utils.Debugf("kill 15 error for %s - %s", c.ID, err) + log.Debugf("kill 15 error for %s - %s", c.ID, err) } c.State.WaitStop(-1 * time.Second) - utils.Debugf("container stopped %s", c.ID) + log.Debugf("container stopped %s", c.ID) }() } } @@ -909,30 +904,6 @@ func (daemon *Daemon) shutdown() error { return nil } -func (daemon *Daemon) Close() error { - errorsStrings := []string{} - if err := daemon.shutdown(); err != nil { - utils.Errorf("daemon.shutdown(): %s", err) - errorsStrings = append(errorsStrings, err.Error()) - } - if err := portallocator.ReleaseAll(); err != nil { - utils.Errorf("portallocator.ReleaseAll(): %s", err) - errorsStrings = append(errorsStrings, err.Error()) - } - if err := daemon.driver.Cleanup(); err != nil { - utils.Errorf("daemon.driver.Cleanup(): %s", err.Error()) - errorsStrings = append(errorsStrings, err.Error()) - } - if err := daemon.containerGraph.Close(); err != nil { - utils.Errorf("daemon.containerGraph.Close(): %s", err.Error()) - errorsStrings = append(errorsStrings, err.Error()) - } - if 
len(errorsStrings) > 0 { - return fmt.Errorf("%s", strings.Join(errorsStrings, ", ")) - } - return nil -} - func (daemon *Daemon) Mount(container *Container) error { dir, err := daemon.driver.Get(container.ID, container.GetMountLabel()) if err != nil { @@ -1023,6 +994,8 @@ func (daemon *Daemon) Kill(c *Container, sig int) error { // from the content root, including images, volumes and // container filesystems. // Again: this will remove your entire docker daemon! +// FIXME: this is deprecated, and only used in legacy +// tests. Please remove. func (daemon *Daemon) Nuke() error { var wg sync.WaitGroup for _, container := range daemon.List() { @@ -1033,7 +1006,6 @@ func (daemon *Daemon) Nuke() error { }(container) } wg.Wait() - daemon.Close() return os.RemoveAll(daemon.config.Root) } @@ -1050,7 +1022,7 @@ func (daemon *Daemon) Repositories() *graph.TagStore { return daemon.repositories } -func (daemon *Daemon) Config() *daemonconfig.Config { +func (daemon *Daemon) Config() *Config { return daemon.config } @@ -1078,18 +1050,70 @@ func (daemon *Daemon) ContainerGraph() *graphdb.Database { return daemon.containerGraph } -func (daemon *Daemon) SetServer(server Server) { - daemon.srv = server -} - func (daemon *Daemon) checkLocaldns() error { resolvConf, err := resolvconf.Get() if err != nil { return err } if len(daemon.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { - log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns) + log.Infof("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v", DefaultDns) daemon.config.Dns = DefaultDns } return nil } + +func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { + // Retrieve all images + images, err := daemon.Graph().Map() + if err != nil { + return nil, err + } + + // Store the tree in a map of map (map[parentId][childId]) + imageMap := make(map[string]map[string]struct{}) + for _, img := range images { + if _, exists := imageMap[img.Parent]; !exists { + imageMap[img.Parent] = make(map[string]struct{}) + } + imageMap[img.Parent][img.ID] = struct{}{} + } + + // Loop on the children of the given image and check the config + var match *image.Image + for elem := range imageMap[imgID] { + img, err := daemon.Graph().Get(elem) + if err != nil { + return nil, err + } + if runconfig.Compare(&img.ContainerConfig, config) { + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil +} + +func checkKernelAndArch() error { + // Check for unsupported architectures + if runtime.GOARCH != "amd64" { + return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) + } + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.8 crashes are clearer. 
+ // For details see http://github.com/docker/docker/issues/407 + if k, err := kernel.GetKernelVersion(); err != nil { + log.Infof("WARNING: %s", err) + } else { + if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + log.Infof("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String()) + } + } + } + return nil +} diff --git a/components/engine/daemon/daemon_aufs.go b/components/engine/daemon/daemon_aufs.go index ee3e1d1a58..a370a4ce3c 100644 --- a/components/engine/daemon/daemon_aufs.go +++ b/components/engine/daemon/daemon_aufs.go @@ -3,17 +3,17 @@ package daemon import ( - "github.com/dotcloud/docker/daemon/graphdriver" - "github.com/dotcloud/docker/daemon/graphdriver/aufs" - "github.com/dotcloud/docker/graph" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/aufs" + "github.com/docker/docker/graph" + "github.com/docker/docker/pkg/log" ) // Given the graphdriver ad, if it is aufs, then migrate it. // If aufs driver is not built, this func is a noop. func migrateIfAufs(driver graphdriver.Driver, root string) error { if ad, ok := driver.(*aufs.Driver); ok { - utils.Debugf("Migrating existing containers") + log.Debugf("Migrating existing containers") if err := ad.Migrate(root, graph.SetupInitLayer); err != nil { return err } diff --git a/components/engine/daemon/daemon_btrfs.go b/components/engine/daemon/daemon_btrfs.go index f343d699c4..cd505c356b 100644 --- a/components/engine/daemon/daemon_btrfs.go +++ b/components/engine/daemon/daemon_btrfs.go @@ -3,5 +3,5 @@ package daemon import ( - _ "github.com/dotcloud/docker/daemon/graphdriver/btrfs" + _ "github.com/docker/docker/daemon/graphdriver/btrfs" ) diff --git a/components/engine/daemon/daemon_devicemapper.go b/components/engine/daemon/daemon_devicemapper.go index ddf8107414..477754559a 100644 --- a/components/engine/daemon/daemon_devicemapper.go +++ b/components/engine/daemon/daemon_devicemapper.go @@ -3,5 +3,5 @@ package daemon import ( - _ "github.com/dotcloud/docker/daemon/graphdriver/devmapper" + _ "github.com/docker/docker/daemon/graphdriver/devmapper" ) diff --git a/components/engine/daemon/daemon_no_aufs.go b/components/engine/daemon/daemon_no_aufs.go index 2d9fed29b9..06cdc776d7 100644 --- a/components/engine/daemon/daemon_no_aufs.go +++ b/components/engine/daemon/daemon_no_aufs.go @@ -3,7 +3,7 @@ package daemon import ( - "github.com/dotcloud/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver" ) func migrateIfAufs(driver graphdriver.Driver, root string) error { diff --git a/components/engine/daemon/delete.go b/components/engine/daemon/delete.go new file mode 100644 index 0000000000..501aed3e38 --- /dev/null +++ b/components/engine/daemon/delete.go @@ -0,0 +1,174 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" +) + +// FIXME: rename to ContainerRemove for consistency with the CLI command. +func (daemon *Daemon) ContainerDestroy(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name) + } + name := job.Args[0] + removeVolume := job.GetenvBool("removeVolume") + removeLink := job.GetenvBool("removeLink") + forceRemove := job.GetenvBool("forceRemove") + container := daemon.Get(name) + + if removeLink { + if container == nil { + return job.Errorf("No such link: %s", name) + } + name, err := GetFullContainerName(name) + if err != nil { + return job.Error(err) + } + parent, n := path.Split(name) + if parent == "/" { + return job.Errorf("Conflict, cannot remove the default name of the container") + } + pe := daemon.ContainerGraph().Get(parent) + if pe == nil { + return job.Errorf("Cannot get parent %s for name %s", parent, name) + } + parentContainer := daemon.Get(pe.ID()) + + if parentContainer != nil { + parentContainer.DisableLink(n) + } + + if err := daemon.ContainerGraph().Delete(name); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + + if container != nil { + if container.State.IsRunning() { + if forceRemove { + if err := container.Kill(); err != nil { + return job.Errorf("Could not kill running container, cannot remove - %v", err) + } + } else { + return job.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -f") + } + } + if err := daemon.Destroy(container); err != nil { + return job.Errorf("Cannot destroy container %s: %s", name, err) + } + container.LogEvent("destroy") + + if removeVolume { + var ( + volumes = make(map[string]struct{}) + binds = make(map[string]struct{}) + usedVolumes = make(map[string]*Container) + ) + + // the volume id is always the base of the path + getVolumeId := func(p string) string { + return filepath.Base(strings.TrimSuffix(p, "/layer")) + } + + // populate bind map so that they can be skipped and not removed + for _, bind := range container.HostConfig().Binds { + source := strings.Split(bind, ":")[0] + // TODO: refactor all volume stuff, all of it + // it is very important that we eval the link, or comparing the keys to container.Volumes will not work + // + // eval symlink can fail (ref #5244); if we receive an "is not exist" error we can ignore it + p, err := filepath.EvalSymlinks(source) + if err != nil && !os.IsNotExist(err) { + return job.Error(err) + } + if p != "" { + source = p + } + binds[source] = struct{}{} + } + + // Store all of the deleted container's volumes + for _, volumeId := range container.Volumes { + // Skip the volumes mounted from external bind mounts; + // binds here have already been evaluated for symlinks + if _, exists := binds[volumeId]; exists { + continue + } + + volumeId = getVolumeId(volumeId) + volumes[volumeId] = struct{}{} + } + + // Retrieve all volumes from all remaining containers + for _, container := range daemon.List() { + for _, containerVolumeId := range container.Volumes { + containerVolumeId = getVolumeId(containerVolumeId) + usedVolumes[containerVolumeId] = container + } + } + + for volumeId := range volumes { + // If the requested volume is still used by another container, skip it + if c, exists := usedVolumes[volumeId]; exists { + log.Infof("The volume %s is used by the container %s. Impossible to remove it. Skipping.", volumeId, c.ID) + continue + } + if err := daemon.Volumes().Delete(volumeId); err != nil { + return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err) + } + } + } + } else { + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +}
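The volume bookkeeping above hinges on getVolumeId normalizing bind-mount sources and container volume paths to a common id before comparing them. A standalone sketch of that normalization (the paths are illustrative only):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// volumeId mirrors the getVolumeId closure in ContainerDestroy: a volume's id
// is the last path element once any trailing "/layer" suffix is stripped.
func volumeId(p string) string {
	return filepath.Base(strings.TrimSuffix(p, "/layer"))
}

func main() {
	// Both forms reduce to the same id, so a volume referenced with and
	// without its "/layer" suffix is counted as a single volume.
	fmt.Println(volumeId("/var/lib/docker/vfs/dir/0123abcd/layer")) // 0123abcd
	fmt.Println(volumeId("/var/lib/docker/vfs/dir/0123abcd"))       // 0123abcd
}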
+ +// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem. +// FIXME: rename to Rm for consistency with the CLI command +func (daemon *Daemon) Destroy(container *Container) error { + if container == nil { + return fmt.Errorf("The given container is <nil>") + } + + element := daemon.containers.Get(container.ID) + if element == nil { + return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID) + } + + if err := container.Stop(3); err != nil { + return err + } + + // Deregister the container before removing its directory, to avoid race conditions + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + + if _, err := daemon.containerGraph.Purge(container.ID); err != nil { + log.Debugf("Unable to remove container from link graph: %s", err) + } + + if err := daemon.driver.Remove(container.ID); err != nil { + return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err) + } + + initID := fmt.Sprintf("%s-init", container.ID) + if err := daemon.driver.Remove(initID); err != nil { + return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err) + } + + if err := os.RemoveAll(container.root); err != nil { + return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) + } + + selinuxFreeLxcContexts(container.ProcessLabel) + + return nil +} diff --git a/components/engine/daemon/execdriver/driver.go b/components/engine/daemon/execdriver/driver.go index a3d3bc260a..121c6a5a03 100644 --- a/components/engine/daemon/execdriver/driver.go +++ b/components/engine/daemon/execdriver/driver.go @@ -20,47 +20,7 @@ var ( ErrDriverNotFound = errors.New("The requested docker init has not been found") ) -var dockerInitFcts map[string]InitFunc - -type ( - StartCallback func(*Command) - InitFunc func(i *InitArgs) error ) - -func RegisterInitFunc(name string, fct InitFunc) error { - if dockerInitFcts == nil { - dockerInitFcts = make(map[string]InitFunc) - } - if _, ok := dockerInitFcts[name]; ok { - return ErrDriverAlreadyRegistered - } - dockerInitFcts[name] = fct - return nil -} - -func GetInitFunc(name string) (InitFunc, error) { - fct, ok := dockerInitFcts[name] - if !ok { - return nil, ErrDriverNotFound - } - return fct, nil -} - -// Args provided to the init function for a driver -type InitArgs struct { - User string - Gateway string - Ip string - WorkDir string - Privileged bool - Env []string - Args []string - Mtu int - Driver string - Console string - Pipe int - Root string -} +type StartCallback func(*Command) // Driver specific information based on // processes registered with the driver @@ -140,6 +100,8 @@ type Command struct { Mounts []Mount `json:"mounts"` AllowedDevices []*devices.Device `json:"allowed_devices"` AutoCreatedDevices []*devices.Device `json:"autocreated_devices"` + CapAdd []string `json:"cap_add"` + CapDrop []string `json:"cap_drop"` Terminal Terminal `json:"-"` // standard or tty terminal Console string `json:"-"` // dev/console path diff --git a/components/engine/daemon/execdriver/execdrivers/execdrivers.go b/components/engine/daemon/execdriver/execdrivers/execdrivers.go index 2e18454a09..2a050b4834 100644 --- a/components/engine/daemon/execdriver/execdrivers/execdrivers.go +++ b/components/engine/daemon/execdriver/execdrivers/execdrivers.go @@ -2,10 +2,10 @@ package execdrivers import ( "fmt" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/daemon/execdriver/lxc" - "github.com/dotcloud/docker/daemon/execdriver/native" - "github.com/dotcloud/docker/pkg/sysinfo" + 
"github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/lxc" + "github.com/docker/docker/daemon/execdriver/native" + "github.com/docker/docker/pkg/sysinfo" "path" ) @@ -15,7 +15,7 @@ func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdrive // we want to give the lxc driver the full docker root because it needs // to access and write config and template files in /var/lib/docker/containers/* // to be backwards compatible - return lxc.NewDriver(root, sysInfo.AppArmor) + return lxc.NewDriver(root, initPath, sysInfo.AppArmor) case "native": return native.NewDriver(path.Join(root, "execdriver", "native"), initPath) } diff --git a/components/engine/daemon/execdriver/lxc/driver.go b/components/engine/daemon/execdriver/lxc/driver.go index 59daf1afe1..3b870172bf 100644 --- a/components/engine/daemon/execdriver/lxc/driver.go +++ b/components/engine/daemon/execdriver/lxc/driver.go @@ -3,69 +3,47 @@ package lxc import ( "encoding/json" "fmt" + "io" "io/ioutil" - "log" "os" "os/exec" "path" "path/filepath" - "runtime" "strconv" "strings" "syscall" "time" + "github.com/kr/pty" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" "github.com/docker/libcontainer/cgroups" "github.com/docker/libcontainer/label" "github.com/docker/libcontainer/mount/nodes" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/utils" ) const DriverName = "lxc" -func init() { - execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { - runtime.LockOSThread() - if err := setupEnv(args); err != nil { - return err - } - if err := setupHostname(args); err != nil { - return err - } - if err := setupNetworking(args); err != nil { - return err - } - if err := finalizeNamespace(args); err != nil { - return err - } - - path, err := exec.LookPath(args.Args[0]) - if err != nil { - log.Printf("Unable to locate %v", args.Args[0]) - os.Exit(127) - } - if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { - return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) - } - panic("Unreachable") - }) -} - type driver struct { root string // root path for the driver to use + initPath string apparmor bool sharedRoot bool } -func NewDriver(root string, apparmor bool) (*driver, error) { +func NewDriver(root, initPath string, apparmor bool) (*driver, error) { // setup unconfined symlink if err := linkLxcStart(root); err != nil { return nil, err } + return &driver{ apparmor: apparmor, root: root, + initPath: initPath, sharedRoot: rootIsShared(), }, nil } @@ -76,9 +54,25 @@ func (d *driver) Name() string { } func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - if err := execdriver.SetTerminal(c, pipes); err != nil { - return -1, err + var ( + term execdriver.Terminal + err error + ) + + if c.Tty { + term, err = NewTtyConsole(c, pipes) + } else { + term, err = execdriver.NewStdConsole(c, pipes) } + c.Terminal = term + + c.Mounts = append(c.Mounts, execdriver.Mount{ + Source: d.initPath, + Destination: c.InitPath, + Writable: false, + Private: true, + }) + if err := d.generateEnvConfig(c); err != nil { return -1, err } @@ -92,8 +86,6 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba "-f", configPath, "--", c.InitPath, - "-driver", - DriverName, } if c.Network.Interface != nil { @@ -122,6 +114,14 @@ func (d *driver) 
Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba params = append(params, "-w", c.WorkingDir) } + if len(c.CapAdd) > 0 { + params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":"))) + } + + if len(c.CapDrop) > 0 { + params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":"))) + } + params = append(params, "--", c.Entrypoint) params = append(params, c.Arguments...) @@ -320,7 +320,7 @@ func (i *info) IsRunning() bool { output, err := i.driver.getInfo(i.ID) if err != nil { - utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) + log.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) return false } if strings.Contains(string(output), "RUNNING") { @@ -447,7 +447,83 @@ func (d *driver) generateEnvConfig(c *execdriver.Command) error { return err } p := path.Join(d.root, "containers", c.ID, "config.env") - c.Mounts = append(c.Mounts, execdriver.Mount{p, "/.dockerenv", false, true}) + c.Mounts = append(c.Mounts, execdriver.Mount{ + Source: p, + Destination: "/.dockerenv", + Writable: false, + Private: true, + }) return ioutil.WriteFile(p, data, 0600) } + +type TtyConsole struct { + MasterPty *os.File + SlavePty *os.File +} + +func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) { + // lxc is special in that we cannot create the master outside of the container without + // opening the slave because we have nothing to provide to the cmd. We have to open both then do + // the crazy setup on command right now instead of passing the console path to lxc and telling it + // to open up that console. we save a couple of openfiles in the native driver because we can do + // this. + ptyMaster, ptySlave, err := pty.Open() + if err != nil { + return nil, err + } + + tty := &TtyConsole{ + MasterPty: ptyMaster, + SlavePty: ptySlave, + } + + if err := tty.AttachPipes(&command.Cmd, pipes); err != nil { + tty.Close() + return nil, err + } + + command.Console = tty.SlavePty.Name() + + return tty, nil +} + +func (t *TtyConsole) Master() *os.File { + return t.MasterPty +} + +func (t *TtyConsole) Resize(h, w int) error { + return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) +} + +func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error { + command.Stdout = t.SlavePty + command.Stderr = t.SlavePty + + go func() { + if wb, ok := pipes.Stdout.(interface { + CloseWriters() error + }); ok { + defer wb.CloseWriters() + } + + io.Copy(pipes.Stdout, t.MasterPty) + }() + + if pipes.Stdin != nil { + command.Stdin = t.SlavePty + command.SysProcAttr.Setctty = true + + go func() { + io.Copy(t.MasterPty, pipes.Stdin) + + pipes.Stdin.Close() + }() + } + return nil +} + +func (t *TtyConsole) Close() error { + t.SlavePty.Close() + return t.MasterPty.Close() +} diff --git a/components/engine/daemon/execdriver/lxc/init.go b/components/engine/daemon/execdriver/lxc/init.go index 1af7730cae..2a91bbb5f5 100644 --- a/components/engine/daemon/execdriver/lxc/init.go +++ b/components/engine/daemon/execdriver/lxc/init.go @@ -2,19 +2,116 @@ package lxc import ( "encoding/json" + "flag" "fmt" "io/ioutil" + "log" "net" "os" + "os/exec" + "runtime" "strings" "syscall" + "github.com/docker/docker/reexec" "github.com/docker/libcontainer/netlink" - "github.com/dotcloud/docker/daemon/execdriver" ) +// Args provided to the init function for a driver +type InitArgs struct { + User string + Gateway string + Ip string + WorkDir string + 
Privileged bool + Env []string + Args []string + Mtu int + Console string + Pipe int + Root string + CapAdd string + CapDrop string +} + +func init() { + // like always lxc requires a hack to get this to work + reexec.Register("/.dockerinit", dockerInitializer) +} + +func dockerInitializer() { + initializer() +} + +// initializer is the lxc driver's init function that is run inside the namespace to set up +// additional configuration +func initializer() { + runtime.LockOSThread() + + args := getArgs() + + if err := setupNamespace(args); err != nil { + log.Fatal(err) + } +} + +func setupNamespace(args *InitArgs) error { + if err := setupEnv(args); err != nil { + return err } + if err := setupHostname(args); err != nil { + return err + } + if err := setupNetworking(args); err != nil { + return err + } + if err := finalizeNamespace(args); err != nil { + return err + } + + path, err := exec.LookPath(args.Args[0]) + if err != nil { + log.Printf("Unable to locate %v", args.Args[0]) + os.Exit(127) + } + + if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { + return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) + } + + return nil +} + +func getArgs() *InitArgs { + var ( + // Get cmdline arguments + user = flag.String("u", "", "username or uid") + gateway = flag.String("g", "", "gateway address") + ip = flag.String("i", "", "ip address") + workDir = flag.String("w", "", "workdir") + privileged = flag.Bool("privileged", false, "privileged mode") + mtu = flag.Int("mtu", 1500, "interface mtu") + capAdd = flag.String("cap-add", "", "capabilities to add") + capDrop = flag.String("cap-drop", "", "capabilities to drop") + ) + + flag.Parse() + + return &InitArgs{ + User: *user, + Gateway: *gateway, + Ip: *ip, + WorkDir: *workDir, + Privileged: *privileged, + Args: flag.Args(), + Mtu: *mtu, + CapAdd: *capAdd, + CapDrop: *capDrop, + } +} + // Clear environment pollution introduced by lxc-start -func setupEnv(args *execdriver.InitArgs) error { +func setupEnv(args *InitArgs) error { // Get env var env []string content, err := ioutil.ReadFile(".dockerenv") @@ -41,7 +138,7 @@ func setupEnv(args *execdriver.InitArgs) error { return nil } -func setupHostname(args *execdriver.InitArgs) error { +func setupHostname(args *InitArgs) error { hostname := getEnv(args, "HOSTNAME") if hostname == "" { return nil @@ -50,7 +147,7 @@ func setupHostname(args *execdriver.InitArgs) error { } // Setup networking -func setupNetworking(args *execdriver.InitArgs) error { +func setupNetworking(args *InitArgs) error { if args.Ip != "" { // eth0 iface, err := net.InterfaceByName("eth0") @@ -95,7 +192,7 @@ func setupNetworking(args *execdriver.InitArgs) error { } // Setup working directory -func setupWorkingDirectory(args *execdriver.InitArgs) error { +func setupWorkingDirectory(args *InitArgs) error { if args.WorkDir == "" { return nil } @@ -105,7 +202,7 @@ func setupWorkingDirectory(args *execdriver.InitArgs) error { return nil } -func getEnv(args *execdriver.InitArgs, key string) string { +func getEnv(args *InitArgs, key string) string { for _, kv := range args.Env { parts := strings.SplitN(kv, "=", 2) if parts[0] == key && len(parts) == 2 { diff --git a/components/engine/daemon/execdriver/lxc/lxc_init_linux.go b/components/engine/daemon/execdriver/lxc/lxc_init_linux.go index 1fd497e9aa..625caa1608 100644 --- a/components/engine/daemon/execdriver/lxc/lxc_init_linux.go +++ b/components/engine/daemon/execdriver/lxc/lxc_init_linux.go @@ -1,24 +1,23 @@ -// +build amd64 - package lxc import ( "fmt" + 
"strings" "syscall" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/libcontainer/namespaces" "github.com/docker/libcontainer/security/capabilities" + "github.com/docker/libcontainer/system" "github.com/docker/libcontainer/utils" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/daemon/execdriver/native/template" - "github.com/dotcloud/docker/pkg/system" ) func setHostname(hostname string) error { return syscall.Sethostname([]byte(hostname)) } -func finalizeNamespace(args *execdriver.InitArgs) error { +func finalizeNamespace(args *InitArgs) error { if err := utils.CloseExecFrom(3); err != nil { return err } @@ -48,8 +47,25 @@ func finalizeNamespace(args *execdriver.InitArgs) error { return fmt.Errorf("clear keep caps %s", err) } + var ( + adds []string + drops []string + ) + + if args.CapAdd != "" { + adds = strings.Split(args.CapAdd, ":") + } + if args.CapDrop != "" { + drops = strings.Split(args.CapDrop, ":") + } + + caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops) + if err != nil { + return err + } + // drop all other capabilities - if err := capabilities.DropCapabilities(container.Capabilities); err != nil { + if err := capabilities.DropCapabilities(caps); err != nil { return fmt.Errorf("drop capabilities %s", err) } } diff --git a/components/engine/daemon/execdriver/lxc/lxc_init_unsupported.go b/components/engine/daemon/execdriver/lxc/lxc_init_unsupported.go index 079446e186..b3f2ae68eb 100644 --- a/components/engine/daemon/execdriver/lxc/lxc_init_unsupported.go +++ b/components/engine/daemon/execdriver/lxc/lxc_init_unsupported.go @@ -1,8 +1,8 @@ -// +build !linux !amd64 +// +build !linux package lxc -import "github.com/dotcloud/docker/daemon/execdriver" +import "github.com/docker/docker/daemon/execdriver" func setHostname(hostname string) error { panic("Not supported on darwin") diff --git a/components/engine/daemon/execdriver/lxc/lxc_template.go b/components/engine/daemon/execdriver/lxc/lxc_template.go index 88618f07fd..229b0a5144 100644 --- a/components/engine/daemon/execdriver/lxc/lxc_template.go +++ b/components/engine/daemon/execdriver/lxc/lxc_template.go @@ -4,8 +4,8 @@ import ( "strings" "text/template" + "github.com/docker/docker/daemon/execdriver" "github.com/docker/libcontainer/label" - "github.com/dotcloud/docker/daemon/execdriver" ) const LxcTemplate = ` @@ -75,9 +75,9 @@ lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountL {{range $value := .Mounts}} {{if $value.Writable}} -lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,rw 0 0 +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw 0 0 {{else}} -lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,ro 0 0 +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro 0 0 {{end}} {{end}} diff --git a/components/engine/daemon/execdriver/lxc/lxc_template_unit_test.go b/components/engine/daemon/execdriver/lxc/lxc_template_unit_test.go index a9a67c421c..8acda804ee 100644 --- a/components/engine/daemon/execdriver/lxc/lxc_template_unit_test.go +++ b/components/engine/daemon/execdriver/lxc/lxc_template_unit_test.go @@ -1,3 +1,5 @@ +// +build linux + package lxc import ( @@ -11,8 +13,8 @@ import ( "testing" 
"time" + "github.com/docker/docker/daemon/execdriver" "github.com/docker/libcontainer/devices" - "github.com/dotcloud/docker/daemon/execdriver" ) func TestLXCConfig(t *testing.T) { @@ -35,7 +37,7 @@ func TestLXCConfig(t *testing.T) { cpu = cpuMin + rand.Intn(cpuMax-cpuMin) ) - driver, err := NewDriver(root, false) + driver, err := NewDriver(root, "", false) if err != nil { t.Fatal(err) } @@ -71,7 +73,7 @@ func TestCustomLxcConfig(t *testing.T) { os.MkdirAll(path.Join(root, "containers", "1"), 0777) - driver, err := NewDriver(root, false) + driver, err := NewDriver(root, "", false) if err != nil { t.Fatal(err) } diff --git a/components/engine/daemon/execdriver/native/configuration/parse.go b/components/engine/daemon/execdriver/native/configuration/parse.go index 8fb1b452b9..e021fa0de4 100644 --- a/components/engine/daemon/execdriver/native/configuration/parse.go +++ b/components/engine/daemon/execdriver/native/configuration/parse.go @@ -7,8 +7,8 @@ import ( "strconv" "strings" + "github.com/docker/docker/pkg/units" "github.com/docker/libcontainer" - "github.com/dotcloud/docker/pkg/units" ) type Action func(*libcontainer.Config, interface{}, string) error diff --git a/components/engine/daemon/execdriver/native/configuration/parse_test.go b/components/engine/daemon/execdriver/native/configuration/parse_test.go index 0401d7b37e..1493d2b29b 100644 --- a/components/engine/daemon/execdriver/native/configuration/parse_test.go +++ b/components/engine/daemon/execdriver/native/configuration/parse_test.go @@ -3,8 +3,8 @@ package configuration import ( "testing" + "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/libcontainer/security/capabilities" - "github.com/dotcloud/docker/daemon/execdriver/native/template" ) // Checks whether the expected capability is specified in the capabilities. 
diff --git a/components/engine/daemon/execdriver/native/create.go b/components/engine/daemon/execdriver/native/create.go index f28507b046..e475a1f2ad 100644 --- a/components/engine/daemon/execdriver/native/create.go +++ b/components/engine/daemon/execdriver/native/create.go @@ -1,3 +1,5 @@ +// +build linux,cgo + package native import ( @@ -6,14 +8,14 @@ import ( "os/exec" "path/filepath" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/daemon/execdriver/native/configuration" + "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/libcontainer" "github.com/docker/libcontainer/apparmor" "github.com/docker/libcontainer/devices" "github.com/docker/libcontainer/mount" "github.com/docker/libcontainer/security/capabilities" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/daemon/execdriver/native/configuration" - "github.com/dotcloud/docker/daemon/execdriver/native/template" ) // createContainer populates and configures the container type with the @@ -42,6 +44,10 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e if err := d.setPrivileged(container); err != nil { return nil, err } + } else { + if err := d.setCapabilities(container, c); err != nil { + return nil, err + } } if err := d.setupCgroups(container, c); err != nil { @@ -136,6 +142,11 @@ func (d *driver) setPrivileged(container *libcontainer.Config) (err error) { return nil } +func (d *driver) setCapabilities(container *libcontainer.Config, c *execdriver.Command) (err error) { + container.Capabilities, err = execdriver.TweakCapabilities(container.Capabilities, c.CapAdd, c.CapDrop) + return err +} + func (d *driver) setupCgroups(container *libcontainer.Config, c *execdriver.Command) error { if c.Resources != nil { container.Cgroups.CpuShares = c.Resources.CpuShares diff --git a/components/engine/daemon/execdriver/native/driver.go b/components/engine/daemon/execdriver/native/driver.go index 90333703c5..c45188b6bc 100644 --- a/components/engine/daemon/execdriver/native/driver.go +++ b/components/engine/daemon/execdriver/native/driver.go @@ -1,8 +1,11 @@ +// +build linux,cgo + package native import ( "encoding/json" "fmt" + "io" "io/ioutil" "os" "os/exec" @@ -11,13 +14,15 @@ import ( "sync" "syscall" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/term" "github.com/docker/libcontainer" "github.com/docker/libcontainer/apparmor" "github.com/docker/libcontainer/cgroups/fs" "github.com/docker/libcontainer/cgroups/systemd" + consolepkg "github.com/docker/libcontainer/console" "github.com/docker/libcontainer/namespaces" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/pkg/system" + "github.com/docker/libcontainer/system" ) const ( @@ -25,34 +30,6 @@ const ( Version = "0.2" ) -func init() { - execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { - var container *libcontainer.Config - f, err := os.Open(filepath.Join(args.Root, "container.json")) - if err != nil { - return err - } - if err := json.NewDecoder(f).Decode(&container); err != nil { - f.Close() - return err - } - f.Close() - - rootfs, err := os.Getwd() - if err != nil { - return err - } - syncPipe, err := namespaces.NewSyncPipeFromFd(0, uintptr(args.Pipe)) - if err != nil { - return err - } - if err := namespaces.Init(container, rootfs, args.Console, syncPipe, args.Args); err != nil { - return err - } - return nil - }) -} - type activeContainer struct { container *libcontainer.Config cmd 
*exec.Cmd @@ -88,6 +65,19 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba if err != nil { return -1, err } + + var term execdriver.Terminal + + if c.Tty { + term, err = NewTtyConsole(c, pipes) + } else { + term, err = execdriver.NewStdConsole(c, pipes) + } + if err != nil { + return -1, err + } + c.Terminal = term + d.Lock() d.activeContainers[c.ID] = &activeContainer{ container: container, @@ -99,6 +89,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba dataPath = filepath.Join(d.root, c.ID) args = append([]string{c.Entrypoint}, c.Arguments...) ) + if err := d.createContainerRoot(c.ID); err != nil { return -1, err } @@ -108,16 +99,10 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba return -1, err } - term := getTerminal(c, pipes) - - return namespaces.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd { - // we need to join the rootfs because namespaces will setup the rootfs and chroot - initPath := filepath.Join(c.Rootfs, c.InitPath) - + return namespaces.Exec(container, c.Stdin, c.Stdout, c.Stderr, c.Console, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd { c.Path = d.initPath c.Args = append([]string{ - initPath, - "-driver", DriverName, + DriverName, "-console", console, "-pipe", "3", "-root", filepath.Join(d.root, c.ID), @@ -125,8 +110,9 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba }, args...) // set this to nil so that when we set the clone flags anything else is reset - c.SysProcAttr = nil - system.SetCloneFlags(&c.Cmd, uintptr(namespaces.GetNamespaceFlags(container.Namespaces))) + c.SysProcAttr = &syscall.SysProcAttr{ + Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)), + } c.ExtraFiles = []*os.File{child} c.Env = container.Env @@ -194,11 +180,13 @@ func (d *driver) Terminate(p *execdriver.Command) error { if err != nil { return err } + if state.InitStartTime == currentStartTime { err = syscall.Kill(p.Process.Pid, 9) syscall.Wait4(p.Process.Pid, nil, 0, nil) } d.removeContainerRoot(p.ID) + return err } @@ -260,17 +248,60 @@ func getEnv(key string, env []string) string { return "" } -func getTerminal(c *execdriver.Command, pipes *execdriver.Pipes) namespaces.Terminal { - var term namespaces.Terminal - if c.Tty { - term = &dockerTtyTerm{ - pipes: pipes, - } - } else { - term = &dockerStdTerm{ - pipes: pipes, - } - } - c.Terminal = term - return term +type TtyConsole struct { + MasterPty *os.File +} + +func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) { + ptyMaster, console, err := consolepkg.CreateMasterAndConsole() + if err != nil { + return nil, err + } + + tty := &TtyConsole{ + MasterPty: ptyMaster, + } + + if err := tty.AttachPipes(&command.Cmd, pipes); err != nil { + tty.Close() + return nil, err + } + + command.Console = console + + return tty, nil +} + +func (t *TtyConsole) Master() *os.File { + return t.MasterPty +} + +func (t *TtyConsole) Resize(h, w int) error { + return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) +} + +func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error { + go func() { + if wb, ok := pipes.Stdout.(interface { + CloseWriters() error + }); ok { + defer wb.CloseWriters() + 
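+			// pipes.Stdout here is typically the container's BroadcastWriter
+			// (wired up in daemon.register), so once the master pty returns EOF
+			// because the container exited, this deferred CloseWriters call
+			// closes every attached log/attach stream and readers see EOF too.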
} + + io.Copy(pipes.Stdout, t.MasterPty) + }() + + if pipes.Stdin != nil { + go func() { + io.Copy(t.MasterPty, pipes.Stdin) + + pipes.Stdin.Close() + }() + } + + return nil +} + +func (t *TtyConsole) Close() error { + return t.MasterPty.Close() } diff --git a/components/engine/daemon/execdriver/native/driver_unsupported.go b/components/engine/daemon/execdriver/native/driver_unsupported.go new file mode 100644 index 0000000000..97839cf3b2 --- /dev/null +++ b/components/engine/daemon/execdriver/native/driver_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package native + +import ( + "fmt" + + "github.com/docker/docker/daemon/execdriver" +) + +func NewDriver(root, initPath string) (execdriver.Driver, error) { + return nil, fmt.Errorf("native driver not supported on non-linux") +} diff --git a/components/engine/daemon/execdriver/native/driver_unsupported_nocgo.go b/components/engine/daemon/execdriver/native/driver_unsupported_nocgo.go new file mode 100644 index 0000000000..2b8e9f81a1 --- /dev/null +++ b/components/engine/daemon/execdriver/native/driver_unsupported_nocgo.go @@ -0,0 +1,13 @@ +// +build linux,!cgo + +package native + +import ( + "fmt" + + "github.com/docker/docker/daemon/execdriver" +) + +func NewDriver(root, initPath string) (execdriver.Driver, error) { + return nil, fmt.Errorf("native driver not supported on non-linux") +} diff --git a/components/engine/daemon/execdriver/native/info.go b/components/engine/daemon/execdriver/native/info.go index c34d0297b1..601b97e810 100644 --- a/components/engine/daemon/execdriver/native/info.go +++ b/components/engine/daemon/execdriver/native/info.go @@ -1,3 +1,5 @@ +// +build linux,cgo + package native import ( diff --git a/components/engine/daemon/execdriver/native/init.go b/components/engine/daemon/execdriver/native/init.go new file mode 100644 index 0000000000..7021eeb67e --- /dev/null +++ b/components/engine/daemon/execdriver/native/init.go @@ -0,0 +1,66 @@ +// +build linux + +package native + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/reexec" + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/namespaces" + "github.com/docker/libcontainer/syncpipe" +) + +func init() { + reexec.Register(DriverName, initializer) +} + +func initializer() { + runtime.LockOSThread() + + var ( + pipe = flag.Int("pipe", 0, "sync pipe fd") + console = flag.String("console", "", "console (pty slave) path") + root = flag.String("root", ".", "root path for configuration files") + ) + + flag.Parse() + + var container *libcontainer.Config + f, err := os.Open(filepath.Join(*root, "container.json")) + if err != nil { + writeError(err) + } + + if err := json.NewDecoder(f).Decode(&container); err != nil { + f.Close() + writeError(err) + } + f.Close() + + rootfs, err := os.Getwd() + if err != nil { + writeError(err) + } + + syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(*pipe)) + if err != nil { + writeError(err) + } + + if err := namespaces.Init(container, rootfs, *console, syncPipe, flag.Args()); err != nil { + writeError(err) + } + + panic("Unreachable") +} + +func writeError(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} diff --git a/components/engine/daemon/execdriver/native/template/default_template.go b/components/engine/daemon/execdriver/native/template/default_template.go index d0894a0c9f..be3dd5a5c1 100644 --- a/components/engine/daemon/execdriver/native/template/default_template.go +++ 
b/components/engine/daemon/execdriver/native/template/default_template.go @@ -12,6 +12,7 @@ func New() *libcontainer.Config { Capabilities: []string{ "CHOWN", "DAC_OVERRIDE", + "FSETID", "FOWNER", "MKNOD", "NET_RAW", @@ -22,6 +23,7 @@ func New() *libcontainer.Config { "NET_BIND_SERVICE", "SYS_CHROOT", "KILL", + "AUDIT_WRITE", }, Namespaces: map[string]bool{ "NEWNS": true, diff --git a/components/engine/daemon/execdriver/native/term.go b/components/engine/daemon/execdriver/native/term.go deleted file mode 100644 index f60351c609..0000000000 --- a/components/engine/daemon/execdriver/native/term.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - These types are wrappers around the libcontainer Terminal interface so that - we can resuse the docker implementations where possible. -*/ -package native - -import ( - "github.com/dotcloud/docker/daemon/execdriver" - "io" - "os" - "os/exec" -) - -type dockerStdTerm struct { - execdriver.StdConsole - pipes *execdriver.Pipes -} - -func (d *dockerStdTerm) Attach(cmd *exec.Cmd) error { - return d.AttachPipes(cmd, d.pipes) -} - -func (d *dockerStdTerm) SetMaster(master *os.File) { - // do nothing -} - -type dockerTtyTerm struct { - execdriver.TtyConsole - pipes *execdriver.Pipes -} - -func (t *dockerTtyTerm) Attach(cmd *exec.Cmd) error { - go io.Copy(t.pipes.Stdout, t.MasterPty) - if t.pipes.Stdin != nil { - go io.Copy(t.MasterPty, t.pipes.Stdin) - } - return nil -} - -func (t *dockerTtyTerm) SetMaster(master *os.File) { - t.MasterPty = master -} diff --git a/components/engine/daemon/execdriver/termconsole.go b/components/engine/daemon/execdriver/termconsole.go index af6b88d3d1..dc0e54ccdb 100644 --- a/components/engine/daemon/execdriver/termconsole.go +++ b/components/engine/daemon/execdriver/termconsole.go @@ -1,90 +1,10 @@ package execdriver import ( - "github.com/dotcloud/docker/pkg/term" - "github.com/kr/pty" "io" - "os" "os/exec" ) -func SetTerminal(command *Command, pipes *Pipes) error { - var ( - term Terminal - err error - ) - if command.Tty { - term, err = NewTtyConsole(command, pipes) - } else { - term, err = NewStdConsole(command, pipes) - } - if err != nil { - return err - } - command.Terminal = term - return nil -} - -type TtyConsole struct { - MasterPty *os.File - SlavePty *os.File -} - -func NewTtyConsole(command *Command, pipes *Pipes) (*TtyConsole, error) { - ptyMaster, ptySlave, err := pty.Open() - if err != nil { - return nil, err - } - tty := &TtyConsole{ - MasterPty: ptyMaster, - SlavePty: ptySlave, - } - if err := tty.AttachPipes(&command.Cmd, pipes); err != nil { - tty.Close() - return nil, err - } - command.Console = tty.SlavePty.Name() - return tty, nil -} - -func (t *TtyConsole) Master() *os.File { - return t.MasterPty -} - -func (t *TtyConsole) Resize(h, w int) error { - return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) -} - -func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { - command.Stdout = t.SlavePty - command.Stderr = t.SlavePty - - go func() { - if wb, ok := pipes.Stdout.(interface { - CloseWriters() error - }); ok { - defer wb.CloseWriters() - } - io.Copy(pipes.Stdout, t.MasterPty) - }() - - if pipes.Stdin != nil { - command.Stdin = t.SlavePty - command.SysProcAttr.Setctty = true - - go func() { - defer pipes.Stdin.Close() - io.Copy(t.MasterPty, pipes.Stdin) - }() - } - return nil -} - -func (t *TtyConsole) Close() error { - t.SlavePty.Close() - return t.MasterPty.Close() -} - type StdConsole struct { } diff --git a/components/engine/daemon/execdriver/utils.go 
b/components/engine/daemon/execdriver/utils.go new file mode 100644 index 0000000000..37042ef83a --- /dev/null +++ b/components/engine/daemon/execdriver/utils.go @@ -0,0 +1,63 @@ +package execdriver + +import ( + "fmt" + "strings" + + "github.com/docker/docker/utils" + "github.com/docker/libcontainer/security/capabilities" +) + +func TweakCapabilities(basics, adds, drops []string) ([]string, error) { + var ( + newCaps []string + allCaps = capabilities.GetAllCapabilities() + ) + + // look for invalid cap in the drop list + for _, cap := range drops { + if strings.ToLower(cap) == "all" { + continue + } + if !utils.StringsContainsNoCase(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability drop: %q", cap) + } + } + + // handle --cap-add=all + if utils.StringsContainsNoCase(adds, "all") { + basics = capabilities.GetAllCapabilities() + } + + if !utils.StringsContainsNoCase(drops, "all") { + for _, cap := range basics { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + // if we don't drop `all`, add back all the non-dropped caps + if !utils.StringsContainsNoCase(drops, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + } + + for _, cap := range adds { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + if !utils.StringsContainsNoCase(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability to add: %q", cap) + } + + // add cap if not already in the list + if !utils.StringsContainsNoCase(newCaps, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + + return newCaps, nil +} diff --git a/components/engine/daemon/export.go b/components/engine/daemon/export.go new file mode 100644 index 0000000000..bc0f14a3bb --- /dev/null +++ b/components/engine/daemon/export.go @@ -0,0 +1,30 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s container_id", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + data, err := container.Export() + if err != nil { + return job.Errorf("%s: %s", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(job.Stdout, data); err != nil { + return job.Errorf("%s: %s", name, err) + } + // FIXME: factor job-specific LogEvent to engine.Job.Run() + container.LogEvent("export") + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff --git a/components/engine/daemon/graphdriver/aufs/aufs.go b/components/engine/daemon/graphdriver/aufs/aufs.go index 0206b92e17..ebd4929389 100644 --- a/components/engine/daemon/graphdriver/aufs/aufs.go +++ b/components/engine/daemon/graphdriver/aufs/aufs.go @@ -30,11 +30,12 @@ import ( "sync" "syscall" + "github.com/docker/docker/archive" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/log" + mountpk "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/utils" "github.com/docker/libcontainer/label" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon/graphdriver" - mountpk "github.com/dotcloud/docker/pkg/mount" - "github.com/dotcloud/docker/utils" ) var ( @@ -209,7 +210,7 @@ func (a *Driver) Remove(id string) error { defer a.Unlock() if a.active[id] != 0 { - utils.Errorf("Warning: removing active id %s\n", id) + log.Errorf("Warning: removing active id %s", id)
} // Make sure the dir is umounted first @@ -378,7 +379,7 @@ func (a *Driver) Cleanup() error { for _, id := range ids { if err := a.unmount(id); err != nil { - utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) + log.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) } } diff --git a/components/engine/daemon/graphdriver/aufs/aufs_test.go b/components/engine/daemon/graphdriver/aufs/aufs_test.go index b3bad410a5..081fb88984 100644 --- a/components/engine/daemon/graphdriver/aufs/aufs_test.go +++ b/components/engine/daemon/graphdriver/aufs/aufs_test.go @@ -4,8 +4,8 @@ import ( "crypto/sha256" "encoding/hex" "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon/graphdriver" + "github.com/docker/docker/archive" + "github.com/docker/docker/daemon/graphdriver" "io/ioutil" "os" "path" diff --git a/components/engine/daemon/graphdriver/aufs/mount.go b/components/engine/daemon/graphdriver/aufs/mount.go index 1f1d98f809..fa74e05b07 100644 --- a/components/engine/daemon/graphdriver/aufs/mount.go +++ b/components/engine/daemon/graphdriver/aufs/mount.go @@ -1,14 +1,15 @@ package aufs import ( - "github.com/dotcloud/docker/utils" "os/exec" "syscall" + + "github.com/docker/docker/pkg/log" ) func Unmount(target string) error { if err := exec.Command("auplink", target, "flush").Run(); err != nil { - utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err) + log.Errorf("[warning]: couldn't run auplink before unmount: %s", err) } if err := syscall.Unmount(target, 0); err != nil { return err diff --git a/components/engine/daemon/graphdriver/aufs/mount_linux.go b/components/engine/daemon/graphdriver/aufs/mount_linux.go index 6082d9f240..c86f1bbd63 100644 --- a/components/engine/daemon/graphdriver/aufs/mount_linux.go +++ b/components/engine/daemon/graphdriver/aufs/mount_linux.go @@ -1,5 +1,3 @@ -// +build amd64 - package aufs import "syscall" diff --git a/components/engine/daemon/graphdriver/aufs/mount_unsupported.go b/components/engine/daemon/graphdriver/aufs/mount_unsupported.go index 2735624112..e291bef3aa 100644 --- a/components/engine/daemon/graphdriver/aufs/mount_unsupported.go +++ b/components/engine/daemon/graphdriver/aufs/mount_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux !amd64 +// +build !linux package aufs diff --git a/components/engine/daemon/graphdriver/btrfs/btrfs.go b/components/engine/daemon/graphdriver/btrfs/btrfs.go index f561244c51..c491fd7908 100644 --- a/components/engine/daemon/graphdriver/btrfs/btrfs.go +++ b/components/engine/daemon/graphdriver/btrfs/btrfs.go @@ -1,4 +1,4 @@ -// +build linux,amd64 +// +build linux package btrfs @@ -16,8 +16,8 @@ import ( "syscall" "unsafe" - "github.com/dotcloud/docker/daemon/graphdriver" - "github.com/dotcloud/docker/pkg/mount" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/mount" ) func init() { diff --git a/components/engine/daemon/graphdriver/btrfs/btrfs_test.go b/components/engine/daemon/graphdriver/btrfs/btrfs_test.go index 3069a98557..cde23ce4a0 100644 --- a/components/engine/daemon/graphdriver/btrfs/btrfs_test.go +++ b/components/engine/daemon/graphdriver/btrfs/btrfs_test.go @@ -1,7 +1,7 @@ package btrfs import ( - "github.com/dotcloud/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/daemon/graphdriver/graphtest" "testing" ) diff --git a/components/engine/daemon/graphdriver/btrfs/dummy_unsupported.go b/components/engine/daemon/graphdriver/btrfs/dummy_unsupported.go index 6c44615763..f07088887a 100644 --- 
a/components/engine/daemon/graphdriver/btrfs/dummy_unsupported.go +++ b/components/engine/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -1,3 +1,3 @@ -// +build !linux !amd64 +// +build !linux !cgo package btrfs diff --git a/components/engine/daemon/graphdriver/devmapper/README.md b/components/engine/daemon/graphdriver/devmapper/README.md index c8ab1d1ee1..c42620247b 100644 --- a/components/engine/daemon/graphdriver/devmapper/README.md +++ b/components/engine/daemon/graphdriver/devmapper/README.md @@ -7,7 +7,7 @@ module (dm-thinp) to implement CoW snapshots. For each devicemapper graph location (typically `/var/lib/docker/devicemapper`, $graph below) a thin pool is created based on two block devices, one for data and one for metadata. By default these block devices are created -automatically by using loopback mounts of automatically creates sparse +automatically by using loopback mounts of automatically created sparse files. The default loopback files used are `$graph/devicemapper/data` and @@ -15,15 +15,15 @@ The default loopback files used are `$graph/devicemapper/data` and from docker entities to the corresponding devicemapper volumes is stored in the `$graph/devicemapper/json` file (encoded as Json). -In order to support multiple devicemapper graphs on a system the thin +In order to support multiple devicemapper graphs on a system, the thin pool will be named something like: `docker-0:33-19478248-pool`, where -the `0:30` part is the minor/major device nr and `19478248` is the +the `0:33` part is the major/minor device nr and `19478248` is the inode number of the $graph directory. -On the thin pool docker automatically creates a base thin device, +On the thin pool, docker automatically creates a base thin device, called something like `docker-0:33-19478248-base` of a fixed -size. This is automatically formated on creation and contains just an -empty filesystem. This device is the base of all docker images and +size. This is automatically formatted with an empty filesystem on +creation. This device is the base of all docker images and containers. All base images are snapshots of this device and those images are then in turn used as snapshots for other images and eventually containers. @@ -31,8 +31,8 @@ eventually containers. ### options The devicemapper backend supports some options that you can specify -when starting the docker daemon using the --storage-opt flags. -This uses the `dm` prefix and would be used somthing like `docker -d --storage-opt dm.foo=bar`. +when starting the docker daemon using the `--storage-opt` flags. +This uses the `dm` prefix and would be used something like `docker -d --storage-opt dm.foo=bar`. Here is the list of supported options: @@ -43,7 +43,11 @@ Here is the list of supported options: 10G. Note, thin devices are inherently "sparse", so a 10G device which is mostly empty doesn't use 10 GB of space on the pool. However, the filesystem will use more space for the empty - case the larger the device is. + case the larger the device is. **Warning**: This value affects the + system-wide "base" empty filesystem that may already be + initialized and inherited by pulled images. Typically, a change + to this value will require additional steps to take effect: 1) + stop `docker -d`, 2) `rm -rf /var/lib/docker`, 3) start `docker -d`.
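The pool-naming scheme described a few paragraphs above (`docker-0:33-19478248-pool`) can be derived directly from a stat of the $graph directory. The standalone Go sketch below mirrors the `fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)` call visible in `deviceset.go` later in this diff; the `devicePrefix` helper and the inline major/minor bit extraction (standard Linux dev_t layout) are illustrative assumptions for this sketch, not code from the patch, and it only builds on Linux:

```go
package main

import (
	"fmt"
	"syscall"
)

// devicePrefix derives the thin-pool name prefix for a given graph
// directory: the major/minor numbers of the device holding it, plus
// the directory's inode number. (Hypothetical helper for illustration.)
func devicePrefix(graphDir string) (string, error) {
	var st syscall.Stat_t
	if err := syscall.Stat(graphDir, &st); err != nil {
		return "", err
	}
	// Unpack the Linux dev_t encoding into major/minor numbers,
	// much as deviceset.go's own major()/minor() helpers do.
	major := (st.Dev >> 8) & 0xfff
	minor := (st.Dev & 0xff) | ((st.Dev >> 12) & 0xfff00)
	return fmt.Sprintf("docker-%d:%d-%d", major, minor, st.Ino), nil
}

func main() {
	prefix, err := devicePrefix("/var/lib/docker/devicemapper")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(prefix + "-pool") // e.g. docker-0:33-19478248-pool
}
```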
Example use: @@ -126,6 +130,15 @@ Here is the list of supported options: ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1`` + * `dm.blocksize` + + Specifies a custom blocksize to use for the thin pool. The default + blocksize is 64K. + + Example use: + + ``docker -d --storage-opt dm.blocksize=512K`` + * `dm.blkdiscard` Enables or disables the use of blkdiscard when removing diff --git a/components/engine/daemon/graphdriver/devmapper/attach_loopback.go b/components/engine/daemon/graphdriver/devmapper/attach_loopback.go index 28a648a5e1..86714d1959 100644 --- a/components/engine/daemon/graphdriver/devmapper/attach_loopback.go +++ b/components/engine/daemon/graphdriver/devmapper/attach_loopback.go @@ -1,4 +1,4 @@ -// +build linux,amd64 +// +build linux package devmapper @@ -7,7 +7,7 @@ import ( "os" "syscall" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/pkg/log" ) func stringToLoopName(src string) [LoNameSize]uint8 { @@ -39,20 +39,20 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil fi, err := os.Stat(target) if err != nil { if os.IsNotExist(err) { - utils.Errorf("There are no more loopback devices available.") + log.Errorf("There are no more loopback devices available.") } return nil, ErrAttachLoopbackDevice } if fi.Mode()&os.ModeDevice != os.ModeDevice { - utils.Errorf("Loopback device %s is not a block device.", target) + log.Errorf("Loopback device %s is not a block device.", target) continue } // OpenFile adds O_CLOEXEC loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) if err != nil { - utils.Errorf("Error openning loopback device: %s", err) + log.Errorf("Error opening loopback device: %s", err) return nil, ErrAttachLoopbackDevice } @@ -62,7 +62,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.Fil // If the error is EBUSY, then try the next loopback if err != syscall.EBUSY { - utils.Errorf("Cannot set up loopback device %s: %s", target, err) + log.Errorf("Cannot set up loopback device %s: %s", target, err) return nil, ErrAttachLoopbackDevice } @@ -75,7 +75,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.Fil // This can't happen, but let's be sure if loopFile == nil { - utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) return nil, ErrAttachLoopbackDevice } @@ -91,13 +91,13 @@ func attachLoopDevice(sparseName string) (loop *os.File, err error) { // loopback from index 0.
startIndex, err := getNextFreeLoopbackIndex() if err != nil { - utils.Debugf("Error retrieving the next available loopback: %s", err) + log.Debugf("Error retrieving the next available loopback: %s", err) } // OpenFile adds O_CLOEXEC sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) if err != nil { - utils.Errorf("Error openning sparse file %s: %s", sparseName, err) + log.Errorf("Error opening sparse file %s: %s", sparseName, err) return nil, ErrAttachLoopbackDevice } defer sparseFile.Close() @@ -115,11 +115,11 @@ func attachLoopDevice(sparseName string) (loop *os.File, err error) { } if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { - utils.Errorf("Cannot set up loopback device info: %s", err) + log.Errorf("Cannot set up loopback device info: %s", err) // If the call failed, then free the loopback device if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { - utils.Errorf("Error while cleaning up the loopback device") + log.Errorf("Error while cleaning up the loopback device") } loopFile.Close() return nil, ErrAttachLoopbackDevice diff --git a/components/engine/daemon/graphdriver/devmapper/deviceset.go b/components/engine/daemon/graphdriver/devmapper/deviceset.go index 31c3f391e4..42315c6e82 100644 --- a/components/engine/daemon/graphdriver/devmapper/deviceset.go +++ b/components/engine/daemon/graphdriver/devmapper/deviceset.go @@ -1,4 +1,4 @@ -// +build linux,amd64 +// +build linux package devmapper @@ -18,16 +18,18 @@ import ( "syscall" "time" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/units" "github.com/docker/libcontainer/label" - "github.com/dotcloud/docker/daemon/graphdriver" - "github.com/dotcloud/docker/pkg/units" - "github.com/dotcloud/docker/utils" ) var ( DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors ) type DevInfo struct { @@ -78,6 +80,7 @@ type DeviceSet struct { dataDevice string metadataDevice string doBlkDiscard bool + thinpBlockSize uint32 } type DiskUsage struct { @@ -171,7 +174,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { if !os.IsNotExist(err) { return "", err } - utils.Debugf("Creating loopback file %s for device-manage use", filename) + log.Debugf("Creating loopback file %s for device-manage use", filename) file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) if err != nil { return "", err @@ -249,7 +252,7 @@ func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { } func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { - utils.Debugf("registerDevice(%v, %v)", id, hash) + log.Debugf("registerDevice(%v, %v)", id, hash) info := &DevInfo{ Hash: hash, DeviceId: id, @@ -275,7 +278,7 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev } func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { - utils.Debugf("activateDeviceIfNeeded(%v)", info.Hash) + log.Debugf("activateDeviceIfNeeded(%v)", info.Hash) if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { return nil @@ -382,13 +385,13 @@ func (devices *DeviceSet) setupBaseImage() error { } if oldInfo != nil && !oldInfo.Initialized { - utils.Debugf("Removing uninitialized base image") + log.Debugf("Removing uninitialized base
image") if err := devices.deleteDevice(oldInfo); err != nil { return err } } - utils.Debugf("Initializing base device-manager snapshot") + log.Debugf("Initializing base device-manager snapshot") id := devices.nextDeviceId @@ -400,14 +403,14 @@ func (devices *DeviceSet) setupBaseImage() error { // Ids are 24bit, so wrap around devices.nextDeviceId = (id + 1) & 0xffffff - utils.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize) + log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize) info, err := devices.registerDevice(id, "", devices.baseFsSize) if err != nil { _ = deleteDevice(devices.getPoolDevName(), id) return err } - utils.Debugf("Creating filesystem on base device-manager snapshot") + log.Debugf("Creating filesystem on base device-manager snapshot") if err = devices.activateDeviceIfNeeded(info); err != nil { return err @@ -445,7 +448,7 @@ func (devices *DeviceSet) log(level int, file string, line int, dmError int, mes return // Ignore _LOG_DEBUG } - utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } func major(device uint64) uint64 { @@ -510,7 +513,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { } // Reload with the new block sizes - if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback); err != nil { + if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { return fmt.Errorf("Unable to reload pool: %s", err) } @@ -549,13 +552,13 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // - The target of this device is at major and minor // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) - utils.Debugf("Generated prefix: %s", devices.devicePrefix) + log.Debugf("Generated prefix: %s", devices.devicePrefix) // Check for the existence of the device -pool - utils.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) + log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) info, err := getInfo(devices.getPoolName()) if info == nil { - utils.Debugf("Error device getInfo: %s", err) + log.Debugf("Error device getInfo: %s", err) return err } @@ -571,7 +574,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // If the pool doesn't exist, create it if info.Exists == 0 { - utils.Debugf("Pool doesn't exist. Creating it.") + log.Debugf("Pool doesn't exist. 
Creating it.") var ( dataFile *os.File @@ -593,7 +596,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { data, err := devices.ensureImage("data", devices.dataLoopbackSize) if err != nil { - utils.Debugf("Error device ensureImage (data): %s\n", err) + log.Debugf("Error device ensureImage (data): %s", err) return err } @@ -624,7 +627,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) if err != nil { - utils.Debugf("Error device ensureImage (metadata): %s\n", err) + log.Debugf("Error device ensureImage (metadata): %s", err) return err } @@ -640,7 +643,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { } defer metadataFile.Close() - if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil { + if err := createPool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { return err } } @@ -656,7 +659,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // Setup the base image if doInit { if err := devices.setupBaseImage(); err != nil { - utils.Debugf("Error device setupBaseImage: %s\n", err) + log.Debugf("Error device setupBaseImage: %s", err) return err } } @@ -683,7 +686,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { deviceId := devices.nextDeviceId if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { - utils.Debugf("Error creating snap device: %s\n", err) + log.Debugf("Error creating snap device: %s", err) return err } @@ -692,7 +695,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { deleteDevice(devices.getPoolDevName(), deviceId) - utils.Debugf("Error registering device: %s\n", err) + log.Debugf("Error registering device: %s", err) return err } return nil @@ -705,7 +708,7 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { // manually if err := devices.activateDeviceIfNeeded(info); err == nil { if err := BlockDeviceDiscard(info.DevName()); err != nil { - utils.Debugf("Error discarding block on device: %s (ignoring)\n", err) + log.Debugf("Error discarding block on device: %s (ignoring)", err) } } } @@ -713,13 +716,13 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { devinfo, _ := getInfo(info.Name()) if devinfo != nil && devinfo.Exists != 0 { if err := devices.removeDeviceAndWait(info.Name()); err != nil { - utils.Debugf("Error removing device: %s\n", err) + log.Debugf("Error removing device: %s", err) return err } } if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { - utils.Debugf("Error deleting device: %s\n", err) + log.Debugf("Error deleting device: %s", err) return err } @@ -732,7 +735,7 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { devices.devicesLock.Lock() devices.Devices[info.Hash] = info devices.devicesLock.Unlock() - utils.Debugf("Error removing meta data: %s\n", err) + log.Debugf("Error removing meta data: %s", err) return err } @@ -755,8 +758,8 @@ func (devices *DeviceSet) DeleteDevice(hash string) error { } func (devices *DeviceSet) deactivatePool() error { - utils.Debugf("[devmapper] deactivatePool()") - defer utils.Debugf("[devmapper] deactivatePool END") + log.Debugf("[devmapper] deactivatePool()") + defer log.Debugf("[devmapper] deactivatePool END") devname := devices.getPoolDevName() devinfo, err := getInfo(devname) if err != nil { 
@@ -770,13 +773,13 @@ func (devices *DeviceSet) deactivatePool() error { } func (devices *DeviceSet) deactivateDevice(info *DevInfo) error { - utils.Debugf("[devmapper] deactivateDevice(%s)", info.Hash) - defer utils.Debugf("[devmapper] deactivateDevice END") + log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash) + defer log.Debugf("[devmapper] deactivateDevice END") // Wait for the unmount to be effective, // by watching the value of Info.OpenCount for the device if err := devices.waitClose(info); err != nil { - utils.Errorf("Warning: error waiting for device %s to close: %s\n", info.Hash, err) + log.Errorf("Warning: error waiting for device %s to close: %s", info.Hash, err) } devinfo, err := getInfo(info.Name()) @@ -826,8 +829,8 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error { // a) the device registered at - is removed, // or b) the 10 second timeout expires. func (devices *DeviceSet) waitRemove(devname string) error { - utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) - defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) + log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) + defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) i := 0 for ; i < 1000; i += 1 { devinfo, err := getInfo(devname) @@ -837,7 +840,7 @@ func (devices *DeviceSet) waitRemove(devname string) error { return nil } if i%100 == 0 { - utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists) + log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists) } if devinfo.Exists == 0 { break @@ -864,7 +867,7 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error { return err } if i%100 == 0 { - utils.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount) + log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount) } if devinfo.OpenCount == 0 { break @@ -881,9 +884,9 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error { func (devices *DeviceSet) Shutdown() error { - utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) - utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) - defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) + log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) + log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) + defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) var devs []*DevInfo @@ -900,12 +903,12 @@ func (devices *DeviceSet) Shutdown() error { // container. This means it'll go away from the global scope directly, // and the device will be released when that container dies. 
if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { - utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) + log.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err) } devices.Lock() if err := devices.deactivateDevice(info); err != nil { - utils.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err) + log.Debugf("Shutdown deactivate %s , error: %s", info.Hash, err) } devices.Unlock() } @@ -917,7 +920,7 @@ func (devices *DeviceSet) Shutdown() error { info.lock.Lock() devices.Lock() if err := devices.deactivateDevice(info); err != nil { - utils.Debugf("Shutdown deactivate base , error: %s\n", err) + log.Debugf("Shutdown deactivate base , error: %s", err) } devices.Unlock() info.lock.Unlock() @@ -925,7 +928,7 @@ func (devices *DeviceSet) Shutdown() error { devices.Lock() if err := devices.deactivatePool(); err != nil { - utils.Debugf("Shutdown deactivate pool , error: %s\n", err) + log.Debugf("Shutdown deactivate pool , error: %s", err) } devices.Unlock() @@ -989,8 +992,8 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { } func (devices *DeviceSet) UnmountDevice(hash string) error { - utils.Debugf("[devmapper] UnmountDevice(hash=%s)", hash) - defer utils.Debugf("[devmapper] UnmountDevice END") + log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash) + defer log.Debugf("[devmapper] UnmountDevice END") info, err := devices.lookupDevice(hash) if err != nil { @@ -1012,11 +1015,11 @@ func (devices *DeviceSet) UnmountDevice(hash string) error { return nil } - utils.Debugf("[devmapper] Unmount(%s)", info.mountPath) + log.Debugf("[devmapper] Unmount(%s)", info.mountPath) if err := syscall.Unmount(info.mountPath, 0); err != nil { return err } - utils.Debugf("[devmapper] Unmount done") + log.Debugf("[devmapper] Unmount done") if err := devices.deactivateDevice(info); err != nil { return err @@ -1159,30 +1162,31 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error baseFsSize: DefaultBaseFsSize, filesystem: "ext4", doBlkDiscard: true, + thinpBlockSize: DefaultThinpBlockSize, } foundBlkDiscard := false for _, option := range options { - key, val, err := utils.ParseKeyValueOpt(option) + key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "dm.basesize": - size, err := units.FromHumanSize(val) + size, err := units.RAMInBytes(val) if err != nil { return nil, err } devices.baseFsSize = uint64(size) case "dm.loopdatasize": - size, err := units.FromHumanSize(val) + size, err := units.RAMInBytes(val) if err != nil { return nil, err } devices.dataLoopbackSize = size case "dm.loopmetadatasize": - size, err := units.FromHumanSize(val) + size, err := units.RAMInBytes(val) if err != nil { return nil, err } @@ -1206,6 +1210,13 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error if err != nil { return nil, err } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 default: return nil, fmt.Errorf("Unknown option %s\n", key) } diff --git a/components/engine/daemon/graphdriver/devmapper/devmapper.go b/components/engine/daemon/graphdriver/devmapper/devmapper.go index a6602c276e..d09e740749 100644 --- a/components/engine/daemon/graphdriver/devmapper/devmapper.go +++ b/components/engine/daemon/graphdriver/devmapper/devmapper.go @@ -1,4 +1,4 @@ -// +build linux,amd64 +// +build 
linux package devmapper @@ -9,7 +9,7 @@ import ( "runtime" "syscall" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/pkg/log" ) type DevmapperLogger interface { @@ -198,7 +198,7 @@ func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { loopInfo, err := ioctlLoopGetStatus64(file.Fd()) if err != nil { - utils.Errorf("Error get loopback backing file: %s\n", err) + log.Errorf("Error get loopback backing file: %s", err) return 0, 0, ErrGetLoopbackBackingFile } return loopInfo.loDevice, loopInfo.loInode, nil @@ -206,7 +206,7 @@ func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { func LoopbackSetCapacity(file *os.File) error { if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { - utils.Errorf("Error loopbackSetCapacity: %s", err) + log.Errorf("Error loopbackSetCapacity: %s", err) return ErrLoopbackSetCapacity } return nil @@ -246,7 +246,7 @@ func FindLoopDeviceFor(file *os.File) *os.File { func UdevWait(cookie uint) error { if res := DmUdevWait(cookie); res != 1 { - utils.Debugf("Failed to wait on udev cookie %d", cookie) + log.Debugf("Failed to wait on udev cookie %d", cookie) return ErrUdevWait } return nil @@ -265,7 +265,7 @@ func logInit(logger DevmapperLogger) { func SetDevDir(dir string) error { if res := DmSetDevDir(dir); res != 1 { - utils.Debugf("Error dm_set_dev_dir") + log.Debugf("Error dm_set_dev_dir") return ErrSetDevDir } return nil @@ -286,7 +286,7 @@ func RemoveDevice(name string) error { return ErrCreateRemoveTask } if err := task.SetName(name); err != nil { - utils.Debugf("Can't set task name %s", name) + log.Debugf("Can't set task name %s", name) return err } if err := task.Run(); err != nil { @@ -298,7 +298,7 @@ func RemoveDevice(name string) error { func GetBlockDeviceSize(file *os.File) (uint64, error) { size, err := ioctlBlkGetSize64(file.Fd()) if err != nil { - utils.Errorf("Error getblockdevicesize: %s", err) + log.Errorf("Error getblockdevicesize: %s", err) return 0, ErrGetBlockSize } return uint64(size), nil @@ -328,7 +328,7 @@ func BlockDeviceDiscard(path string) error { } // This is the programmatic example of "dmsetup create" -func createPool(poolName string, dataFile, metadataFile *os.File) error { +func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { task, err := createTask(DeviceCreate, poolName) if task == nil { return err @@ -339,7 +339,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File) error { return fmt.Errorf("Can't get data size %s", err) } - params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing" + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { return fmt.Errorf("Can't add target %s", err) } @@ -358,7 +358,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File) error { return nil } -func reloadPool(poolName string, dataFile, metadataFile *os.File) error { +func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { task, err := createTask(DeviceReload, poolName) if task == nil { return err @@ -369,7 +369,7 @@ func reloadPool(poolName string, dataFile, metadataFile *os.File) error { return fmt.Errorf("Can't get data size %s", err) } - params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768" + params := fmt.Sprintf("%s %s %d 32768 1 
skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { return fmt.Errorf("Can't add target %s", err) } @@ -417,21 +417,21 @@ func getDriverVersion() (string, error) { func getStatus(name string) (uint64, uint64, string, string, error) { task, err := createTask(DeviceStatus, name) if task == nil { - utils.Debugf("getStatus: Error createTask: %s", err) + log.Debugf("getStatus: Error createTask: %s", err) return 0, 0, "", "", err } if err := task.Run(); err != nil { - utils.Debugf("getStatus: Error Run: %s", err) + log.Debugf("getStatus: Error Run: %s", err) return 0, 0, "", "", err } devinfo, err := task.GetInfo() if err != nil { - utils.Debugf("getStatus: Error GetInfo: %s", err) + log.Debugf("getStatus: Error GetInfo: %s", err) return 0, 0, "", "", err } if devinfo.Exists == 0 { - utils.Debugf("getStatus: Non existing device %s", name) + log.Debugf("getStatus: Non existing device %s", name) return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) } @@ -491,7 +491,7 @@ func resumeDevice(name string) error { } func createDevice(poolName string, deviceId *int) error { - utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) + log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) for { task, err := createTask(DeviceTargetMsg, poolName) @@ -542,8 +542,8 @@ func deleteDevice(poolName string, deviceId int) error { } func removeDevice(name string) error { - utils.Debugf("[devmapper] removeDevice START") - defer utils.Debugf("[devmapper] removeDevice END") + log.Debugf("[devmapper] removeDevice START") + defer log.Debugf("[devmapper] removeDevice END") task, err := createTask(DeviceRemove, name) if task == nil { return err diff --git a/components/engine/daemon/graphdriver/devmapper/devmapper_log.go b/components/engine/daemon/graphdriver/devmapper/devmapper_log.go index cdeaed2525..ec7809cc51 100644 --- a/components/engine/daemon/graphdriver/devmapper/devmapper_log.go +++ b/components/engine/daemon/graphdriver/devmapper/devmapper_log.go @@ -1,4 +1,4 @@ -// +build linux,amd64 +// +build linux package devmapper diff --git a/components/engine/daemon/graphdriver/devmapper/devmapper_test.go b/components/engine/daemon/graphdriver/devmapper/devmapper_test.go index 7c97d6bb04..167261999e 100644 --- a/components/engine/daemon/graphdriver/devmapper/devmapper_test.go +++ b/components/engine/daemon/graphdriver/devmapper/devmapper_test.go @@ -1,9 +1,9 @@ -// +build linux,amd64 +// +build linux package devmapper import ( - "github.com/dotcloud/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/daemon/graphdriver/graphtest" "testing" ) diff --git a/components/engine/daemon/graphdriver/devmapper/devmapper_wrapper.go b/components/engine/daemon/graphdriver/devmapper/devmapper_wrapper.go index 9f1b5a6054..bd1c6fd5b6 100644 --- a/components/engine/daemon/graphdriver/devmapper/devmapper_wrapper.go +++ b/components/engine/daemon/graphdriver/devmapper/devmapper_wrapper.go @@ -1,4 +1,4 @@ -// +build linux,amd64 +// +build linux package devmapper diff --git a/components/engine/daemon/graphdriver/devmapper/driver.go b/components/engine/daemon/graphdriver/devmapper/driver.go index cf82ad62ed..4c13eb0421 100644 --- a/components/engine/daemon/graphdriver/devmapper/driver.go +++ b/components/engine/daemon/graphdriver/devmapper/driver.go @@ -1,4 +1,4 @@ -// +build linux,amd64 +// +build linux package devmapper @@ -8,9 +8,9 @@ import ( "os" "path" - 
"github.com/dotcloud/docker/daemon/graphdriver" - "github.com/dotcloud/docker/pkg/mount" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/mount" ) func init() { @@ -54,6 +54,7 @@ func (d *Driver) Status() [][2]string { status := [][2]string{ {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%d Kb", s.SectorSize/1024)}, {"Data file", s.DataLoopback}, {"Metadata file", s.MetadataLoopback}, {"Data Space Used", fmt.Sprintf("%.1f Mb", float64(s.Data.Used)/(1024*1024))}, @@ -137,7 +138,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { func (d *Driver) Put(id string) { if err := d.DeviceSet.UnmountDevice(id); err != nil { - utils.Errorf("Warning: error unmounting device %s: %s\n", id, err) + log.Errorf("Warning: error unmounting device %s: %s", id, err) } } diff --git a/components/engine/daemon/graphdriver/devmapper/ioctl.go b/components/engine/daemon/graphdriver/devmapper/ioctl.go index 8f403da2b0..29caab0664 100644 --- a/components/engine/daemon/graphdriver/devmapper/ioctl.go +++ b/components/engine/daemon/graphdriver/devmapper/ioctl.go @@ -1,4 +1,4 @@ -// +build linux,amd64 +// +build linux package devmapper diff --git a/components/engine/daemon/graphdriver/devmapper/mount.go b/components/engine/daemon/graphdriver/devmapper/mount.go index c9ff216d5d..f64e995744 100644 --- a/components/engine/daemon/graphdriver/devmapper/mount.go +++ b/components/engine/daemon/graphdriver/devmapper/mount.go @@ -1,4 +1,4 @@ -// +build linux,amd64 +// +build linux package devmapper diff --git a/components/engine/daemon/graphdriver/driver.go b/components/engine/daemon/graphdriver/driver.go index 4536489706..90ed1d8162 100644 --- a/components/engine/daemon/graphdriver/driver.go +++ b/components/engine/daemon/graphdriver/driver.go @@ -6,8 +6,8 @@ import ( "os" "path" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/pkg/mount" + "github.com/docker/docker/archive" + "github.com/docker/docker/pkg/mount" ) type FsMagic uint64 diff --git a/components/engine/daemon/graphdriver/graphtest/graphtest.go b/components/engine/daemon/graphdriver/graphtest/graphtest.go index a667f2afa6..6407e1205d 100644 --- a/components/engine/daemon/graphdriver/graphtest/graphtest.go +++ b/components/engine/daemon/graphdriver/graphtest/graphtest.go @@ -7,7 +7,7 @@ import ( "syscall" "testing" - "github.com/dotcloud/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver" ) var ( diff --git a/components/engine/daemon/graphdriver/vfs/driver.go b/components/engine/daemon/graphdriver/vfs/driver.go index 7eaa22461f..2ea6325a1e 100644 --- a/components/engine/daemon/graphdriver/vfs/driver.go +++ b/components/engine/daemon/graphdriver/vfs/driver.go @@ -3,7 +3,7 @@ package vfs import ( "bytes" "fmt" - "github.com/dotcloud/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver" "os" "os/exec" "path" diff --git a/components/engine/daemon/graphdriver/vfs/vfs_test.go b/components/engine/daemon/graphdriver/vfs/vfs_test.go index e79f93c91d..eaf70f59d3 100644 --- a/components/engine/daemon/graphdriver/vfs/vfs_test.go +++ b/components/engine/daemon/graphdriver/vfs/vfs_test.go @@ -1,7 +1,7 @@ package vfs import ( - "github.com/dotcloud/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/daemon/graphdriver/graphtest" "testing" ) diff --git a/components/engine/daemon/image_delete.go b/components/engine/daemon/image_delete.go new file mode 100644 index 
0000000000..77e8f85907 --- /dev/null +++ b/components/engine/daemon/image_delete.go @@ -0,0 +1,156 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/utils" +) + +func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + imgs := engine.NewTable("", 0) + if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil { + return job.Error(err) + } + if len(imgs.Data) == 0 { + return job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) + } + if _, err := imgs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// FIXME: make this private and use the job instead +func (daemon *Daemon) DeleteImage(eng *engine.Engine, name string, imgs *engine.Table, first, force, noprune bool) error { + var ( + repoName, tag string + tags = []string{} + tagDeleted bool + ) + + // FIXME: please respect DRY and centralize repo+tag parsing in a single central place! -- shykes + repoName, tag = parsers.ParseRepositoryTag(name) + if tag == "" { + tag = graph.DEFAULTTAG + } + + img, err := daemon.Repositories().LookupImage(name) + if err != nil { + if r, _ := daemon.Repositories().Get(repoName); r != nil { + return fmt.Errorf("No such image: %s:%s", repoName, tag) + } + return fmt.Errorf("No such image: %s", name) + } + + if strings.Contains(img.ID, name) { + repoName = "" + tag = "" + } + + byParents, err := daemon.Graph().ByParent() + if err != nil { + return err + } + + // If deleting by ID, check whether the ID belongs to only one repository + if repoName == "" { + for _, repoAndTag := range daemon.Repositories().ByID()[img.ID] { + parsedRepo, parsedTag := parsers.ParseRepositoryTag(repoAndTag) + if repoName == "" || repoName == parsedRepo { + repoName = parsedRepo + if parsedTag != "" { + tags = append(tags, parsedTag) + } + } else if repoName != parsedRepo && !force { + // the id belongs to multiple repos, like base:latest and user:test, + // in that case return conflict + return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) + } + } + } else { + tags = append(tags, tag) + } + + if !first && len(tags) > 0 { + return nil + } + + // Untag the current image + for _, tag := range tags { + tagDeleted, err = daemon.Repositories().Delete(repoName, tag) + if err != nil { + return err + } + if tagDeleted { + out := &engine.Env{} + out.Set("Untagged", repoName+":"+tag) + imgs.Add(out) + eng.Job("log", "untag", img.ID, "").Run() + } + } + tags = daemon.Repositories().ByID()[img.ID] + if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { + if len(byParents[img.ID]) == 0 { + if err := daemon.canDeleteImage(img.ID, force, tagDeleted); err != nil { + return err + } + if err := daemon.Repositories().DeleteAll(img.ID); err != nil { + return err + } + if err := daemon.Graph().Delete(img.ID); err != nil { + return err + } + out := &engine.Env{} + out.Set("Deleted", img.ID) + imgs.Add(out) + eng.Job("log", "delete", img.ID, "").Run() + if img.Parent != "" && !noprune { + err := daemon.DeleteImage(eng, img.Parent, imgs, false, force, noprune) + if first { + return err + } + + } + + } + } + return nil +} + +func (daemon *Daemon) canDeleteImage(imgID string, force, untagged bool) error { + var
message string + if untagged { + message = " (docker untagged the image)" + } + for _, container := range daemon.List() { + parent, err := daemon.Repositories().LookupImage(container.Image) + if err != nil { + return err + } + + if err := parent.WalkHistory(func(p *image.Image) error { + if imgID == p.ID { + if container.State.IsRunning() { + if force { + return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it%s, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID), message) + } + return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it%s, stop it and use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message) + } else if !force { + return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it%s, use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message) + } + } + return nil + }); err != nil { + return err + } + } + return nil +} diff --git a/components/engine/daemon/info.go b/components/engine/daemon/info.go new file mode 100644 index 0000000000..3d3c9ba6ca --- /dev/null +++ b/components/engine/daemon/info.go @@ -0,0 +1,74 @@ +package daemon + +import ( + "os" + "runtime" + + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { + images, _ := daemon.Graph().Map() + var imgcount int + if images == nil { + imgcount = 0 + } else { + imgcount = len(images) + } + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err == nil { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err == nil { + operatingSystem = s + } + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + log.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + + // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) + initPath := utils.DockerInitPath("") + if initPath == "" { + // if that fails, we'll just return the path from the daemon + initPath = daemon.SystemInitPath() + } + + cjob := job.Eng.Job("subscribers_count") + env, _ := cjob.Stdout.AddEnv() + if err := cjob.Run(); err != nil { + return job.Error(err) + } + v := &engine.Env{} + v.SetInt("Containers", len(daemon.List())) + v.SetInt("Images", imgcount) + v.Set("Driver", daemon.GraphDriver().String()) + v.SetJson("DriverStatus", daemon.GraphDriver().Status()) + v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit) + v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit) + v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled) + v.SetBool("Debug", os.Getenv("DEBUG") != "") + v.SetInt("NFd", utils.GetTotalUsedFds()) + v.SetInt("NGoroutines", runtime.NumGoroutine()) + v.Set("ExecutionDriver", daemon.ExecutionDriver().Name()) + v.SetInt("NEventsListener", env.GetInt("count")) + v.Set("KernelVersion", kernelVersion) + v.Set("OperatingSystem", operatingSystem) + v.Set("IndexServerAddress", 
registry.IndexServerAddress()) + v.Set("InitSha1", dockerversion.INITSHA1) + v.Set("InitPath", initPath) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/components/engine/daemon/inspect.go b/components/engine/daemon/inspect.go index b93aec5059..373b43b8b6 100644 --- a/components/engine/daemon/inspect.go +++ b/components/engine/daemon/inspect.go @@ -4,8 +4,8 @@ import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/runconfig" + "github.com/docker/docker/engine" + "github.com/docker/docker/runconfig" ) func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go new file mode 100644 index 0000000000..f5f5897c88 --- /dev/null +++ b/components/engine/daemon/kill.go @@ -0,0 +1,59 @@ +package daemon + +import ( + "strconv" + "strings" + "syscall" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/signal" +) + +// ContainerKill sends a signal to the container. +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status { + if n := len(job.Args); n < 1 || n > 2 { + return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) + } + var ( + name = job.Args[0] + sig uint64 + err error + ) + + // If we have a signal, look at it. Otherwise, do nothing + if len(job.Args) == 2 && job.Args[1] != "" { + // Check if we passed the signal as a number: + // The largest legal signal is 31, so let's parse on 5 bits + sig, err = strconv.ParseUint(job.Args[1], 10, 5) + if err != nil { + // The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL") + sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")]) + } + + if sig == 0 { + return job.Errorf("Invalid signal: %s", job.Args[1]) + } + } + + if container := daemon.Get(name); container != nil { + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + if err := container.Kill(); err != nil { + return job.Errorf("Cannot kill container %s: %s", name, err) + } + container.LogEvent("kill") + } else { + // Otherwise, just send the requested signal + if err := container.KillSig(int(sig)); err != nil { + return job.Errorf("Cannot kill container %s: %s", name, err) + } + // FIXME: Add event for signals + } + } else { + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +} diff --git a/components/engine/daemon/list.go b/components/engine/daemon/list.go new file mode 100644 index 0000000000..2da5254866 --- /dev/null +++ b/components/engine/daemon/list.go @@ -0,0 +1,151 @@ +package daemon + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/docker/docker/pkg/graphdb" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/parsers/filters" +) + +// List returns an array of all containers registered in the daemon.
+func (daemon *Daemon) List() []*Container { + return daemon.containers.List() +} + +func (daemon *Daemon) Containers(job *engine.Job) engine.Status { + var ( + foundBefore bool + displayed int + all = job.GetenvBool("all") + since = job.Getenv("since") + before = job.Getenv("before") + n = job.GetenvInt("limit") + size = job.GetenvBool("size") + psFilters filters.Args + filt_exited []int + ) + outs := engine.NewTable("Created", 0) + + psFilters, err := filters.FromParam(job.Getenv("filters")) + if err != nil { + return job.Error(err) + } + if i, ok := psFilters["exited"]; ok { + for _, value := range i { + code, err := strconv.Atoi(value) + if err != nil { + return job.Error(err) + } + filt_exited = append(filt_exited, code) + } + } + + names := map[string][]string{} + daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { + names[e.ID()] = append(names[e.ID()], p) + return nil + }, -1) + + var beforeCont, sinceCont *Container + if before != "" { + beforeCont = daemon.Get(before) + if beforeCont == nil { + return job.Error(fmt.Errorf("Could not find container with name or id %s", before)) + } + } + + if since != "" { + sinceCont = daemon.Get(since) + if sinceCont == nil { + return job.Error(fmt.Errorf("Could not find container with name or id %s", since)) + } + } + + errLast := errors.New("last container") + writeCont := func(container *Container) error { + container.Lock() + defer container.Unlock() + if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" { + return nil + } + if before != "" && !foundBefore { + if container.ID == beforeCont.ID { + foundBefore = true + } + return nil + } + if n > 0 && displayed == n { + return errLast + } + if since != "" { + if container.ID == sinceCont.ID { + return errLast + } + } + if len(filt_exited) > 0 && !container.State.IsRunning() { + should_skip := true + for _, code := range filt_exited { + if code == container.State.GetExitCode() { + should_skip = false + break + } + } + if should_skip { + return nil + } + } + displayed++ + out := &engine.Env{} + out.Set("Id", container.ID) + out.SetList("Names", names[container.ID]) + out.Set("Image", daemon.Repositories().ImageName(container.Image)) + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString)) + } else { + out.Set("Command", fmt.Sprintf("\"%s\"", container.Path)) + } + out.SetInt64("Created", container.Created.Unix()) + out.Set("Status", container.State.String()) + str, err := container.NetworkSettings.PortMappingAPI().ToListString() + if err != nil { + return err + } + out.Set("Ports", str) + if size { + sizeRw, sizeRootFs := container.GetSize() + out.SetInt64("SizeRw", sizeRw) + out.SetInt64("SizeRootFs", sizeRootFs) + } + outs.Add(out) + return nil + } + + for _, container := range daemon.List() { + if err := writeCont(container); err != nil { + if err != errLast { + return job.Error(err) + } + break + } + } + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/components/engine/daemon/logs.go b/components/engine/daemon/logs.go new file mode 100644 index 0000000000..386d9c69c3 --- /dev/null +++ b/components/engine/daemon/logs.go @@ -0,0 +1,134 @@ +package daemon + +import ( + 
"bytes" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + "time" + + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/tailfile" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/jsonlog" +) + +func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + + var ( + name = job.Args[0] + stdout = job.GetenvBool("stdout") + stderr = job.GetenvBool("stderr") + tail = job.Getenv("tail") + follow = job.GetenvBool("follow") + times = job.GetenvBool("timestamps") + lines = -1 + format string + ) + if !(stdout || stderr) { + return job.Errorf("You must choose at least one stream") + } + if times { + format = time.RFC3339Nano + } + if tail == "" { + tail = "all" + } + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + cLog, err := container.ReadLog("json") + if err != nil && os.IsNotExist(err) { + // Legacy logs + log.Debugf("Old logs format") + if stdout { + cLog, err := container.ReadLog("stdout") + if err != nil { + log.Errorf("Error reading logs (stdout): %s", err) + } else if _, err := io.Copy(job.Stdout, cLog); err != nil { + log.Errorf("Error streaming logs (stdout): %s", err) + } + } + if stderr { + cLog, err := container.ReadLog("stderr") + if err != nil { + log.Errorf("Error reading logs (stderr): %s", err) + } else if _, err := io.Copy(job.Stderr, cLog); err != nil { + log.Errorf("Error streaming logs (stderr): %s", err) + } + } + } else if err != nil { + log.Errorf("Error reading logs (json): %s", err) + } else { + if tail != "all" { + var err error + lines, err = strconv.Atoi(tail) + if err != nil { + log.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err) + lines = -1 + } + } + if lines != 0 { + if lines > 0 { + f := cLog.(*os.File) + ls, err := tailfile.TailFile(f, lines) + if err != nil { + return job.Error(err) + } + tmp := bytes.NewBuffer([]byte{}) + for _, l := range ls { + fmt.Fprintf(tmp, "%s\n", l) + } + cLog = tmp + } + dec := json.NewDecoder(cLog) + for { + l := &jsonlog.JSONLog{} + + if err := dec.Decode(l); err == io.EOF { + break + } else if err != nil { + log.Errorf("Error streaming logs: %s", err) + break + } + logLine := l.Log + if times { + logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine) + } + if l.Stream == "stdout" && stdout { + fmt.Fprintf(job.Stdout, "%s", logLine) + } + if l.Stream == "stderr" && stderr { + fmt.Fprintf(job.Stderr, "%s", logLine) + } + } + } + } + if follow { + errors := make(chan error, 2) + if stdout { + stdoutPipe := container.StdoutLogPipe() + go func() { + errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format) + }() + } + if stderr { + stderrPipe := container.StderrLogPipe() + go func() { + errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format) + }() + } + err := <-errors + if err != nil { + log.Errorf("%s", err) + } + } + return engine.StatusOK +} diff --git a/components/engine/daemon/monitor.go b/components/engine/daemon/monitor.go new file mode 100644 index 0000000000..1a929656fa --- /dev/null +++ b/components/engine/daemon/monitor.go @@ -0,0 +1,315 @@ +package daemon + +import ( + "io" + "os/exec" + "sync" + "time" + + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/runconfig" +) + +const defaultTimeIncrement = 100 + +// containerMonitor monitors the execution of a container's main process. 
+// If a restart policy is specified for the container the monitor will ensure that the +// process is restarted based on the rules of the policy. When the container is finally stopped +// the monitor will reset and clean up any of the container resources such as networking allocations +// and the rootfs +type containerMonitor struct { + mux sync.Mutex + + // container is the container being monitored + container *Container + + // restartPolicy is the current policy being applied to the container monitor + restartPolicy runconfig.RestartPolicy + + // failureCount is the number of times the container has failed to + // start in a row + failureCount int + + // shouldStop signals the monitor that the next time the container exits it is + // either because docker or the user asked for the container to be stopped + shouldStop bool + + // startSignal is a channel that is closed after the container initially starts + startSignal chan struct{} + + // stopChan is used to signal to the monitor whenever there is a wait for the + // next restart so that the timeIncrement is not honored and the user is not + // left waiting for nothing to happen during this time + stopChan chan struct{} + + // timeIncrement is the amount of time to wait between restarts + // this is in milliseconds + timeIncrement int + + // lastStartTime is the time at which the monitor last exec'd the container's process + lastStartTime time.Time +} + +// newContainerMonitor returns an initialized containerMonitor for the provided container +// honoring the provided restart policy +func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor { + return &containerMonitor{ + container: container, + restartPolicy: policy, + timeIncrement: defaultTimeIncrement, + stopChan: make(chan struct{}), + startSignal: make(chan struct{}), + } +} + +// ExitOnNext signals to the container monitor that it should stop monitoring the container +// for exits the next time the process dies +func (m *containerMonitor) ExitOnNext() { + m.mux.Lock() + + // we need to protect against a double close of the channel when stop is called + // twice or else we will get a panic + if !m.shouldStop { + m.shouldStop = true + close(m.stopChan) + } + + m.mux.Unlock() +}
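// Editor's sketch (not part of the diff): ExitOnNext above relies on the
// guarded-close idiom. Closing a Go channel twice panics, so the boolean
// flag under the mutex makes the close idempotent. All names below are
// illustrative, not the daemon's actual types.
package main

import (
	"fmt"
	"sync"
)

type stopper struct {
	mu       sync.Mutex
	stopped  bool
	stopChan chan struct{}
}

func (s *stopper) ExitOnNext() {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Without the flag, a second call would panic with
	// "close of closed channel".
	if !s.stopped {
		s.stopped = true
		close(s.stopChan)
	}
}

func main() {
	s := &stopper{stopChan: make(chan struct{})}
	s.ExitOnNext()
	s.ExitOnNext() // safe: the flag prevents the double close
	<-s.stopChan   // already closed, so this returns immediately
	fmt.Println("stop requested exactly once")
}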
+ +// Close closes the container's resources such as networking allocations and +// unmounts the container's root filesystem +func (m *containerMonitor) Close() error { + // Cleanup networking and mounts + m.container.cleanup() + + // FIXME: there is a race condition between two RUN instructions in a Dockerfile + // because they share the same runconfig and change the image. Must be fixed + // in builder/builder.go + if err := m.container.toDisk(); err != nil { + log.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err) + + return err + } + + return nil +} + +// Start starts the container's process and monitors it according to the restart policy +func (m *containerMonitor) Start() error { + var ( + err error + exitStatus int + ) + + // this variable indicates that we are under container.Lock + underLock := true + + // ensure that when the monitor finally exits we release the networking and unmount the rootfs + defer func() { + if !underLock { + m.container.Lock() + defer m.container.Unlock() + } + m.Close() + }() + + // reset the restart count + m.container.RestartCount = -1 + + for { + m.container.RestartCount++ + + if err := m.container.startLoggingToDisk(); err != nil { + m.resetContainer() + + return err + } + + pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin) + + m.container.LogEvent("start") + + m.lastStartTime = time.Now() + + if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil { + // if we receive an internal error from the initial start of a container then let's + // return it instead of entering the restart loop + if m.container.RestartCount == 0 { + m.resetContainer() + + return err + } + + log.Errorf("Error running container: %s", err) + } + + // here container.Lock is already lost + underLock = false + + m.resetMonitor(err == nil && exitStatus == 0) + + if m.shouldRestart(exitStatus) { + m.container.State.SetRestarting(exitStatus) + + m.container.LogEvent("die") + + m.resetContainer() + + // sleep with a small time increment between each restart to help avoid issues caused by quickly + // restarting the container because of some types of errors ( networking cut out, etc... ) + m.waitForNextRestart() + + // we need to check this before reentering the loop because the waitForNextRestart could have + // been terminated by a request from a user + if m.shouldStop { + m.container.State.SetStopped(exitStatus) + + return err + } + + continue + } + + m.container.State.SetStopped(exitStatus) + + m.container.LogEvent("die") + + m.resetContainer() + + break + } + + return err +}
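// Editor's sketch (not part of the diff): the restart loop in Start above
// waits between restarts using a backoff rule that resetMonitor maintains.
// A standalone sketch of that rule, assuming the 100ms default and the
// 10-second "healthy run" threshold from the code; the backoff type is
// hypothetical.
package main

import (
	"fmt"
	"time"
)

const defaultIncrementMs = 100

type backoff struct{ incrementMs int }

func (b *backoff) next(executionTime time.Duration) time.Duration {
	if executionTime > 10*time.Second {
		// A long, healthy run resets the penalty back to the default.
		b.incrementMs = defaultIncrementMs
	} else {
		// Quick failures double the wait before the next restart.
		b.incrementMs *= 2
	}
	return time.Duration(b.incrementMs) * time.Millisecond
}

func main() {
	b := &backoff{incrementMs: defaultIncrementMs}
	fmt.Println(b.next(1 * time.Second))  // 200ms
	fmt.Println(b.next(2 * time.Second))  // 400ms
	fmt.Println(b.next(15 * time.Second)) // back to 100ms
}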
+ +// resetMonitor resets the stateful fields on the containerMonitor based on the +// previous run's success or failure. Regardless of success, if the container had +// an execution time of more than 10s then reset the timer back to the default +func (m *containerMonitor) resetMonitor(successful bool) { + executionTime := time.Now().Sub(m.lastStartTime).Seconds() + + if executionTime > 10 { + m.timeIncrement = defaultTimeIncrement + } else { + // otherwise we need to increment the amount of time we wait before restarting + // the process. We will build up by multiplying the increment by 2 + m.timeIncrement *= 2 + } + + // the container exited successfully so we need to reset the failure counter + if successful { + m.failureCount = 0 + } else { + m.failureCount++ + } +} + +// waitForNextRestart waits with the default time increment to restart the container unless +// a user or docker asks for the container to be stopped +func (m *containerMonitor) waitForNextRestart() { + select { + case <-time.After(time.Duration(m.timeIncrement) * time.Millisecond): + case <-m.stopChan: + } +} + +// shouldRestart checks the restart policy and applies the rules to determine if +// the container's process should be restarted +func (m *containerMonitor) shouldRestart(exitStatus int) bool { + m.mux.Lock() + defer m.mux.Unlock() + + // do not restart if the user or docker has requested that this container be stopped + if m.shouldStop { + return false + } + + switch m.restartPolicy.Name { + case "always": + return true + case "on-failure": + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount >= max { + log.Debugf("stopping restart of container %s because maximum failure count of %d has been reached", m.container.ID, max) + return false + } + + return exitStatus != 0 + } + + return false +} + +// callback ensures that the container's state is properly updated after we +// receive an ack from the execution drivers +func (m *containerMonitor) callback(command *execdriver.Command) { + if command.Tty { + // The callback is called after the process Start() + // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave + // which we close here. + if c, ok := command.Stdout.(io.Closer); ok { + c.Close() + } + } + + m.container.State.SetRunning(command.Pid()) + + // signal that the process has started + // close channel only if not closed + select { + case <-m.startSignal: + default: + close(m.startSignal) + } + + if err := m.container.ToDisk(); err != nil { + log.Debugf("%s", err) + } +} + +// resetContainer resets the container's IO and ensures that the command is able to be executed again +// by copying the data into a new struct +func (m *containerMonitor) resetContainer() { + container := m.container + + if container.Config.OpenStdin { + if err := container.stdin.Close(); err != nil { + log.Errorf("%s: Error closing stdin: %s", container.ID, err) + } + } + + if err := container.stdout.Clean(); err != nil { + log.Errorf("%s: Error cleaning stdout: %s", container.ID, err) + } + + if err := container.stderr.Clean(); err != nil { + log.Errorf("%s: Error cleaning stderr: %s", container.ID, err) + } + + if container.command != nil && container.command.Terminal != nil { + if err := container.command.Terminal.Close(); err != nil { + log.Errorf("%s: Error closing terminal: %s", container.ID, err) + } + } + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.stdin, container.stdinPipe = io.Pipe() + } + + c := container.command.Cmd + + container.command.Cmd = exec.Cmd{ + Stdin: c.Stdin, + Stdout: c.Stdout, + Stderr: c.Stderr, + Path: c.Path, + Env: c.Env, + ExtraFiles: c.ExtraFiles, + Args: c.Args, + Dir: c.Dir, + SysProcAttr: c.SysProcAttr, + } +} diff --git a/components/engine/daemon/network_settings.go b/components/engine/daemon/network_settings.go index a5c750acfe..bf28ca1b50 100644 --- a/components/engine/daemon/network_settings.go +++ b/components/engine/daemon/network_settings.go @@ -1,8
+1,8 @@ package daemon import ( - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/nat" + "github.com/docker/docker/engine" + "github.com/docker/docker/nat" ) // FIXME: move deprecated port stuff to nat to clean up the core. diff --git a/components/engine/daemon/networkdriver/bridge/driver.go b/components/engine/daemon/networkdriver/bridge/driver.go index a843da0499..06cf37e79f 100644 --- a/components/engine/daemon/networkdriver/bridge/driver.go +++ b/components/engine/daemon/networkdriver/bridge/driver.go @@ -3,20 +3,20 @@ package bridge import ( "fmt" "io/ioutil" - "log" "net" "strings" "sync" + "github.com/docker/docker/daemon/networkdriver" + "github.com/docker/docker/daemon/networkdriver/ipallocator" + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/daemon/networkdriver/portmapper" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/iptables" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/networkfs/resolvconf" + "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/libcontainer/netlink" - "github.com/dotcloud/docker/daemon/networkdriver" - "github.com/dotcloud/docker/daemon/networkdriver/ipallocator" - "github.com/dotcloud/docker/daemon/networkdriver/portallocator" - "github.com/dotcloud/docker/daemon/networkdriver/portmapper" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/networkfs/resolvconf" - "github.com/dotcloud/docker/utils" ) const ( @@ -158,7 +158,7 @@ func InitDriver(job *engine.Job) engine.Status { bridgeNetwork = network - // https://github.com/dotcloud/docker/issues/2768 + // https://github.com/docker/docker/issues/2768 job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP) for name, f := range map[string]engine.Handler{ @@ -176,7 +176,7 @@ func InitDriver(job *engine.Job) engine.Status { func setupIPTables(addr net.Addr, icc bool) error { // Enable NAT - natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"} + natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-o", bridgeIface, "-j", "MASQUERADE"} if !iptables.Exists(natArgs...) { if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { @@ -196,7 +196,7 @@ func setupIPTables(addr net.Addr, icc bool) error { iptables.Raw(append([]string{"-D"}, acceptArgs...)...) if !iptables.Exists(dropArgs...) { - utils.Debugf("Disable inter-container communication") + log.Debugf("Disable inter-container communication") if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) } else if len(output) != 0 { @@ -207,7 +207,7 @@ func setupIPTables(addr net.Addr, icc bool) error { iptables.Raw(append([]string{"-D"}, dropArgs...)...) if !iptables.Exists(acceptArgs...) 
{ - utils.Debugf("Enable inter-container communication") + log.Debugf("Enable inter-container communication") if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { return fmt.Errorf("Unable to allow intercontainer communication: %s", err) } else if len(output) != 0 { @@ -271,7 +271,7 @@ func createBridge(bridgeIP string) error { ifaceAddr = addr break } else { - utils.Debugf("%s %s", addr, err) + log.Debugf("%s %s", addr, err) } } } @@ -280,7 +280,7 @@ func createBridge(bridgeIP string) error { if ifaceAddr == "" { return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface) } - utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) + log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) if err := createBridgeIface(bridgeIface); err != nil { return err @@ -306,11 +306,11 @@ func createBridge(bridgeIP string) error { } func createBridgeIface(name string) error { - kv, err := utils.GetKernelVersion() + kv, err := kernel.GetKernelVersion() // only set the bridge's mac address if the kernel version is > 3.3 // before that it was not supported setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3) - utils.Debugf("setting bridge mac address = %v", setBridgeMacAddr) + log.Debugf("setting bridge mac address = %v", setBridgeMacAddr) return netlink.CreateBridge(name, setBridgeMacAddr) } @@ -363,12 +363,12 @@ func Release(job *engine.Job) engine.Status { for _, nat := range containerInterface.PortMappings { if err := portmapper.Unmap(nat); err != nil { - log.Printf("Unable to unmap port %s: %s", nat, err) + log.Infof("Unable to unmap port %s: %s", nat, err) } } if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil { - log.Printf("Unable to release ip %s\n", err) + log.Infof("Unable to release ip %s", err) } return engine.StatusOK } @@ -415,8 +415,7 @@ func AllocatePort(job *engine.Job) engine.Status { break } - switch allocerr := err.(type) { - case portallocator.ErrPortAlreadyAllocated: + if allocerr, ok := err.(portallocator.ErrPortAlreadyAllocated); ok { // There is no point in immediately retrying to map an explicitly // chosen port. if hostPort != 0 { @@ -426,7 +425,7 @@ func AllocatePort(job *engine.Job) engine.Status { // Automatically chosen 'free' port failed to bind: move on the next. job.Logf("Failed to bind %s for container address %s. 
Trying another port.", allocerr.IPPort(), container.String()) - default: + } else { // some other error during mapping job.Logf("Received an unexpected error during port allocation: %s", err.Error()) break diff --git a/components/engine/daemon/networkdriver/bridge/driver_test.go b/components/engine/daemon/networkdriver/bridge/driver_test.go index f8ddd4c64e..9bc6c32eb4 100644 --- a/components/engine/daemon/networkdriver/bridge/driver_test.go +++ b/components/engine/daemon/networkdriver/bridge/driver_test.go @@ -1,14 +1,19 @@ package bridge import ( - "fmt" "net" "strconv" "testing" - "github.com/dotcloud/docker/engine" + "github.com/docker/docker/daemon/networkdriver/portmapper" + "github.com/docker/docker/engine" ) +func init() { + // reset the new proxy command for mocking out the userland proxy in tests + portmapper.NewProxy = portmapper.NewMockProxyCommand +} + func findFreePort(t *testing.T) int { l, err := net.Listen("tcp", ":0") if err != nil { @@ -61,46 +66,3 @@ func TestAllocatePortDetection(t *testing.T) { t.Fatal("Duplicate port allocation granted by AllocatePort") } } - -func TestAllocatePortReclaim(t *testing.T) { - eng := engine.New() - eng.Logging = false - - freePort := findFreePort(t) - - // Init driver - job := eng.Job("initdriver") - if res := InitDriver(job); res != engine.StatusOK { - t.Fatal("Failed to initialize network driver") - } - - // Allocate interface - job = eng.Job("allocate_interface", "container_id") - if res := Allocate(job); res != engine.StatusOK { - t.Fatal("Failed to allocate network interface") - } - - // Occupy port - listenAddr := fmt.Sprintf(":%d", freePort) - tcpListenAddr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - t.Fatalf("Failed to resolve TCP address '%s'", listenAddr) - } - - l, err := net.ListenTCP("tcp", tcpListenAddr) - if err != nil { - t.Fatalf("Fail to listen on port %d", freePort) - } - - // Allocate port, expect failure - job = newPortAllocationJob(eng, freePort) - if res := AllocatePort(job); res == engine.StatusOK { - t.Fatal("Successfully allocated currently used port") - } - - // Reclaim port, retry allocation - l.Close() - if res := AllocatePort(job); res != engine.StatusOK { - t.Fatal("Failed to allocate previously reclaimed port") - } -} diff --git a/components/engine/daemon/networkdriver/ipallocator/allocator.go b/components/engine/daemon/networkdriver/ipallocator/allocator.go index f154b0bd49..1bf8e1da9b 100644 --- a/components/engine/daemon/networkdriver/ipallocator/allocator.go +++ b/components/engine/daemon/networkdriver/ipallocator/allocator.go @@ -3,7 +3,7 @@ package ipallocator import ( "encoding/binary" "errors" - "github.com/dotcloud/docker/daemon/networkdriver" + "github.com/docker/docker/daemon/networkdriver" "net" "sync" ) diff --git a/components/engine/daemon/networkdriver/portallocator/portallocator.go b/components/engine/daemon/networkdriver/portallocator/portallocator.go index c722ba98ba..d4fcc6e725 100644 --- a/components/engine/daemon/networkdriver/portallocator/portallocator.go +++ b/components/engine/daemon/networkdriver/portallocator/portallocator.go @@ -12,10 +12,22 @@ type portMap struct { last int } -type ( - protocolMap map[string]*portMap - ipMapping map[string]protocolMap -) +func newPortMap() *portMap { + return &portMap{ + p: map[int]struct{}{}, + } +} + +type protoMap map[string]*portMap + +func newProtoMap() protoMap { + return protoMap{ + "tcp": newPortMap(), + "udp": newPortMap(), + } +} + +type ipMapping map[string]protoMap const ( BeginPortRange = 49153 @@ -62,107 
+74,83 @@ func (e ErrPortAlreadyAllocated) Error() string { return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port) } +// RequestPort requests a new port from the global ports pool for the specified ip and proto. +// If port is 0 it returns the first free port. Otherwise it checks port availability +// in the pool and returns that port, or an error if the port is already busy. func RequestPort(ip net.IP, proto string, port int) (int, error) { mutex.Lock() defer mutex.Unlock() - if err := validateProto(proto); err != nil { + if proto != "tcp" && proto != "udp" { + return 0, ErrUnknownProtocol + } + + if ip == nil { + ip = defaultIP + } + ipstr := ip.String() + protomap, ok := globalMap[ipstr] + if !ok { + protomap = newProtoMap() + globalMap[ipstr] = protomap + } + mapping := protomap[proto] + if port > 0 { + if _, ok := mapping.p[port]; !ok { + mapping.p[port] = struct{}{} + return port, nil + } + return 0, NewErrPortAlreadyAllocated(ipstr, port) + } + + port, err := mapping.findPort() + if err != nil { return 0, err } - - ip = getDefault(ip) - - mapping := getOrCreate(ip) - - if port > 0 { - if _, ok := mapping[proto].p[port]; !ok { - mapping[proto].p[port] = struct{}{} - return port, nil - } else { - return 0, NewErrPortAlreadyAllocated(ip.String(), port) - } - } else { - port, err := findPort(ip, proto) - - if err != nil { - return 0, err - } - - return port, nil - } + return port, nil } +// ReleasePort releases a port from the global ports pool for the specified ip and proto. func ReleasePort(ip net.IP, proto string, port int) error { mutex.Lock() defer mutex.Unlock() - ip = getDefault(ip) - - mapping := getOrCreate(ip)[proto] - delete(mapping.p, port) - + if ip == nil { + ip = defaultIP + } + protomap, ok := globalMap[ip.String()] + if !ok { + return nil + } + delete(protomap[proto].p, port) return nil } +// ReleaseAll releases all ports for all ips.
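// Editor's sketch (not part of the diff): the allocator behaviour documented
// above in one self-contained piece. A specific port is granted only if free,
// and port 0 means "scan the range circularly for the first free port",
// resuming just past the last grant. The 49153-65535 bounds match
// BeginPortRange/EndPortRange; the alloc type is illustrative only.
package main

import (
	"errors"
	"fmt"
)

const beginPort, endPort = 49153, 65535

type alloc struct {
	used map[int]struct{}
	last int
}

func (a *alloc) request(port int) (int, error) {
	if port > 0 {
		if _, busy := a.used[port]; busy {
			return 0, fmt.Errorf("port %d is already allocated", port)
		}
		a.used[port] = struct{}{}
		return port, nil
	}
	// port == 0: circular scan starting just past the last grant,
	// wrapping back to the beginning of the range.
	for p := a.last + 1; p != a.last; p++ {
		if p > endPort {
			p = beginPort
		}
		if _, busy := a.used[p]; !busy {
			a.used[p] = struct{}{}
			a.last = p
			return p, nil
		}
	}
	return 0, errors.New("all ports are allocated")
}

func main() {
	a := &alloc{used: map[int]struct{}{}, last: beginPort}
	fmt.Println(a.request(0))     // 49154
	fmt.Println(a.request(49155)) // 49155, granted explicitly
	fmt.Println(a.request(0))     // 49156: scan skips the busy 49155
}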
func ReleaseAll() error { mutex.Lock() - defer mutex.Unlock() - globalMap = ipMapping{} - + mutex.Unlock() return nil } -func getOrCreate(ip net.IP) protocolMap { - ipstr := ip.String() - - if _, ok := globalMap[ipstr]; !ok { - globalMap[ipstr] = protocolMap{ - "tcp": &portMap{p: map[int]struct{}{}, last: 0}, - "udp": &portMap{p: map[int]struct{}{}, last: 0}, - } - } - - return globalMap[ipstr] -} - -func findPort(ip net.IP, proto string) (int, error) { - mapping := getOrCreate(ip)[proto] - - if mapping.last == 0 { - mapping.p[BeginPortRange] = struct{}{} - mapping.last = BeginPortRange +func (pm *portMap) findPort() (int, error) { + if pm.last == 0 { + pm.p[BeginPortRange] = struct{}{} + pm.last = BeginPortRange return BeginPortRange, nil } - for port := mapping.last + 1; port != mapping.last; port++ { + for port := pm.last + 1; port != pm.last; port++ { if port > EndPortRange { port = BeginPortRange } - if _, ok := mapping.p[port]; !ok { - mapping.p[port] = struct{}{} - mapping.last = port + if _, ok := pm.p[port]; !ok { + pm.p[port] = struct{}{} + pm.last = port return port, nil } - } - return 0, ErrAllPortsAllocated } - -func getDefault(ip net.IP) net.IP { - if ip == nil { - return defaultIP - } - - return ip -} - -func validateProto(proto string) error { - if proto != "tcp" && proto != "udp" { - return ErrUnknownProtocol - } - - return nil -} diff --git a/components/engine/daemon/networkdriver/portmapper/mapper.go b/components/engine/daemon/networkdriver/portmapper/mapper.go index 1bd332271f..a81596d458 100644 --- a/components/engine/daemon/networkdriver/portmapper/mapper.go +++ b/components/engine/daemon/networkdriver/portmapper/mapper.go @@ -6,14 +6,13 @@ import ( "net" "sync" - "github.com/dotcloud/docker/daemon/networkdriver/portallocator" - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/proxy" + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/pkg/iptables" ) type mapping struct { proto string - userlandProxy proxy.Proxy + userlandProxy UserlandProxy host net.Addr container net.Addr } @@ -24,7 +23,8 @@ var ( // udp:ip:port currentMappings = make(map[string]*mapping) - newProxy = proxy.NewProxy + + NewProxy = NewProxyCommand ) var ( @@ -37,54 +37,58 @@ func SetIptablesChain(c *iptables.Chain) { chain = c } -func Map(container net.Addr, hostIP net.IP, hostPort int) (net.Addr, error) { +func Map(container net.Addr, hostIP net.IP, hostPort int) (host net.Addr, err error) { lock.Lock() defer lock.Unlock() var ( m *mapping - err error proto string allocatedHostPort int + proxy UserlandProxy ) - // release the port on any error during return. 
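// Editor's sketch (not part of the diff): the hunk around this point moves
// the port-release defer in Map to after the switch, and the function gains
// a named error return so a failure at any later step releases the port that
// was just allocated. A minimal sketch of that named-return + defer cleanup
// idiom; release is a hypothetical stand-in for portallocator.ReleasePort.
package main

import (
	"errors"
	"fmt"
)

func release(port int) { fmt.Println("released", port) }

func mapPort(fail bool) (hostPort int, err error) {
	allocated := 49153 // pretend the allocation succeeded
	// Because err is a named return, the deferred closure observes the
	// final error value and can undo the allocation on any error path.
	defer func() {
		if err != nil {
			release(allocated)
		}
	}()
	if fail {
		return 0, errors.New("iptables setup failed")
	}
	return allocated, nil
}

func main() {
	fmt.Println(mapPort(false)) // 49153 <nil>
	fmt.Println(mapPort(true))  // prints "released 49153" first
}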
- defer func() { - if err != nil { - portallocator.ReleasePort(hostIP, proto, allocatedHostPort) - } - }() - switch container.(type) { case *net.TCPAddr: proto = "tcp" if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil { return nil, err } + m = &mapping{ proto: proto, host: &net.TCPAddr{IP: hostIP, Port: allocatedHostPort}, container: container, } + + proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port) case *net.UDPAddr: proto = "udp" if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil { return nil, err } + m = &mapping{ proto: proto, host: &net.UDPAddr{IP: hostIP, Port: allocatedHostPort}, container: container, } + + proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port) default: - err = ErrUnknownBackendAddressType - return nil, err + return nil, ErrUnknownBackendAddressType } + // release the allocated port on any further error during return. + defer func() { + if err != nil { + portallocator.ReleasePort(hostIP, proto, allocatedHostPort) + } + }() + key := getKey(m.host) if _, exists := currentMappings[key]; exists { - err = ErrPortMappedForIP - return nil, err + return nil, ErrPortMappedForIP } containerIP, containerPort := getIPAndPort(m.container) @@ -92,17 +96,15 @@ func Map(container net.Addr, hostIP net.IP, hostPort int) (net.Addr, error) { return nil, err } - p, err := newProxy(m.host, m.container) - if err != nil { - // need to undo the iptables rules before we return - forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort) - return nil, err - } - - m.userlandProxy = p + m.userlandProxy = proxy currentMappings[key] = m - go p.Run() + if err := proxy.Start(); err != nil { + // need to undo the iptables rules before we return + forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort) + + return nil, err + } return m.host, nil } @@ -117,7 +119,8 @@ func Unmap(host net.Addr) error { return ErrPortNotMapped } - data.userlandProxy.Close() + data.userlandProxy.Stop() + delete(currentMappings, key) containerIP, containerPort := getIPAndPort(data.container) diff --git a/components/engine/daemon/networkdriver/portmapper/mapper_test.go b/components/engine/daemon/networkdriver/portmapper/mapper_test.go index 6affdc5445..42e44a11df 100644 --- a/components/engine/daemon/networkdriver/portmapper/mapper_test.go +++ b/components/engine/daemon/networkdriver/portmapper/mapper_test.go @@ -1,16 +1,16 @@ package portmapper import ( - "github.com/dotcloud/docker/daemon/networkdriver/portallocator" - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/proxy" "net" "testing" + + "github.com/docker/docker/daemon/networkdriver/portallocator" + "github.com/docker/docker/pkg/iptables" ) func init() { // override this func to mock out the proxy server - newProxy = proxy.NewStubProxy + NewProxy = NewMockProxyCommand } func reset() { @@ -138,7 +138,7 @@ func TestMapAllPortsSingleInterface(t *testing.T) { } if _, err := Map(srcAddr1, dstIp1, portallocator.BeginPortRange); err == nil { - t.Fatal("Port %d should be bound but is not", portallocator.BeginPortRange) + t.Fatalf("Port %d should be bound but is not", portallocator.BeginPortRange) } for _, val := range hosts { diff --git a/components/engine/daemon/networkdriver/portmapper/mock_proxy.go b/components/engine/daemon/networkdriver/portmapper/mock_proxy.go new 
file mode 100644 index 0000000000..253ce83112 --- /dev/null +++ b/components/engine/daemon/networkdriver/portmapper/mock_proxy.go @@ -0,0 +1,18 @@ +package portmapper + +import "net" + +func NewMockProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy { + return &mockProxyCommand{} +} + +type mockProxyCommand struct { +} + +func (p *mockProxyCommand) Start() error { + return nil +} + +func (p *mockProxyCommand) Stop() error { + return nil +} diff --git a/components/engine/daemon/networkdriver/portmapper/proxy.go b/components/engine/daemon/networkdriver/portmapper/proxy.go new file mode 100644 index 0000000000..b24723727b --- /dev/null +++ b/components/engine/daemon/networkdriver/portmapper/proxy.go @@ -0,0 +1,119 @@ +package portmapper + +import ( + "flag" + "log" + "net" + "os" + "os/exec" + "os/signal" + "strconv" + "syscall" + + "github.com/docker/docker/pkg/proxy" + "github.com/docker/docker/reexec" +) + +const userlandProxyCommandName = "docker-proxy" + +func init() { + reexec.Register(userlandProxyCommandName, execProxy) +} + +type UserlandProxy interface { + Start() error + Stop() error +} + +// proxyCommand wraps an exec.Cmd to run the userland TCP and UDP +// proxies as separate processes. +type proxyCommand struct { + cmd *exec.Cmd +} + +// execProxy is the reexec function that is registered to start the userland proxies +func execProxy() { + host, container := parseHostContainerAddrs() + + p, err := proxy.NewProxy(host, container) + if err != nil { + log.Fatal(err) + } + + go handleStopSignals(p) + + // Run will block until the proxy stops + p.Run() +} + +// parseHostContainerAddrs parses the flags passed on reexec to create the TCP or UDP +// net.Addrs to map the host and container ports +func parseHostContainerAddrs() (host net.Addr, container net.Addr) { + var ( + proto = flag.String("proto", "tcp", "proxy protocol") + hostIP = flag.String("host-ip", "", "host ip") + hostPort = flag.Int("host-port", -1, "host port") + containerIP = flag.String("container-ip", "", "container ip") + containerPort = flag.Int("container-port", -1, "container port") + ) + + flag.Parse() + + switch *proto { + case "tcp": + host = &net.TCPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort} + container = &net.TCPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort} + case "udp": + host = &net.UDPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort} + container = &net.UDPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort} + default: + log.Fatalf("unsupported protocol %s", *proto) + } + + return host, container +} + +func handleStopSignals(p proxy.Proxy) { + s := make(chan os.Signal, 10) + signal.Notify(s, os.Interrupt, syscall.SIGTERM, syscall.SIGSTOP) + + for _ = range s { + p.Close() + + os.Exit(0) + } +} + +func NewProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy { + args := []string{ + userlandProxyCommandName, + "-proto", proto, + "-host-ip", hostIP.String(), + "-host-port", strconv.Itoa(hostPort), + "-container-ip", containerIP.String(), + "-container-port", strconv.Itoa(containerPort), + } + + return &proxyCommand{ + cmd: &exec.Cmd{ + Path: reexec.Self(), + Args: args, + Stdout: os.Stdout, + Stderr: os.Stderr, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, // send a sigterm to the proxy if the daemon process dies + }, + }, + } +} + +func (p *proxyCommand) Start() error { + return p.cmd.Start() +} + +func (p *proxyCommand) Stop() error { + err := 
p.cmd.Process.Signal(os.Interrupt) + p.cmd.Wait() + + return err +} diff --git a/components/engine/daemon/pause.go b/components/engine/daemon/pause.go new file mode 100644 index 0000000000..0e4323d9a8 --- /dev/null +++ b/components/engine/daemon/pause.go @@ -0,0 +1,37 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + if err := container.Pause(); err != nil { + return job.Errorf("Cannot pause container %s: %s", name, err) + } + container.LogEvent("pause") + return engine.StatusOK +} + +func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status { + if n := len(job.Args); n < 1 || n > 2 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + container := daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + if err := container.Unpause(); err != nil { + return job.Errorf("Cannot unpause container %s: %s", name, err) + } + container.LogEvent("unpause") + return engine.StatusOK +} diff --git a/components/engine/daemon/resize.go b/components/engine/daemon/resize.go new file mode 100644 index 0000000000..dd196ff6c4 --- /dev/null +++ b/components/engine/daemon/resize.go @@ -0,0 +1,29 @@ +package daemon + +import ( + "strconv" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status { + if len(job.Args) != 3 { + return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) + } + name := job.Args[0] + height, err := strconv.Atoi(job.Args[1]) + if err != nil { + return job.Error(err) + } + width, err := strconv.Atoi(job.Args[2]) + if err != nil { + return job.Error(err) + } + if container := daemon.Get(name); container != nil { + if err := container.Resize(height, width); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} diff --git a/components/engine/daemon/restart.go b/components/engine/daemon/restart.go new file mode 100644 index 0000000000..bcc057156d --- /dev/null +++ b/components/engine/daemon/restart.go @@ -0,0 +1,27 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") + } + if container := daemon.Get(name); container != nil { + if err := container.Restart(int(t)); err != nil { + return job.Errorf("Cannot restart container %s: %s\n", name, err) + } + container.LogEvent("restart") + } else { + return job.Errorf("No such container: %s\n", name) + } + return engine.StatusOK +} diff --git a/components/engine/daemon/server.go b/components/engine/daemon/server.go deleted file mode 100644 index dbe6a8ebe8..0000000000 --- a/components/engine/daemon/server.go +++ /dev/null @@ -1,10 +0,0 @@ -package daemon - -import ( - "github.com/dotcloud/docker/utils" -) - -type Server interface { - LogEvent(action, id, from string) *utils.JSONMessage - IsRunning() bool // returns true if the server is currently in operation -} diff --git a/components/engine/daemon/start.go b/components/engine/daemon/start.go new 
file mode 100644 index 0000000000..30e015496f --- /dev/null +++ b/components/engine/daemon/start.go @@ -0,0 +1,67 @@ +package daemon + +import ( + "fmt" + "os" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/runconfig" +) + +func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status { + if len(job.Args) < 1 { + return job.Errorf("Usage: %s container_id", job.Name) + } + var ( + name = job.Args[0] + container = daemon.Get(name) + ) + + if container == nil { + return job.Errorf("No such container: %s", name) + } + + if container.State.IsRunning() { + return job.Errorf("Container already started") + } + + // If no environment was set, then no hostconfig was passed. + if len(job.Environ()) > 0 { + hostConfig := runconfig.ContainerHostConfigFromJob(job) + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return job.Error(err) + } + } + if err := container.Start(); err != nil { + return job.Errorf("Cannot start container %s: %s", name, err) + } + + return engine.StatusOK +} + +func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { + // Validate the HostConfig binds. Make sure that: + // the source exists + for _, bind := range hostConfig.Binds { + splitBind := strings.Split(bind, ":") + source := splitBind[0] + + // ensure the source exists on the host + _, err := os.Stat(source) + if err != nil && os.IsNotExist(err) { + err = os.MkdirAll(source, 0755) + if err != nil { + return fmt.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error()) + } + } + } + // Register any links from the host config before starting the container + if err := daemon.RegisterLinks(container, hostConfig); err != nil { + return err + } + container.SetHostConfig(hostConfig) + container.ToDisk() + + return nil +} diff --git a/components/engine/daemon/state.go b/components/engine/daemon/state.go index 3f904d7829..44742b78c9 100644 --- a/components/engine/daemon/state.go +++ b/components/engine/daemon/state.go @@ -1,17 +1,19 @@ package daemon import ( + "encoding/json" "fmt" "sync" "time" - "github.com/dotcloud/docker/pkg/units" + "github.com/docker/docker/pkg/units" ) type State struct { sync.RWMutex Running bool Paused bool + Restarting bool Pid int ExitCode int StartedAt time.Time @@ -34,14 +36,30 @@ func (s *State) String() string { if s.Paused { return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } + if s.FinishedAt.IsZero() { return "" } + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } +type jState State + +// MarshalJSON for state is needed to avoid race conditions on inspect +func (s *State) MarshalJSON() ([]byte, error) { + s.RLock() + b, err := json.Marshal(jState(*s)) + s.RUnlock() + return b, err +} + func wait(waitChan <-chan struct{}, timeout time.Duration) error { if timeout < 0 { <-waitChan @@ -114,31 +132,52 @@ func (s *State) GetExitCode() int { func (s *State) SetRunning(pid int) { s.Lock() - if !s.Running { - s.Running = true - s.Paused = false - s.ExitCode = 0 - s.Pid = pid - s.StartedAt = time.Now().UTC() - close(s.waitChan) // fire waiters for start - s.waitChan = make(chan struct{}) - } + s.Running = true + s.Paused = false + s.Restarting = 
false + s.ExitCode = 0 + s.Pid = pid + s.StartedAt = time.Now().UTC() + close(s.waitChan) // fire waiters for start + s.waitChan = make(chan struct{}) s.Unlock() } func (s *State) SetStopped(exitCode int) { s.Lock() - if s.Running { - s.Running = false - s.Pid = 0 - s.FinishedAt = time.Now().UTC() - s.ExitCode = exitCode - close(s.waitChan) // fire waiters for stop - s.waitChan = make(chan struct{}) - } + s.Running = false + s.Restarting = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.ExitCode = exitCode + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) s.Unlock() } +// SetRestarting is used when docker handles the auto-restart of containers that are +// in the middle of a stop and are being restarted again +func (s *State) SetRestarting(exitCode int) { + s.Lock() + // we should consider the container running when it is restarting because of + // all the checks in docker around rm/stop/etc + s.Running = true + s.Restarting = true + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.ExitCode = exitCode + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) + s.Unlock() +} + +func (s *State) IsRestarting() bool { + s.RLock() + res := s.Restarting + s.RUnlock() + return res +} + func (s *State) SetPaused() { s.Lock() s.Paused = true diff --git a/components/engine/daemon/state_test.go b/components/engine/daemon/state_test.go index 7b02f3aeac..35524356a3 100644 --- a/components/engine/daemon/state_test.go +++ b/components/engine/daemon/state_test.go @@ -37,7 +37,7 @@ func TestStateRunStop(t *testing.T) { t.Fatalf("Pid %v, expected %v", runPid, i+100) } if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 { - t.Fatal("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil) + t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil) } stopped := make(chan struct{}) @@ -68,7 +68,7 @@ func TestStateRunStop(t *testing.T) { t.Fatalf("ExitCode %v, expected %v", exitCode, i) } if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { - t.Fatal("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) + t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) } } } diff --git a/components/engine/daemon/stop.go b/components/engine/daemon/stop.go new file mode 100644 index 0000000000..f1851291fb --- /dev/null +++ b/components/engine/daemon/stop.go @@ -0,0 +1,30 @@ +package daemon + +import ( + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") + } + if container := daemon.Get(name); container != nil { + if !container.State.IsRunning() { + return job.Errorf("Container already stopped") + } + if err := container.Stop(int(t)); err != nil { + return job.Errorf("Cannot stop container %s: %s\n", name, err) + } + container.LogEvent("stop") + } else { + return job.Errorf("No such container: %s\n", name) + } + return engine.StatusOK +} diff --git a/components/engine/daemon/top.go b/components/engine/daemon/top.go new file mode 100644 index 0000000000..ceaeea157e --- /dev/null +++ b/components/engine/daemon/top.go @@ -0,0 +1,79 @@ +package daemon + +import ( + "os/exec" + "strconv" + "strings" +
"github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status { + if len(job.Args) != 1 && len(job.Args) != 2 { + return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name) + } + var ( + name = job.Args[0] + psArgs = "-ef" + ) + + if len(job.Args) == 2 && job.Args[1] != "" { + psArgs = job.Args[1] + } + + if container := daemon.Get(name); container != nil { + if !container.State.IsRunning() { + return job.Errorf("Container %s is not running", name) + } + pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) + if err != nil { + return job.Error(err) + } + output, err := exec.Command("ps", psArgs).Output() + if err != nil { + return job.Errorf("Error running ps: %s", err) + } + + lines := strings.Split(string(output), "\n") + header := strings.Fields(lines[0]) + out := &engine.Env{} + out.SetList("Titles", header) + + pidIndex := -1 + for i, name := range header { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return job.Errorf("Couldn't find PID field in ps output") + } + + processes := [][]string{} + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := strings.Fields(line) + p, err := strconv.Atoi(fields[pidIndex]) + if err != nil { + return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + for _, pid := range pids { + if pid == p { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(header)-1] + process = append(process, strings.Join(fields[len(header)-1:], " ")) + processes = append(processes, process) + } + } + } + out.SetJson("Processes", processes) + out.WriteTo(job.Stdout) + return engine.StatusOK + + } + return job.Errorf("No such container: %s", name) +} diff --git a/components/engine/daemon/utils.go b/components/engine/daemon/utils.go index d60d985152..053319c5ea 100644 --- a/components/engine/daemon/utils.go +++ b/components/engine/daemon/utils.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/runconfig" + "github.com/docker/docker/nat" + "github.com/docker/docker/runconfig" ) func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { diff --git a/components/engine/daemon/utils_linux.go b/components/engine/daemon/utils_linux.go new file mode 100644 index 0000000000..bff2a787b1 --- /dev/null +++ b/components/engine/daemon/utils_linux.go @@ -0,0 +1,13 @@ +// +build linux + +package daemon + +import "github.com/docker/libcontainer/selinux" + +func selinuxSetDisabled() { + selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.FreeLxcContexts(label) +} diff --git a/components/engine/daemon/utils_nolinux.go b/components/engine/daemon/utils_nolinux.go new file mode 100644 index 0000000000..399376dbd4 --- /dev/null +++ b/components/engine/daemon/utils_nolinux.go @@ -0,0 +1,9 @@ +// +build !linux + +package daemon + +func selinuxSetDisabled() { +} + +func selinuxFreeLxcContexts(label string) { +} diff --git a/components/engine/daemon/utils_test.go b/components/engine/daemon/utils_test.go index 22b52d1699..1f3175b994 100644 --- a/components/engine/daemon/utils_test.go +++ b/components/engine/daemon/utils_test.go @@ -3,8 +3,8 @@ package daemon import ( "testing" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" ) func TestMergeLxcConfig(t 
*testing.T) { diff --git a/components/engine/daemon/volumes.go b/components/engine/daemon/volumes.go index f4b3921c9a..b60118c953 100644 --- a/components/engine/daemon/volumes.go +++ b/components/engine/daemon/volumes.go @@ -8,15 +8,29 @@ import ( "strings" "syscall" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/pkg/symlink" + "github.com/docker/docker/archive" + "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/symlink" ) -type BindMap struct { - SrcPath string - DstPath string - Mode string +type Volume struct { + HostPath string + VolPath string + Mode string + isBindMount bool +} + +func (v *Volume) isRw() bool { + return v.Mode == "" || strings.ToLower(v.Mode) == "rw" +} + +func (v *Volume) isDir() (bool, error) { + stat, err := os.Stat(v.HostPath) + if err != nil { + return false, err + } + + return stat.IsDir(), nil } func prepareVolumesForContainer(container *Container) error { @@ -36,16 +50,15 @@ func prepareVolumesForContainer(container *Container) error { func setupMountsForContainer(container *Container) error { mounts := []execdriver.Mount{ - {container.daemon.sysInitPath, "/.dockerinit", false, true}, - {container.ResolvConfPath, "/etc/resolv.conf", false, true}, + {container.ResolvConfPath, "/etc/resolv.conf", true, true}, } if container.HostnamePath != "" { - mounts = append(mounts, execdriver.Mount{container.HostnamePath, "/etc/hostname", false, true}) + mounts = append(mounts, execdriver.Mount{container.HostnamePath, "/etc/hostname", true, true}) } if container.HostsPath != "" { - mounts = append(mounts, execdriver.Mount{container.HostsPath, "/etc/hosts", false, true}) + mounts = append(mounts, execdriver.Mount{container.HostsPath, "/etc/hosts", true, true}) } // Mount user specified volumes @@ -123,181 +136,175 @@ func applyVolumesFrom(container *Container) error { return nil } -func getBindMap(container *Container) (map[string]BindMap, error) { +func parseBindVolumeSpec(spec string) (Volume, error) { + var ( + arr = strings.Split(spec, ":") + vol Volume + ) + + vol.isBindMount = true + switch len(arr) { + case 1: + vol.VolPath = spec + vol.Mode = "rw" + case 2: + vol.HostPath = arr[0] + vol.VolPath = arr[1] + vol.Mode = "rw" + case 3: + vol.HostPath = arr[0] + vol.VolPath = arr[1] + vol.Mode = arr[2] + default: + return vol, fmt.Errorf("Invalid volume specification: %s", spec) + } + + if !filepath.IsAbs(vol.HostPath) { + return vol, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", vol.HostPath) + } + + return vol, nil +} + +func getBindMap(container *Container) (map[string]Volume, error) { var ( // Create the requested bind mounts - binds = make(map[string]BindMap) + volumes = map[string]Volume{} // Define illegal container destinations illegalDsts = []string{"/", "."} ) for _, bind := range container.hostConfig.Binds { - // FIXME: factorize bind parsing in parseBind - var ( - src, dst, mode string - arr = strings.Split(bind, ":") - ) - - if len(arr) == 2 { - src = arr[0] - dst = arr[1] - mode = "rw" - } else if len(arr) == 3 { - src = arr[0] - dst = arr[1] - mode = arr[2] - } else { - return nil, fmt.Errorf("Invalid bind specification: %s", bind) + vol, err := parseBindVolumeSpec(bind) + if err != nil { + return volumes, err } - // Bail if trying to mount to an illegal destination for _, illegal := range illegalDsts { - if dst == illegal { - return nil, fmt.Errorf("Illegal bind destination: %s", dst) + if vol.VolPath == illegal { + return nil, 
fmt.Errorf("Illegal bind destination: %s", vol.VolPath) } } - bindMap := BindMap{ - SrcPath: src, - DstPath: dst, - Mode: mode, - } - binds[filepath.Clean(dst)] = bindMap + volumes[filepath.Clean(vol.VolPath)] = vol } - return binds, nil + return volumes, nil } func createVolumes(container *Container) error { - binds, err := getBindMap(container) + // Get all the bindmounts + volumes, err := getBindMap(container) if err != nil { return err } - // Create the requested volumes if they don't exist + // Get all the rest of the volumes for volPath := range container.Config.Volumes { - if err := initializeVolume(container, volPath, binds); err != nil { + // Make sure the the volume isn't already specified as a bindmount + if _, exists := volumes[volPath]; !exists { + volumes[volPath] = Volume{ + VolPath: volPath, + Mode: "rw", + isBindMount: false, + } + } + } + + for _, vol := range volumes { + if err = vol.initialize(container); err != nil { + return err + } + } + return nil + +} + +func createVolumeHostPath(container *Container) (string, error) { + volumesDriver := container.daemon.volumes.Driver() + + // Do not pass a container as the parameter for the volume creation. + // The graph driver using the container's information ( Image ) to + // create the parent. + c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil) + if err != nil { + return "", err + } + hostPath, err := volumesDriver.Get(c.ID, "") + if err != nil { + return hostPath, fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) + } + + return hostPath, nil +} + +func (v *Volume) initialize(container *Container) error { + var err error + v.VolPath = filepath.Clean(v.VolPath) + + // Do not initialize an existing volume + if _, exists := container.Volumes[v.VolPath]; exists { + return nil + } + + // If it's not a bindmount we need to create the dir on the host + if !v.isBindMount { + v.HostPath, err = createVolumeHostPath(container) + if err != nil { return err } } - for volPath := range binds { - if err := initializeVolume(container, volPath, binds); err != nil { - return err - } + hostPath, err := filepath.EvalSymlinks(v.HostPath) + if err != nil { + return err + } + + // Create the mountpoint + // This is the path to the volume within the container FS + // This differs from `hostPath` in that `hostPath` refers to the place where + // the volume data is actually stored on the host + fullVolPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, v.VolPath), container.basefs) + if err != nil { + return err + } + + container.Volumes[v.VolPath] = hostPath + container.VolumesRW[v.VolPath] = v.isRw() + + volIsDir, err := v.isDir() + if err != nil { + return err + } + if err := createIfNotExists(fullVolPath, volIsDir); err != nil { + return err + } + + // Do not copy or change permissions if we are mounting from the host + if v.isRw() && !v.isBindMount { + return copyExistingContents(fullVolPath, hostPath) } return nil } func createIfNotExists(destination string, isDir bool) error { - if _, err := os.Stat(destination); err != nil && os.IsNotExist(err) { - if isDir { - if err := os.MkdirAll(destination, 0755); err != nil { - return err - } - } else { - if err := os.MkdirAll(filepath.Dir(destination), 0755); err != nil { - return err - } - - f, err := os.OpenFile(destination, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - - return nil -} - -func initializeVolume(container *Container, volPath string, binds map[string]BindMap) error { - 
volumesDriver := container.daemon.volumes.Driver() - volPath = filepath.Clean(volPath) - - // Skip existing volumes - if _, exists := container.Volumes[volPath]; exists { + if _, err := os.Stat(destination); err == nil || !os.IsNotExist(err) { return nil } - var ( - destination string - isBindMount bool - volIsDir = true - - srcRW = false - ) - - // If an external bind is defined for this volume, use that as a source - if bindMap, exists := binds[volPath]; exists { - isBindMount = true - destination = bindMap.SrcPath - - if !filepath.IsAbs(destination) { - return fmt.Errorf("%s must be an absolute path", destination) - } - - if strings.ToLower(bindMap.Mode) == "rw" { - srcRW = true - } - - if stat, err := os.Stat(bindMap.SrcPath); err != nil { - return err - } else { - volIsDir = stat.IsDir() - } - } else { - // Do not pass a container as the parameter for the volume creation. - // The graph driver using the container's information ( Image ) to - // create the parent. - c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil) - if err != nil { - return err - } - - destination, err = volumesDriver.Get(c.ID, "") - if err != nil { - return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) - } - - srcRW = true + if isDir { + return os.MkdirAll(destination, 0755) } - if p, err := filepath.EvalSymlinks(destination); err != nil { + if err := os.MkdirAll(filepath.Dir(destination), 0755); err != nil { return err - } else { - destination = p } - // Create the mountpoint - source, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, volPath), container.basefs) + f, err := os.OpenFile(destination, os.O_CREATE, 0755) if err != nil { return err } + f.Close() - newVolPath, err := filepath.Rel(container.basefs, source) - if err != nil { - return err - } - newVolPath = "/" + newVolPath - - if volPath != newVolPath { - delete(container.Volumes, volPath) - delete(container.VolumesRW, volPath) - } - - container.Volumes[volPath] = destination - container.VolumesRW[volPath] = srcRW - - if err := createIfNotExists(source, volIsDir); err != nil { - return err - } - - // Do not copy or change permissions if we are mounting from the host - if srcRW && !isBindMount { - if err := copyExistingContents(source, destination); err != nil { - return err - } - } return nil } diff --git a/components/engine/daemon/wait.go b/components/engine/daemon/wait.go new file mode 100644 index 0000000000..7224b6231f --- /dev/null +++ b/components/engine/daemon/wait.go @@ -0,0 +1,20 @@ +package daemon + +import ( + "time" + + "github.com/docker/docker/engine" +) + +func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s", job.Name) + } + name := job.Args[0] + if container := daemon.Get(name); container != nil { + status, _ := container.State.WaitStop(-1 * time.Second) + job.Printf("%d\n", status) + return engine.StatusOK + } + return job.Errorf("%s: no such container: %s", job.Name, name) +} diff --git a/components/engine/daemonconfig/README.md b/components/engine/daemonconfig/README.md deleted file mode 100644 index 488e7c7cac..0000000000 --- a/components/engine/daemonconfig/README.md +++ /dev/null @@ -1,3 +0,0 @@ -This directory contains code pertaining to the configuration of the docker deamon - -These are the configuration settings that you pass to the docker daemon when you launch it with say: `docker -d -e lxc` diff --git a/components/engine/daemonconfig/config.go 
b/components/engine/daemonconfig/config.go deleted file mode 100644 index 1d2bb60dd6..0000000000 --- a/components/engine/daemonconfig/config.go +++ /dev/null @@ -1,82 +0,0 @@ -package daemonconfig - -import ( - "github.com/dotcloud/docker/daemon/networkdriver" - "github.com/dotcloud/docker/engine" - "net" -) - -const ( - defaultNetworkMtu = 1500 - DisableNetworkBridge = "none" -) - -// FIXME: separate runtime configuration from http api configuration -type Config struct { - Pidfile string - Root string - AutoRestart bool - Dns []string - DnsSearch []string - EnableIptables bool - EnableIpForward bool - DefaultIp net.IP - BridgeIface string - BridgeIP string - InterContainerCommunication bool - GraphDriver string - GraphOptions []string - ExecDriver string - Mtu int - DisableNetwork bool - EnableSelinuxSupport bool - Context map[string][]string - Sockets []string -} - -// ConfigFromJob creates and returns a new DaemonConfig object -// by parsing the contents of a job's environment. -func ConfigFromJob(job *engine.Job) *Config { - config := &Config{ - Pidfile: job.Getenv("Pidfile"), - Root: job.Getenv("Root"), - AutoRestart: job.GetenvBool("AutoRestart"), - EnableIptables: job.GetenvBool("EnableIptables"), - EnableIpForward: job.GetenvBool("EnableIpForward"), - BridgeIP: job.Getenv("BridgeIP"), - BridgeIface: job.Getenv("BridgeIface"), - DefaultIp: net.ParseIP(job.Getenv("DefaultIp")), - InterContainerCommunication: job.GetenvBool("InterContainerCommunication"), - GraphDriver: job.Getenv("GraphDriver"), - ExecDriver: job.Getenv("ExecDriver"), - EnableSelinuxSupport: job.GetenvBool("EnableSelinuxSupport"), - } - if graphOpts := job.GetenvList("GraphOptions"); graphOpts != nil { - config.GraphOptions = graphOpts - } - - if dns := job.GetenvList("Dns"); dns != nil { - config.Dns = dns - } - if dnsSearch := job.GetenvList("DnsSearch"); dnsSearch != nil { - config.DnsSearch = dnsSearch - } - if mtu := job.GetenvInt("Mtu"); mtu != 0 { - config.Mtu = mtu - } else { - config.Mtu = GetDefaultNetworkMtu() - } - config.DisableNetwork = config.BridgeIface == DisableNetworkBridge - if sockets := job.GetenvList("Sockets"); sockets != nil { - config.Sockets = sockets - } - - return config -} - -func GetDefaultNetworkMtu() int { - if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { - return iface.MTU - } - return defaultNetworkMtu -} diff --git a/components/engine/docker/client.go b/components/engine/docker/client.go new file mode 100644 index 0000000000..27001cc557 --- /dev/null +++ b/components/engine/docker/client.go @@ -0,0 +1,13 @@ +// +build !daemon + +package main + +import ( + "log" +) + +const CanDaemon = false + +func mainDaemon() { + log.Fatal("This is a client-only binary - running the Docker daemon is not supported.") +} diff --git a/components/engine/docker/daemon.go b/components/engine/docker/daemon.go new file mode 100644 index 0000000000..dc9d56d1d9 --- /dev/null +++ b/components/engine/docker/daemon.go @@ -0,0 +1,81 @@ +// +build daemon + +package main + +import ( + "log" + + "github.com/docker/docker/builtins" + "github.com/docker/docker/daemon" + _ "github.com/docker/docker/daemon/execdriver/lxc" + _ "github.com/docker/docker/daemon/execdriver/native" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/engine" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/signal" +) + +const CanDaemon = true + +var ( + daemonCfg = &daemon.Config{} +) + +func init() { + daemonCfg.InstallFlags() +} + +func mainDaemon() { + if flag.NArg() != 0 
{ + flag.Usage() + return + } + eng := engine.New() + signal.Trap(eng.Shutdown) + // Load builtins + if err := builtins.Register(eng); err != nil { + log.Fatal(err) + } + + // load the daemon in the background so we can immediately start + // the http api so that connections don't fail while the daemon + // is booting + go func() { + d, err := daemon.NewDaemon(daemonCfg, eng) + if err != nil { + log.Fatal(err) + } + if err := d.Install(eng); err != nil { + log.Fatal(err) + } + // after the daemon is done setting up we can tell the api to start + // accepting connections + if err := eng.Job("acceptconnections").Run(); err != nil { + log.Fatal(err) + } + }() + // TODO actually have a resolved graphdriver to show? + log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s", + dockerversion.VERSION, + dockerversion.GITCOMMIT, + daemonCfg.ExecDriver, + daemonCfg.GraphDriver, + ) + + // Serve api + job := eng.Job("serveapi", flHosts...) + job.SetenvBool("Logging", true) + job.SetenvBool("EnableCors", *flEnableCors) + job.Setenv("Version", dockerversion.VERSION) + job.Setenv("SocketGroup", *flSocketGroup) + + job.SetenvBool("Tls", *flTls) + job.SetenvBool("TlsVerify", *flTlsVerify) + job.Setenv("TlsCa", *flCa) + job.Setenv("TlsCert", *flCert) + job.Setenv("TlsKey", *flKey) + job.SetenvBool("BufferRequests", true) + if err := job.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/components/engine/docker/docker.go b/components/engine/docker/docker.go index 30d43bc6a8..f2b4ca90b1 100644 --- a/components/engine/docker/docker.go +++ b/components/engine/docker/docker.go @@ -6,20 +6,15 @@ import ( "fmt" "io/ioutil" "log" - "net" "os" - "runtime" "strings" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/api/client" - "github.com/dotcloud/docker/builtins" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/opts" - flag "github.com/dotcloud/docker/pkg/mflag" - "github.com/dotcloud/docker/sysinit" - "github.com/dotcloud/docker/utils" + "github.com/docker/docker/api" + "github.com/docker/docker/api/client" + "github.com/docker/docker/dockerversion" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reexec" + "github.com/docker/docker/utils" ) const ( @@ -28,60 +23,23 @@ const ( defaultCertFile = "cert.pem" ) -var ( - dockerConfDir = os.Getenv("HOME") + "/.docker/" -) - func main() { - if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") { - // Running in init mode - sysinit.SysInit() + if reexec.Init() { return } - - var ( - flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") - flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") - flGraphOpts opts.ListOpts - flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") - flAutoRestart = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers") - bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") - bridgeIp = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") - pidfile = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file") - flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime") - flSocketGroup = flag.String([]string{"G", "-group"}, "docker", 
"Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group") - flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") - flDns = opts.NewListOpts(opts.ValidateIp4Address) - flDnsSearch = opts.NewListOpts(opts.ValidateDomain) - flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules") - flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward") - flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports") - flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication") - flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") - flExecDriver = flag.String([]string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") - flHosts = opts.NewListOpts(api.ValidateHost) - flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available") - flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags") - flTlsVerify = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") - flCa = flag.String([]string{"-tlscacert"}, dockerConfDir+defaultCaFile, "Trust only remotes providing a certificate signed by the CA given here") - flCert = flag.String([]string{"-tlscert"}, dockerConfDir+defaultCertFile, "Path to TLS certificate file") - flKey = flag.String([]string{"-tlskey"}, dockerConfDir+defaultKeyFile, "Path to TLS key file") - flSelinuxEnabled = flag.Bool([]string{"-selinux-enabled"}, false, "Enable selinux support") - ) - flag.Var(&flDns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers") - flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") - flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") - flag.Var(&flGraphOpts, []string{"-storage-opt"}, "Set storage driver options") - flag.Parse() + // FIXME: validate daemon flags here if *flVersion { showVersion() return } - if flHosts.Len() == 0 { - defaultHost := os.Getenv("DOCKER_HOST") + if *flDebug { + os.Setenv("DEBUG", "1") + } + if len(flHosts) == 0 { + defaultHost := os.Getenv("DOCKER_HOST") if defaultHost == "" || *flDaemon { // If we do not have a host, default to unix socket defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET) @@ -89,203 +47,69 @@ func main() { if _, err := api.ValidateHost(defaultHost); err != nil { log.Fatal(err) } - flHosts.Set(defaultHost) - } - - if *bridgeName != "" && *bridgeIp != "" { - log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.") - } - - if !*flEnableIptables && !*flInterContainerComm { - log.Fatal("You specified --iptables=false with --icc=false. ICC uses iptables to function. 
Please set --icc or --iptables to true.") - } - - if net.ParseIP(*flDefaultIp) == nil { - log.Fatalf("Specified --ip=%s is not in correct format \"0.0.0.0\".", *flDefaultIp) - } - - if *flDebug { - os.Setenv("DEBUG", "1") + flHosts = append(flHosts, defaultHost) } if *flDaemon { - if runtime.GOOS != "linux" { - log.Fatalf("The Docker daemon is only supported on linux") - } - if os.Geteuid() != 0 { - log.Fatalf("The Docker daemon needs to be run as root") - } + mainDaemon() + return + } - if flag.NArg() != 0 { - flag.Usage() - return - } + if len(flHosts) > 1 { + log.Fatal("Please specify only one -H") + } + protoAddrParts := strings.SplitN(flHosts[0], "://", 2) - // set up the TempDir to use a canonical path - tmp := os.TempDir() - realTmp, err := utils.ReadSymlinkedDirectory(tmp) + var ( + cli *client.DockerCli + tlsConfig tls.Config + ) + tlsConfig.InsecureSkipVerify = true + + // If we should verify the server, we need to load a trusted ca + if *flTlsVerify { + *flTls = true + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(*flCa) if err != nil { - log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err) } - os.Setenv("TMPDIR", realTmp) + certPool.AppendCertsFromPEM(file) + tlsConfig.RootCAs = certPool + tlsConfig.InsecureSkipVerify = false + } - // get the canonical path to the Docker root directory - root := *flRoot - var realRoot string - if _, err := os.Stat(root); err != nil && os.IsNotExist(err) { - realRoot = root - } else { - realRoot, err = utils.ReadSymlinkedDirectory(root) - if err != nil { - log.Fatalf("Unable to get the full path to root (%s): %s", root, err) - } - } - if err := checkKernelAndArch(); err != nil { - log.Fatal(err) - } - - eng := engine.New() - // Load builtins - if err := builtins.Register(eng); err != nil { - log.Fatal(err) - } - // load the daemon in the background so we can immediately start - // the http api so that connections don't fail while the daemon - // is booting - go func() { - // Load plugin: httpapi - job := eng.Job("initserver") - job.Setenv("Pidfile", *pidfile) - job.Setenv("Root", realRoot) - job.SetenvBool("AutoRestart", *flAutoRestart) - job.SetenvList("Dns", flDns.GetAll()) - job.SetenvList("DnsSearch", flDnsSearch.GetAll()) - job.SetenvBool("EnableIptables", *flEnableIptables) - job.SetenvBool("EnableIpForward", *flEnableIpForward) - job.Setenv("BridgeIface", *bridgeName) - job.Setenv("BridgeIP", *bridgeIp) - job.Setenv("DefaultIp", *flDefaultIp) - job.SetenvBool("InterContainerCommunication", *flInterContainerComm) - job.Setenv("GraphDriver", *flGraphDriver) - job.SetenvList("GraphOptions", flGraphOpts.GetAll()) - job.Setenv("ExecDriver", *flExecDriver) - job.SetenvInt("Mtu", *flMtu) - job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled) - job.SetenvList("Sockets", flHosts.GetAll()) - if err := job.Run(); err != nil { - log.Fatal(err) - } - // after the daemon is done setting up we can tell the api to start - // accepting connections - if err := eng.Job("acceptconnections").Run(); err != nil { - log.Fatal(err) - } - }() - - // TODO actually have a resolved graphdriver to show? - log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s", - dockerversion.VERSION, - dockerversion.GITCOMMIT, - *flExecDriver, - *flGraphDriver) - - // Serve api - job := eng.Job("serveapi", flHosts.GetAll()...) 
- job.SetenvBool("Logging", true) - job.SetenvBool("EnableCors", *flEnableCors) - job.Setenv("Version", dockerversion.VERSION) - job.Setenv("SocketGroup", *flSocketGroup) - - job.SetenvBool("Tls", *flTls) - job.SetenvBool("TlsVerify", *flTlsVerify) - job.Setenv("TlsCa", *flCa) - job.Setenv("TlsCert", *flCert) - job.Setenv("TlsKey", *flKey) - job.SetenvBool("BufferRequests", true) - if err := job.Run(); err != nil { - log.Fatal(err) - } - } else { - if flHosts.Len() > 1 { - log.Fatal("Please specify only one -H") - } - protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2) - - var ( - cli *client.DockerCli - tlsConfig tls.Config - ) - tlsConfig.InsecureSkipVerify = true - - // If we should verify the server, we need to load a trusted ca - if *flTlsVerify { + // If tls is enabled, try to load and send client certificates + if *flTls || *flTlsVerify { + _, errCert := os.Stat(*flCert) + _, errKey := os.Stat(*flKey) + if errCert == nil && errKey == nil { *flTls = true - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(*flCa) + cert, err := tls.LoadX509KeyPair(*flCert, *flKey) if err != nil { - log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err) + log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err) } - certPool.AppendCertsFromPEM(file) - tlsConfig.RootCAs = certPool - tlsConfig.InsecureSkipVerify = false + tlsConfig.Certificates = []tls.Certificate{cert} } + } - // If tls is enabled, try to load and send client certificates - if *flTls || *flTlsVerify { - _, errCert := os.Stat(*flCert) - _, errKey := os.Stat(*flKey) - if errCert == nil && errKey == nil { - *flTls = true - cert, err := tls.LoadX509KeyPair(*flCert, *flKey) - if err != nil { - log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} + if *flTls || *flTlsVerify { + cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig) + } else { + cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil) + } + + if err := cli.Cmd(flag.Args()...); err != nil { + if sterr, ok := err.(*utils.StatusError); ok { + if sterr.Status != "" { + log.Println(sterr.Status) } + os.Exit(sterr.StatusCode) } - - if *flTls || *flTlsVerify { - cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig) - } else { - cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil) - } - - if err := cli.ParseCommands(flag.Args()...); err != nil { - if sterr, ok := err.(*utils.StatusError); ok { - if sterr.Status != "" { - log.Println(sterr.Status) - } - os.Exit(sterr.StatusCode) - } - log.Fatal(err) - } + log.Fatal(err) } } func showVersion() { fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT) } - -func checkKernelAndArch() error { - // Check for unsupported architectures - if runtime.GOARCH != "amd64" { - return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) - } - // Check for unsupported kernel versions - // FIXME: it would be cleaner to not test for specific versions, but rather - // test for specific functionalities. - // Unfortunately we can't test for the feature "does not cause a kernel panic" - // without actually causing a kernel panic, so we need this workaround until - // the circumstances of pre-3.8 crashes are clearer. 
- // For details see http://github.com/dotcloud/docker/issues/407 - if k, err := utils.GetKernelVersion(); err != nil { - log.Printf("WARNING: %s\n", err) - } else { - if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { - if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { - log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String()) - } - } - } - return nil -} diff --git a/components/engine/docker/flags.go b/components/engine/docker/flags.go new file mode 100644 index 0000000000..baae40eafc --- /dev/null +++ b/components/engine/docker/flags.go @@ -0,0 +1,42 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" +) + +var ( + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") +) + +func init() { + if dockerCertPath == "" { + dockerCertPath = filepath.Join(os.Getenv("HOME"), ".docker") + } +} + +var ( + flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") + flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") + flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") + flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group") + flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") + flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags") + flTlsVerify = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") + + // these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs + flCa *string + flCert *string + flKey *string + flHosts []string +) + +func init() { + flCa = flag.String([]string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust only remotes providing a certificate signed by the CA given here") + flCert = flag.String([]string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file") + flKey = flag.String([]string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file") + opts.HostListVar(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") +} diff --git a/components/engine/dockerinit/dockerinit.go b/components/engine/dockerinit/dockerinit.go index 1d0689387a..c5bba782b0 100644 --- a/components/engine/dockerinit/dockerinit.go +++ b/components/engine/dockerinit/dockerinit.go @@ -1,11 +1,12 @@ package main import ( - "github.com/dotcloud/docker/sysinit" + _ "github.com/docker/docker/daemon/execdriver/lxc" + _ "github.com/docker/docker/daemon/execdriver/native" + "github.com/docker/docker/reexec" ) func main() { // Running in init mode - sysinit.SysInit() - return + reexec.Init() } diff --git a/components/engine/docs/Dockerfile b/components/engine/docs/Dockerfile index 329646ed01..a50b396624 100644 --- a/components/engine/docs/Dockerfile +++ b/components/engine/docs/Dockerfile @@ -1,10 +1,10 @@ # -# See the top level Makefile in https://github.com/dotcloud/docker for usage. 
+# See the top level Makefile in https://github.com/docker/docker for usage. # FROM debian:jessie MAINTAINER Sven Dowideit (@SvenDowideit) -RUN apt-get update && apt-get install -yq make python-pip python-setuptools vim-tiny git gettext +RUN apt-get update && apt-get install -y make python-pip python-setuptools vim-tiny git gettext RUN pip install mkdocs @@ -16,6 +16,9 @@ RUN pip install mkdocs # this version works, the current versions fail in different ways RUN pip install awscli==1.3.9 +# make sure the git clone is not an old cache - we've published old versions a few times now +ENV CACHE_BUST Jul2014 + # get my sitemap.xml branch of mkdocs and use that for now RUN git clone https://github.com/SvenDowideit/mkdocs &&\ cd mkdocs/ &&\ @@ -27,15 +30,20 @@ ADD MAINTAINERS /docs/sources/humans.txt WORKDIR /docs RUN VERSION=$(cat /docs/VERSION) &&\ + MAJOR_MINOR="${VERSION%.*}" &&\ + for i in $(seq $MAJOR_MINOR -0.1 1.0) ; do echo "
  <li><a href='/v$i/'>Version v$i</a></li>
  • " ; done > /docs/sources/versions.html_fragment &&\ GIT_BRANCH=$(cat /docs/GIT_BRANCH) &&\ GITCOMMIT=$(cat /docs/GITCOMMIT) &&\ AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET) &&\ + BUILD_DATE=$(date) &&\ sed -i "s/\$VERSION/$VERSION/g" /docs/theme/mkdocs/base.html &&\ + sed -i "s/\$MAJOR_MINOR/v$MAJOR_MINOR/g" /docs/theme/mkdocs/base.html &&\ sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" /docs/theme/mkdocs/base.html &&\ sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" /docs/theme/mkdocs/base.html &&\ + sed -i "s/\$BUILD_DATE/$BUILD_DATE/g" /docs/theme/mkdocs/base.html &&\ sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" /docs/theme/mkdocs/base.html -# note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525 +# note, EXPOSE is only last because of https://github.com/docker/docker/issues/3525 EXPOSE 8000 CMD ["mkdocs", "serve"] diff --git a/components/engine/docs/README.md b/components/engine/docs/README.md index 17299401e7..ba1feb50d4 100755 --- a/components/engine/docs/README.md +++ b/components/engine/docs/README.md @@ -5,7 +5,7 @@ Markdown, as implemented by [MkDocs](http://mkdocs.org). The HTML files are built and hosted on `https://docs.docker.com`, and update automatically after each change to the master or release branch of [Docker on -GitHub](https://github.com/dotcloud/docker) thanks to post-commit hooks. The +GitHub](https://github.com/docker/docker) thanks to post-commit hooks. The `docs` branch maps to the "latest" documentation and the `master` (unreleased development) branch maps to the "master" documentation. diff --git a/components/engine/docs/docs-update.py b/components/engine/docs/docs-update.py index 31bb47db3b..2ff305c5ab 100755 --- a/components/engine/docs/docs-update.py +++ b/components/engine/docs/docs-update.py @@ -7,6 +7,7 @@ # ./docs/update.py /usr/bin/docker # +import datetime import re from sys import argv import subprocess @@ -15,6 +16,9 @@ import os.path script, docker_cmd = argv +# date "+%B %Y" +date_string = datetime.date.today().strftime('%B %Y') + def print_usage(outtext, docker_cmd, command): help = "" try: @@ -204,9 +208,9 @@ def update_man_pages(): outtext.write("# HISTORY\n") if history != "": outtext.write(history+"\n") - recent_history_re = re.compile(".*June 2014.*", re.MULTILINE|re.DOTALL) + recent_history_re = re.compile(".*"+date_string+".*", re.MULTILINE|re.DOTALL) if not recent_history_re.match(history): - outtext.write("June 2014, updated by Sven Dowideit \n") + outtext.write(date_string+", updated by Sven Dowideit \n") outtext.close() # main diff --git a/components/engine/docs/man/Dockerfile b/components/engine/docs/man/Dockerfile index 438227df89..9910bd48f9 100644 --- a/components/engine/docs/man/Dockerfile +++ b/components/engine/docs/man/Dockerfile @@ -1,5 +1,7 @@ -FROM fedora:20 -MAINTAINER ipbabble -# Update and install pandoc -RUN yum -y update; yum clean all; -RUN yum -y install pandoc; +FROM golang:1.3 +RUN mkdir -p /go/src/github.com/cpuguy83 +RUN mkdir -p /go/src/github.com/cpuguy83 \ + && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ + && cd /go/src/github.com/cpuguy83/go-md2man \ + && go get -v ./... 
+CMD ["/go/bin/go-md2man", "--help"] diff --git a/components/engine/docs/man/Dockerfile.5.md b/components/engine/docs/man/Dockerfile.5.md index b0a863f657..9772d4e114 100644 --- a/components/engine/docs/man/Dockerfile.5.md +++ b/components/engine/docs/man/Dockerfile.5.md @@ -96,7 +96,7 @@ or If you use the shell form of the CMD, the executes in /bin/sh -c: **FROM ubuntu** **CMD echo "This is a test." | wc -** - If you run wihtout a shell, then you must express the command as a + If you run without a shell, then you must express the command as a JSON arry and give the full path to the executable. This array form is the preferred form of CMD. All additional parameters must be individually expressed as strings in the array: diff --git a/components/engine/docs/man/README.md b/components/engine/docs/man/README.md index 45f1a91c00..a52e0cbe62 100644 --- a/components/engine/docs/man/README.md +++ b/components/engine/docs/man/README.md @@ -44,27 +44,26 @@ Markdown (*.md) files. # Generating man pages from the Markdown files -The recommended approach for generating the man pages is via a Docker -container. Using the supplied Dockerfile, Docker will create a Fedora based -container and isolate the Pandoc installation. This is a seamless process, -saving you from dealing with Pandoc and dependencies on your own computer. +The recommended approach for generating the man pages is via a Docker +container using the supplied `Dockerfile` to create an image with the correct +environment. This uses `go-md2man`, a pure Go Markdown to man page generator. -## Building the Fedora / Pandoc image +## Building the md2man image -There is a Dockerfile provided in the `docker/docs/man` directory. +There is a `Dockerfile` provided in the `docker/docs/man` directory. -Using this Dockerfile, create a Docker image tagged `fedora/pandoc`: +Using this `Dockerfile`, create a Docker image tagged `docker/md2man`: - docker build -t fedora/pandoc . + docker build -t docker/md2man . -## Utilizing the Fedora / Pandoc image +## Utilizing the image Once the image is built, run a container using the image with *volumes*: - docker run -v //docker/docs/man:/pandoc:rw \ - -w /pandoc -i fedora/pandoc /pandoc/md2man-all.sh + docker run -v //docker/docs/man:/docs:rw \ + -w /docs -i docker/md2man /docs/md2man-all.sh -The Pandoc Docker container will process the Markdown files and generate +The `md2man` Docker container will process the Markdown files and generate the man pages inside the `docker/docs/man/man1` directory using Docker volumes. For more information on Docker volumes see the man page for `docker run` and also look at the article [Sharing Directories via Volumes] diff --git a/components/engine/docs/man/docker-attach.1.md b/components/engine/docs/man/docker-attach.1.md index 1b4e68b65f..7deda6c75e 100644 --- a/components/engine/docs/man/docker-attach.1.md +++ b/components/engine/docs/man/docker-attach.1.md @@ -14,7 +14,7 @@ docker-attach - Attach to a running container If you **docker run** a container in detached mode (**-d**), you can reattach to the detached container with **docker attach** using the container's ID or name. -You can detach from the container again (and leave it running) with `CTRL-q +You can detach from the container again (and leave it running) with `CTRL-p CTRL-q` (for a quiet exit), or `CTRL-c` which will send a SIGKILL to the container, or `CTRL-\` to get a stacktrace of the Docker client when it quits. When you detach from a container the exit code will be returned to @@ -25,7 +25,7 @@ the client. 
Do not attach STDIN. The default is *false*. **--sig-proxy**=*true*|*false* - Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied. The default is *true*. + Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. # EXAMPLES diff --git a/components/engine/docs/man/docker-commit.1.md b/components/engine/docs/man/docker-commit.1.md index bbd1db21b0..31edcc0397 100644 --- a/components/engine/docs/man/docker-commit.1.md +++ b/components/engine/docs/man/docker-commit.1.md @@ -8,6 +8,7 @@ docker-commit - Create a new image from a container's changes **docker commit** [**-a**|**--author**[=*AUTHOR*]] [**-m**|**--message**[=*MESSAGE*]] +[**-p**|**--pause**[=*true*]] CONTAINER [REPOSITORY[:TAG]] # DESCRIPTION @@ -20,8 +21,8 @@ Using an existing container's name or ID you can create a new image. **-m**, **--message**="" Commit message -**-p, --pause**=true - Pause container during commit +**-p**, **--pause**=*true*|*false* + Pause container during commit. The default is *true*. # EXAMPLES @@ -30,10 +31,11 @@ An existing Fedora based container has had Apache installed while running in interactive mode with the bash shell. Apache is also running. To create a new image run docker ps to find the container's ID and then run: - # docker commit -m= "Added Apache to Fedora base image" \ + # docker commit -m="Added Apache to Fedora base image" \ -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and in June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit diff --git a/components/engine/docs/man/docker-info.1.md b/components/engine/docs/man/docker-info.1.md index 2945d61dfe..bf64a7b543 100644 --- a/components/engine/docs/man/docker-info.1.md +++ b/components/engine/docs/man/docker-info.1.md @@ -29,18 +29,14 @@ There are no available options. Here is a sample output: # docker info - Containers: 18 - Images: 95 - Storage Driver: devicemapper - Pool Name: docker-8:1-170408448-pool - Data file: /var/lib/docker/devicemapper/devicemapper/data - Metadata file: /var/lib/docker/devicemapper/devicemapper/metadata - Data Space Used: 9946.3 Mb - Data Space Total: 102400.0 Mb - Metadata Space Used: 9.9 Mb - Metadata Space Total: 2048.0 Mb - Execution Driver: native-0.1 - Kernel Version: 3.10.0-116.el7.x86_64 + Containers: 14 + Images: 52 + Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Dirs: 80 + Execution Driver: native-0.2 + Kernel Version: 3.13.0-24-generic + Operating System: Ubuntu 14.04 LTS # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) diff --git a/components/engine/docs/man/docker-logout.1.md b/components/engine/docs/man/docker-logout.1.md new file mode 100644 index 0000000000..07dcdcbc33 --- /dev/null +++ b/components/engine/docs/man/docker-logout.1.md @@ -0,0 +1,27 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logout - Log out from a Docker registry. If no server is specified, "https://index.docker.io/v1/" is the default. + +# SYNOPSIS +**docker logout** +[SERVER] + +# DESCRIPTION +Log the user out from a Docker registry. If no server is +specified, "https://index.docker.io/v1/" is the default. If you want to +log out from a private registry, you can specify this by adding the server name. + +# OPTIONS +There are no available options. 
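As an aside on what `docker logout` actually touches: the client of this era keeps registry credentials in a JSON auth file, and logging out simply drops the entry for the given server. The Go sketch below illustrates that idea only; the `~/.dockercfg` location and the flat map-of-entries schema are assumptions made for illustration, not a copy of the client code.

```go
package main

import (
	"encoding/json"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

// logout drops the saved credentials for one registry server.
// File location and schema are assumptions for this sketch.
func logout(server string) error {
	cfgPath := filepath.Join(os.Getenv("HOME"), ".dockercfg")
	data, err := ioutil.ReadFile(cfgPath)
	if err != nil {
		return err
	}
	auths := map[string]json.RawMessage{}
	if err := json.Unmarshal(data, &auths); err != nil {
		return err
	}
	delete(auths, server) // forget this registry; other entries are untouched
	out, err := json.Marshal(auths)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(cfgPath, out, 0600)
}

func main() {
	if err := logout("https://index.docker.io/v1/"); err != nil {
		log.Fatal(err)
	}
}
```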
+ + # EXAMPLES + +## Log out from a local registry + + # docker logout localhost:8080 + +# HISTORY +June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io) +July 2014, updated by Sven Dowideit diff --git a/components/engine/docs/man/docker-logs.1.md b/components/engine/docs/man/docker-logs.1.md index 5c3df75b9e..1fbd229d5d 100644 --- a/components/engine/docs/man/docker-logs.1.md +++ b/components/engine/docs/man/docker-logs.1.md @@ -8,6 +8,7 @@ docker-logs - Fetch the logs of a container **docker logs** [**-f**|**--follow**[=*false*]] [**-t**|**--timestamps**[=*false*]] +[**--tail**[=*"all"*]] CONTAINER # DESCRIPTION @@ -27,7 +28,11 @@ then continue streaming new output from the container’s stdout and stderr. **-t**, **--timestamps**=*true*|*false* Show timestamps. The default is *false*. +**--tail**="all" + Output the specified number of lines at the end of logs (defaults to all logs) + # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit diff --git a/components/engine/docs/man/docker-pause.1.md b/components/engine/docs/man/docker-pause.1.md index e6c0c2455d..7b4b091a06 100644 --- a/components/engine/docs/man/docker-pause.1.md +++ b/components/engine/docs/man/docker-pause.1.md @@ -8,6 +8,18 @@ docker-pause - Pause all processes within a container **docker pause** CONTAINER +# DESCRIPTION + +The `docker pause` command uses the cgroups freezer to suspend all processes in +a container. Traditionally when suspending a process the `SIGSTOP` signal is +used, which is observable by the process being suspended. With the cgroups freezer +the process is unaware, and unable to capture, that it is being suspended, +and subsequently resumed. + +See the [cgroups freezer documentation] +(https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for +further details. + # OPTIONS There are no available options. diff --git a/components/engine/docs/man/docker-ps.1.md b/components/engine/docs/man/docker-ps.1.md index 9264d53a66..bf22d87da5 100644 --- a/components/engine/docs/man/docker-ps.1.md +++ b/components/engine/docs/man/docker-ps.1.md @@ -8,6 +8,7 @@ docker-ps - List containers **docker ps** [**-a**|**--all**[=*false*]] [**--before**[=*BEFORE*]] +[**-f**|**--filter**[=*[]*]] [**-l**|**--latest**[=*false*]] [**-n**[=*-1*]] [**--no-trunc**[=*false*]] @@ -28,6 +29,10 @@ the running containers. **--before**="" Show only container created before Id or Name, include non-running ones. +**-f**, **--filter**=[] + Provide filter values. Valid filters: + exited=<int> - containers with exit code of <int> + **-l**, **--latest**=*true*|*false* Show only the latest created container, include non-running ones. The default is *false*. @@ -68,3 +73,4 @@ the running containers. April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit diff --git a/components/engine/docs/man/docker-rm.1.md b/components/engine/docs/man/docker-rm.1.md index 1b45376976..bae6a7ea8c 100644 --- a/components/engine/docs/man/docker-rm.1.md +++ b/components/engine/docs/man/docker-rm.1.md @@ -20,7 +20,7 @@ containers on a host use the **docker ps -a** command. # OPTIONS **-f**, **--force**=*true*|*false* - Force removal of running container. The default is *false*. 
+ Force the removal of a running container (uses SIGKILL). The default is *false*. **-l**, **--link**=*true*|*false* Remove the specified link and not the underlying container. The default is *false*. @@ -49,3 +49,5 @@ command. The use that name as follows: April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit diff --git a/components/engine/docs/man/docker-run.1.md b/components/engine/docs/man/docker-run.1.md index e7571ac21a..225fb78cb8 100644 --- a/components/engine/docs/man/docker-run.1.md +++ b/components/engine/docs/man/docker-run.1.md @@ -8,9 +8,12 @@ docker-run - Run a command in a new container **docker run** [**-a**|**--attach**[=*[]*]] [**-c**|**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] [**--cidfile**[=*CIDFILE*]] [**--cpuset**[=*CPUSET*]] [**-d**|**--detach**[=*false*]] +[**--device**[=*[]*]] [**--dns-search**[=*[]*]] [**--dns**[=*[]*]] [**-e**|**--env**[=*[]*]] @@ -27,6 +30,7 @@ docker-run - Run a command in a new container [**-P**|**--publish-all**[=*false*]] [**-p**|**--publish**[=*[]*]] [**--privileged**[=*false*]] +[**--restart**[=*POLICY*]] [**--rm**[=*false*]] [**--sig-proxy**[=*true*]] [**-t**|**--tty**[=*false*]] @@ -67,13 +71,19 @@ the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via **docker run**. -**--cidfile**=*file* - Write the container ID to the file specified. +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cidfile**="" + Write the container ID to the file **--cpuset**="" CPUs in which to allow execution (0-3, 0,1) -**-d**, **-detach**=*true*|*false* +**-d**, **--detach**=*true*|*false* Detached mode. This runs the container in the background. It outputs the new container's ID and any error messages. At any time you can run **docker ps** in the other shell to view a list of the running containers. You can reattach to a @@ -82,18 +92,19 @@ the detached mode, then you cannot use the **-rm** option. When attached in the tty mode, you can detach from a running container without stopping the process by pressing the keys CTRL-P CTRL-Q. +**--device**=[] + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) **--dns-search**=[] - Set custom dns search domains + Set custom DNS search domains **--dns**=*IP-address* Set custom DNS servers. This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this -is the case the **-dns** flags is necessary for every run. +is the case the **--dns** flags is necessary for every run. - -**-e**, **-env**=*environment* +**-e**, **--env**=*environment* Set environment variables. This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container. @@ -110,8 +121,9 @@ pass in more options via the COMMAND. But, sometimes an operator may want to run something else inside the container, so you can override the default ENTRYPOINT at runtime by using a **--entrypoint** and a string to specify the new ENTRYPOINT. 
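The ENTRYPOINT/COMMAND relationship described in this hunk reduces to simple argv concatenation: the entrypoint supplies the leading arguments and the command supplies the rest, so `--entrypoint` just swaps the first slice. A minimal Go sketch of that composition (an illustration of the documented behavior, not the engine's actual code path):

```go
package main

import "fmt"

// buildArgv mirrors the documented behavior: the entrypoint comes first and
// the command (the image's CMD or the arguments given to docker run) is
// appended as its parameters.
func buildArgv(entrypoint, cmd []string) []string {
	return append(append([]string{}, entrypoint...), cmd...)
}

func main() {
	fmt.Println(buildArgv([]string{"/usr/sbin/apache2"}, []string{"-D", "FOREGROUND"}))
	// Prints: [/usr/sbin/apache2 -D FOREGROUND]
}
```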
+ **--env-file**=[] - Read in a line delimited file of ENV variables + Read in a line delimited file of environment variables **--expose**=*port* Expose a port from the container without publishing it to your host. A @@ -120,10 +132,10 @@ developer can expose the port using the EXPOSE parameter of the Dockerfile, 2) the operator can use the **--expose** option with **docker run**, or 3) the container can be started with the **--link**. -**-h**, **-hostname**=*hostname* +**-h**, **--hostname**=*hostname* Sets the container host name that is available inside the container. -**-i**, **-interactive**=*true*|*false* +**-i**, **--interactive**=*true*|*false* When set to true, keep stdin open even if not attached. The default is false. **--link**=*name*:*alias* @@ -136,7 +148,7 @@ which interface and port to use. **--lxc-conf**=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -**-m**, **-memory**=*memory-limit* +**-m**, **--memory**=*memory-limit* Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. If a limit of 0 is specified, the container's memory is not limited. The @@ -165,14 +177,14 @@ and foreground Docker containers. 'container:': reuses another container network stack 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. -**-P**, **-publish-all**=*true*|*false* +**-P**, **--publish-all**=*true*|*false* When set to true publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any client that can reach the host. To find the map between the host ports and the exposed ports, use **docker port**. -**-p**, **-publish**=[] +**-p**, **--publish**=[] Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use **docker port** to see the actual mapping) @@ -190,26 +202,21 @@ outside of a container on the host. **--rm**=*true*|*false* - If set to *true* the container is automatically removed when it exits. The -default is *false*. This option is incompatible with **-d**. - + Automatically remove the container when it exits (incompatible with -d). The default is *false*. **--sig-proxy**=*true*|*false* - When set to true, proxify received signals to the process (even in -non-tty mode). SIGCHLD is not proxied. The default is *true*. + Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. - -**-t**, **-tty**=*true*|*false* +**-t**, **--tty**=*true*|*false* When set to true Docker can allocate a pseudo-tty and attach to the standard input of any container. This can be used, for example, to run a throwaway interactive shell. The default is value is false. - -**-u**, **-user**=*username*,*uid* - Set a username or UID for the container. +**-u**, **--user**="" + Username or UID -**-v**, **-volume**=*volume*[:ro|:rw] +**-v**, **--volume**=*volume*[:ro|:rw] Bind mount a volume to the container. The **-v** option can be used one or @@ -233,7 +240,7 @@ default, the volumes are mounted in the same mode (read write or read only) as the reference container. 
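The `-v` syntax described above is what the `parseBindVolumeSpec` helper introduced earlier in this patch handles on the engine side. A condensed, self-contained sketch of that split (simplified from the patch; the error wording is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// splitVolumeSpec breaks a -v argument into its parts. One field is a plain
// container volume, two adds a host path, three adds a mode (ro or rw).
func splitVolumeSpec(spec string) (host, container, mode string, err error) {
	parts := strings.Split(spec, ":")
	switch len(parts) {
	case 1:
		return "", parts[0], "rw", nil
	case 2:
		return parts[0], parts[1], "rw", nil
	case 3:
		return parts[0], parts[1], parts[2], nil
	}
	return "", "", "", fmt.Errorf("invalid volume specification: %s", spec)
}

func main() {
	host, container, mode, _ := splitVolumeSpec("/var/db:/data:ro")
	fmt.Println(host, container, mode) // /var/db /data ro
}
```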
-**-w**, **-workdir**=*directory* +**-w**, **--workdir**=*directory* Working directory inside the container. The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator @@ -241,7 +248,10 @@ can override the working directory by using the **-w** option. **IMAGE** - The image name or ID. + The image name or ID. You can specify a version of an image you'd like to run + the container with by adding image:tag to the command. For example, + `docker run ubuntu:14.04`. + **COMMAND** @@ -338,7 +348,7 @@ fedora-data image: Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the -DATA container in yet another container via the fedora-container1 intermidiery +DATA container in yet another container via the fedora-container1 intermediary container, allowing to abstract the actual data source from users of that data: # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash @@ -371,3 +381,4 @@ changes will also be reflected on the host in /var/db. April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit diff --git a/components/engine/docs/man/docker-tag.1.md b/components/engine/docs/man/docker-tag.1.md index 041c9e1cb5..a42ebe7702 100644 --- a/components/engine/docs/man/docker-tag.1.md +++ b/components/engine/docs/man/docker-tag.1.md @@ -7,7 +7,7 @@ docker-tag - Tag an image into a repository # SYNOPSIS **docker tag** [**-f**|**--force**[=*false*]] - IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG] + IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] # DESCRIPTION This will give a new alias to an image in the repository. This refers to the @@ -29,7 +29,7 @@ separated by a ':' **TAG** The tag you are assigning to the image. Though this is arbitrary it is -recommended to be used for a version to disinguish images with the same name. +recommended to be used for a version to distinguish images with the same name. Note that here TAG is a part of the overall name or "tag". # OPTIONS @@ -56,3 +56,4 @@ registry you must tag it with the registry hostname and port (if needed). April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit diff --git a/components/engine/docs/man/docker-unpause.1.md b/components/engine/docs/man/docker-unpause.1.md index 8949548b67..dfce16324e 100644 --- a/components/engine/docs/man/docker-unpause.1.md +++ b/components/engine/docs/man/docker-unpause.1.md @@ -8,6 +8,15 @@ docker-unpause - Unpause all processes within a container **docker unpause** CONTAINER +# DESCRIPTION + +The `docker unpause` command uses the cgroups freezer to un-suspend all +processes in a container. + +See the [cgroups freezer documentation] +(https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for +further details. + # OPTIONS There are no available options. diff --git a/components/engine/docs/man/docker.1.md b/components/engine/docs/man/docker.1.md index a7a826ed9f..3932097255 100644 --- a/components/engine/docs/man/docker.1.md +++ b/components/engine/docs/man/docker.1.md @@ -64,9 +64,6 @@ unix://[/path/to/socket] to use. 
**-p**="" Path to use for daemon PID file. Default is `/var/run/docker.pid` -**-r**=*true*|*false* - Restart previously running containers. Default is true. - **-s**="" Force the Docker runtime to use a specific storage driver. @@ -74,7 +71,7 @@ unix://[/path/to/socket] to use. Print version information and quit. Default is false. **--selinux-enabled**=*true*|*false* - Enable selinux support. Default is false. + Enable selinux support. Default is false. SELinux does not presently support the BTRFS storage driver. # COMMANDS **docker-attach(1)** @@ -124,6 +121,9 @@ inside it) **docker-login(1)** Register or Login to a Docker registry server +**docker-logout(1)** + Log the user out of a Docker registry server + **docker-logs(1)** Fetch the logs of a container diff --git a/components/engine/docs/man/md2man-all.sh b/components/engine/docs/man/md2man-all.sh index 12d84de232..97c65c93bc 100755 --- a/components/engine/docs/man/md2man-all.sh +++ b/components/engine/docs/man/md2man-all.sh @@ -18,5 +18,5 @@ for FILE in *.md; do continue fi mkdir -p "./man${num}" - pandoc -s -t man "$FILE" -o "./man${num}/${name}" + go-md2man -in "$FILE" -out "./man${num}/${name}" done diff --git a/components/engine/docs/mkdocs.yml b/components/engine/docs/mkdocs.yml index f4ebcb68fe..c45b717d91 100755 --- a/components/engine/docs/mkdocs.yml +++ b/components/engine/docs/mkdocs.yml @@ -6,7 +6,7 @@ site_favicon: img/favicon.png dev_addr: '0.0.0.0:8000' -repo_url: https://github.com/dotcloud/docker/ +repo_url: https://github.com/docker/docker/ docs_dir: sources @@ -83,6 +83,7 @@ pages: - ['articles/security.md', 'Articles', 'Security'] - ['articles/https.md', 'Articles', 'Running Docker with HTTPS'] - ['articles/host_integration.md', 'Articles', 'Automatically starting Containers'] +- ['articles/certificates.md', 'Articles', 'Using certificates for repository client verification'] - ['articles/using_supervisord.md', 'Articles', 'Using Supervisor'] - ['articles/cfengine_process_management.md', 'Articles', 'Process management with CFEngine'] - ['articles/puppet.md', 'Articles', 'Using Puppet'] @@ -104,6 +105,7 @@ pages: - ['reference/api/registry_api.md', 'Reference', 'Docker Registry API'] - ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry Spec'] - ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API'] +- ['reference/api/docker_remote_api_v1.14.md', 'Reference', 'Docker Remote API v1.14'] - ['reference/api/docker_remote_api_v1.13.md', 'Reference', 'Docker Remote API v1.13'] - ['reference/api/docker_remote_api_v1.12.md', 'Reference', 'Docker Remote API v1.12'] - ['reference/api/docker_remote_api_v1.11.md', 'Reference', 'Docker Remote API v1.11'] @@ -119,7 +121,6 @@ pages: - ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**'] - ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries'] -- ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker Hub OAuth API'] - ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub Accounts API'] - ['jsearch.md', '**HIDDEN**'] diff --git a/components/engine/docs/release.sh b/components/engine/docs/release.sh index f6dc2ec59f..ba309aaea9 100755 --- a/components/engine/docs/release.sh +++ b/components/engine/docs/release.sh @@ -27,6 +27,10 @@ if [ "$$AWS_S3_BUCKET" == "docs.docker.com" ]; then fi fi +# Remove the last version - 1.0.2-dev -> 1.0 +MAJOR_MINOR="v${VERSION%.*}" +export MAJOR_MINOR + export 
BUCKET=$AWS_S3_BUCKET export AWS_CONFIG_FILE=$(pwd)/awsconfig @@ -69,7 +73,8 @@ upload_current_documentation() { # a really complicated way to send only the files we want # if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go - endings=( json html xml css js gif png JPG ttf svg woff) + # versions.html_fragment + endings=( json html xml css js gif png JPG ttf svg woff html_fragment ) for i in ${endings[@]}; do include="" for j in ${endings[@]}; do @@ -101,13 +106,16 @@ upload_current_documentation() { } setup_s3 -build_current_documentation -upload_current_documentation -# Remove the last version - 1.0.2-dev -> 1.0 -MAJOR_MINOR="v${VERSION%.*}" +# Default to only building the version specific docs so we don't clobber the latest by accident with old versions +if [ "$BUILD_ROOT" == "yes" ]; then + echo "Building root documentation" + build_current_documentation + upload_current_documentation +fi #build again with /v1.0/ prefix sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml +echo "Building the /$MAJOR_MINOR/ documentation" build_current_documentation upload_current_documentation "/$MAJOR_MINOR/" diff --git a/components/engine/docs/sources/articles/baseimages.md b/components/engine/docs/sources/articles/baseimages.md index c795b7a0a7..bc677eb8a3 100644 --- a/components/engine/docs/sources/articles/baseimages.md +++ b/components/engine/docs/sources/articles/baseimages.md @@ -33,13 +33,13 @@ It can be as simple as this to create an Ubuntu base image: There are more example scripts for creating base images in the Docker GitHub Repo: - - [BusyBox](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh) + - [BusyBox](https://github.com/docker/docker/blob/master/contrib/mkimage-busybox.sh) - CentOS / Scientific Linux CERN (SLC) [on Debian/Ubuntu]( - https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh) or + https://github.com/docker/docker/blob/master/contrib/mkimage-rinse.sh) or [on CentOS/RHEL/SLC/etc.]( - https://github.com/dotcloud/docker/blob/master/contrib/mkimage-yum.sh) + https://github.com/docker/docker/blob/master/contrib/mkimage-yum.sh) - [Debian / Ubuntu]( - https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh) + https://github.com/docker/docker/blob/master/contrib/mkimage-debootstrap.sh) ## Creating a simple base image using `scratch` @@ -52,7 +52,7 @@ which you can `docker pull`. You can then use that image to base your new minimal containers `FROM`: FROM scratch - ADD true-asm /true + COPY true-asm /true CMD ["/true"] The Dockerfile above is from extremely minimal image - [tianon/true]( diff --git a/components/engine/docs/sources/articles/certificates.md b/components/engine/docs/sources/articles/certificates.md new file mode 100644 index 0000000000..90d3f1b356 --- /dev/null +++ b/components/engine/docs/sources/articles/certificates.md @@ -0,0 +1,114 @@ +page_title: Using certificates for repository client verification +page_description: How to set up and use certificates with a registry to verify access +page_keywords: Usage, registry, repository, client, root, certificate, docker, apache, ssl, tls, documentation, examples, articles, tutorials + +# Using certificates for repository client verification + +In [Running Docker with HTTPS](/articles/https), you learned that, by default, +Docker runs via a non-networked Unix socket and TLS must be enabled in order +to have the Docker client and the daemon communicate securely over HTTPS. 
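Before the walk-through, it may help to see what the client side of such a setup looks like programmatically. The Go sketch below mirrors the TLS wiring added to `docker/docker.go` in this same patch (trust a single CA, present a client key pair); the file names are placeholders, not paths this article mandates.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
)

// clientTLSConfig trusts only the given CA and presents a client
// certificate: the same shape of config the Docker client builds
// for --tlsverify. Paths are illustrative.
func clientTLSConfig(caFile, certFile, keyFile string) (*tls.Config, error) {
	ca, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(ca)

	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	return &tls.Config{RootCAs: pool, Certificates: []tls.Certificate{cert}}, nil
}

func main() {
	if _, err := clientTLSConfig("ca.pem", "client.cert", "client.key"); err != nil {
		log.Fatal(err)
	}
}
```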
+ +Now, you will see how to allow the Docker registry (i.e., *a server*) to +verify that the Docker daemon (i.e., *a client*) has the right to access the +images being hosted with *certificate-based client-server authentication*. + +We will show you how to install a Certificate Authority (CA) root certificate +for the registry and how to set the client TLS certificate for verification. + +## Understanding the configuration + +A custom certificate is configured by creating a directory under +`/etc/docker/certs.d` using the same name as the registry's hostname (e.g., +`localhost`). All `*.crt` files are added to this directory as CA roots. + +> **Note:** +> In the absence of any root certificate authorities, Docker +> will use the system default (i.e., host's root CA set). + +The presence of one or more `.key/cert` pairs indicates to Docker +that there are custom certificates required for access to the desired +repository. + +> **Note:** +> If there are multiple certificates, each will be tried in alphabetical +> order. If there is an authentication error (e.g., 403, 5xx, etc.), Docker +> will continue to try with the next certificate. + +Our example is set up like this: + + /etc/docker/certs.d/ <-- Certificate directory + └── localhost <-- Hostname + ├── client.cert <-- Client certificate + ├── client.key <-- Client key + └── localhost.crt <-- Registry certificate + +## Creating the client certificates + +You will use OpenSSL's `genrsa` and `req` commands to first generate an RSA +key and then use the key to create the certificate request. + + $ openssl genrsa -out client.key 1024 + $ openssl req -new -x509 -text -key client.key -out client.cert + +> **Warning:** +> Using TLS and managing a CA is an advanced topic. +> You should be familiar with OpenSSL, x509, and TLS before +> attempting to use them in production. + +> **Warning:** +> These TLS commands will only generate a working set of certificates on Linux. +> The version of OpenSSL in Mac OS X is incompatible with the type of +> certificate Docker requires. + +## Testing the verification setup + +You can test this setup by using Apache to host a Docker registry. +For this purpose, you can copy a registry tree (containing images) inside +the Apache root. + +> **Note:** +> You can find such an example [here]( +> http://people.gnome.org/~alexl/v1.tar.gz) - which contains the busybox image. + +Once you set up the registry, you can use the following Apache configuration +to implement certificate-based protection. + + # This must be in the root context, otherwise it causes a re-negotiation + # which is not supported by the TLS implementation in go + SSLVerifyClient optional_no_ca + + <Location /v1> + Action cert-protected /cgi-bin/cert.cgi + SetHandler cert-protected + + Header set x-docker-registry-version "0.6.2" + SetEnvIf Host (.*) custom_host=$1 + Header set X-Docker-Endpoints "%{custom_host}e" + </Location> + +Save the above content as `/etc/httpd/conf.d/registry.conf`, and +continue with creating a `cert.cgi` file under `/var/www/cgi-bin/`. 
+ + #!/bin/bash + if [ "$HTTPS" != "on" ]; then + echo "Status: 403 Not using SSL" + echo "x-docker-registry-version: 0.6.2" + echo + exit 0 + fi + if [ "$SSL_CLIENT_VERIFY" == "NONE" ]; then + echo "Status: 403 Client certificate invalid" + echo "x-docker-registry-version: 0.6.2" + echo + exit 0 + fi + echo "Content-length: $(stat --printf='%s' $PATH_TRANSLATED)" + echo "x-docker-registry-version: 0.6.2" + echo "X-Docker-Endpoints: $SERVER_NAME" + echo "X-Docker-Size: 0" + echo + + cat $PATH_TRANSLATED + +This CGI script will ensure that all requests to `/v1` *without* a valid +certificate will be returned with a `403` (i.e., HTTP forbidden) error. diff --git a/components/engine/docs/sources/articles/cfengine_process_management.md b/components/engine/docs/sources/articles/cfengine_process_management.md index 6bb4df66ae..a9441a6d35 100644 --- a/components/engine/docs/sources/articles/cfengine_process_management.md +++ b/components/engine/docs/sources/articles/cfengine_process_management.md @@ -65,13 +65,12 @@ The first two steps can be done as part of a Dockerfile, as follows. FROM ubuntu MAINTAINER Eystein Måløy Stenberg - RUN apt-get -y install wget lsb-release unzip ca-certificates + RUN apt-get update && apt-get install -y wget lsb-release unzip ca-certificates # install latest CFEngine RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add - RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list - RUN apt-get update - RUN apt-get install cfengine-community + RUN apt-get update && apt-get install -y cfengine-community # install cfe-docker process management policy RUN wget https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/ @@ -80,7 +79,7 @@ The first two steps can be done as part of a Dockerfile, as follows. RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip # apache2 and openssh are just for testing purposes, install your own apps here - RUN apt-get -y install openssh-server apache2 + RUN apt-get update && apt-get install -y openssh-server apache2 RUN mkdir -p /var/run/sshd RUN echo "root:password" | chpasswd # need a password for ssh diff --git a/components/engine/docs/sources/articles/dsc.md b/components/engine/docs/sources/articles/dsc.md index 94f5e9d4db..5e05c40c14 100644 --- a/components/engine/docs/sources/articles/dsc.md +++ b/components/engine/docs/sources/articles/dsc.md @@ -8,7 +8,7 @@ Windows PowerShell Desired State Configuration (DSC) is a configuration management tool that extends the existing functionality of Windows PowerShell. DSC uses a declarative syntax to define the state in which a target should be configured. More information about PowerShell DSC can be found at -http://technet.microsoft.com/en-us/library/dn249912.aspx. +[http://technet.microsoft.com/en-us/library/dn249912.aspx](http://technet.microsoft.com/en-us/library/dn249912.aspx). ## Requirements @@ -17,14 +17,14 @@ To use this guide you'll need a Windows host with PowerShell v4.0 or newer. The included DSC configuration script also uses the official PPA so only an Ubuntu target is supported. The Ubuntu target must already have the required OMI Server and PowerShell DSC for Linux providers installed. More -information can be found at https://github.com/MSFTOSSMgmt/WPSDSCLinux. The -source repository listed below also includes PowerShell DSC for Linux +information can be found at [https://github.com/MSFTOSSMgmt/WPSDSCLinux](https://github.com/MSFTOSSMgmt/WPSDSCLinux). 
+The source repository listed below also includes PowerShell DSC for Linux
 installation and init scripts along with more detailed installation
 information.
 
 ## Installation
 
 The DSC configuration example source is available in the following repository:
-https://github.com/anweiss/DockerClientDSC. It can be cloned with:
+[https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC). It can be cloned with:
 
     $ git clone https://github.com/anweiss/DockerClientDSC.git
 
@@ -37,15 +37,18 @@ be used to establish the required CIM session(s) and execute the
 `Set-DscConfiguration` cmdlet.
 
 More detailed usage information can be found at
-https://github.com/anweiss/DockerClientDSC.
+[https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC).
 
-### Run Configuration
+### Install Docker
 
 The Docker installation configuration is equivalent to running:
 
 ```
-apt-get install docker.io
-ln -sf /usr/bin/docker.io /usr/local/bin/docker
-sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io
+apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys \
+36A1D7869245C8950F966E92D8576A8BA88D21E9
+sh -c "echo deb https://get.docker.io/ubuntu docker main \
+> /etc/apt/sources.list.d/docker.list"
+apt-get update
+apt-get install lxc-docker
 ```
 
 Ensure that your current working directory is set to the `DockerClientDSC`
@@ -83,35 +86,82 @@ file and execute configurations against multiple nodes as such:
 ```
 
 ### Images
-Image configuration is equivalent to running: `docker pull [image]`.
+Image configuration is equivalent to running: `docker pull [image]` or
+`docker rmi -f [image]`.
 
-Using the same Run Configuration steps defined above, execute `DockerClient`
-with the `Image` parameter:
+Using the same steps defined above, execute `DockerClient` with the `Image`
+parameter and apply the configuration:
 
 ```powershell
-DockerClient -Hostname "myhost" -Image node
+DockerClient -Hostname "myhost" -Image "node"
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
 ```
 
-The configuration process can be initiated as before:
+You can also configure the host to pull multiple images:
 
 ```powershell
+DockerClient -Hostname "myhost" -Image "node","mongo"
 .\RunDockerClientConfig.ps1 -Hostname "myhost"
 ```
 
+To remove images, use a hashtable as follows:
+
+```powershell
+DockerClient -Hostname "myhost" -Image @{Name="node"; Remove=$true}
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
+```
+
 ### Containers
 
 Container configuration is equivalent to running:
 
-`docker run -d --name="[containername]" [image] '[command]'`.
-Using the same Run Configuration steps defined above, execute `DockerClient`
-with the `Image`, `ContainerName`, and `Command` parameters:
+```
+docker run -d --name="[containername]" -p '[port]' -e '[env]' --link '[link]' \
+'[image]' '[command]'
+```
+or
 
-```powershell
-DockerClient -Hostname "myhost" -Image node -ContainerName "helloworld" `
--Command 'echo "Hello World!"'
+```
+docker rm -f [containername]
 ```
 
-The configuration process can be initiated as before:
+To create or remove containers, you can use the `Container` parameter with one
+or more hashtables.
The hashtable(s) passed to this parameter can have the
+following properties:
+
+- Name (required)
+- Image (required unless Remove property is set to `$true`)
+- Port
+- Env
+- Link
+- Command
+- Remove
+
+For example, create a hashtable with the settings for your container:
+
+```powershell
+$webContainer = @{Name="web"; Image="anweiss/docker-platynem"; Port="80:80"}
+```
+
+Then, using the same steps defined above, execute
+`DockerClient` with the `-Image` and `-Container` parameters:
+
+```powershell
+DockerClient -Hostname "myhost" -Image node -Container $webContainer
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
+```
+
+Existing containers can also be removed as follows:
+
+```powershell
+$containerToRemove = @{Name="web"; Remove=$true}
+DockerClient -Hostname "myhost" -Container $containerToRemove
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
+```
+
+Here is a hashtable with all of the properties that can be used to create a
+container:
+
+```powershell
+$containerProps = @{Name="web"; Image="node:latest"; Port="80:80"; `
+Env="PORT=80"; Link="db:db"; Command="grunt"}
+```
\ No newline at end of file
diff --git a/components/engine/docs/sources/articles/https.md b/components/engine/docs/sources/articles/https.md
index b6ae4ef37d..739b724c84 100644
--- a/components/engine/docs/sources/articles/https.md
+++ b/components/engine/docs/sources/articles/https.md
@@ -1,23 +1,28 @@
-page_title: Docker HTTPS Setup
-page_description: How to setup docker with https
-page_keywords: docker, example, https, daemon
+page_title: Running Docker with HTTPS
+page_description: How to setup and run Docker with HTTPS
+page_keywords: docker, docs, article, example, https, daemon, tls, ca, certificate
 
 # Running Docker with https
 
 By default, Docker runs via a non-networked Unix socket. It can also
 optionally communicate using an HTTP socket.
 
-If you need Docker reachable via the network in a safe manner, you can
-enable TLS by specifying the tlsverify flag and pointing Docker's
-tlscacert flag to a trusted CA certificate.
+If you need Docker to be reachable via the network in a safe manner, you can
+enable TLS by specifying the `tlsverify` flag and pointing Docker's
+`tlscacert` flag to a trusted CA certificate.
 
-In daemon mode, it will only allow connections from clients
-authenticated by a certificate signed by that CA. In client mode, it
-will only connect to servers with a certificate signed by that CA.
+In daemon mode, it will only allow connections from clients
+authenticated by a certificate signed by that CA. In client mode,
+it will only connect to servers with a certificate signed by that CA.
 
 > **Warning**:
-> Using TLS and managing a CA is an advanced topic. Please make you self
-> familiar with openssl, x509 and tls before using it in production.
+> Using TLS and managing a CA is an advanced topic. Please familiarize yourself
+> with OpenSSL, x509 and TLS before using it in production.
+
+> **Warning**:
+> These TLS commands will only generate a working set of certificates on Linux.
+> Mac OS X comes with a version of OpenSSL that is incompatible with the
+> certificates that Docker requires.
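Given the warning above, it may be worth confirming which OpenSSL build you
are working with before generating anything. A quick check (the output shown
here is only illustrative and will vary by system):

    $ openssl version
    OpenSSL 1.0.1f 6 Jan 2014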
## Create a CA, server and client keys with OpenSSL
@@ -25,29 +30,67 @@ First, initialize the CA serial file and generate CA private and public
 keys:
 
     $ echo 01 > ca.srl
-    $ openssl genrsa -des3 -out ca-key.pem
+    $ openssl genrsa -des3 -out ca-key.pem 2048
+    Generating RSA private key, 2048 bit long modulus
+    ......+++
+    ...............+++
+    e is 65537 (0x10001)
+    Enter pass phrase for ca-key.pem:
+    Verifying - Enter pass phrase for ca-key.pem:
     $ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem
+    Enter pass phrase for ca-key.pem:
+    You are about to be asked to enter information that will be incorporated
+    into your certificate request.
+    What you are about to enter is what is called a Distinguished Name or a DN.
+    There are quite a few fields but you can leave some blank
+    For some fields there will be a default value,
+    If you enter '.', the field will be left blank.
+    -----
+    Country Name (2 letter code) [AU]:
+    State or Province Name (full name) [Some-State]:Queensland
+    Locality Name (eg, city) []:Brisbane
+    Organization Name (eg, company) [Internet Widgits Pty Ltd]:Docker Inc
+    Organizational Unit Name (eg, section) []:Boot2Docker
+    Common Name (e.g. server FQDN or YOUR name) []:your.host.com
+    Email Address []:Sven@home.org.au
 
 Now that we have a CA, you can create a server key and certificate
-signing request. Make sure that "Common Name (e.g., server FQDN or YOUR
-name)" matches the hostname you will use to connect to Docker or just
-use `\*` for a certificate valid for any hostname:
+signing request (CSR). Make sure that "Common Name" (i.e. server FQDN or YOUR
+name) matches the hostname you will use to connect to Docker:
 
-    $ openssl genrsa -des3 -out server-key.pem
-    $ openssl req -new -key server-key.pem -out server.csr
+    $ openssl genrsa -des3 -out server-key.pem 2048
+    Generating RSA private key, 2048 bit long modulus
+    ......................................................+++
+    ............................................+++
+    e is 65537 (0x10001)
+    Enter pass phrase for server-key.pem:
+    Verifying - Enter pass phrase for server-key.pem:
+    $ openssl req -subj '/CN=your.host.com' -new -key server-key.pem -out server.csr
+    Enter pass phrase for server-key.pem:
 
-Next we're going to sign the key with our CA:
+Next, we're going to sign the key with our CA:
 
     $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \
       -out server-cert.pem
+    Signature ok
+    subject=/CN=your.host.com
+    Getting CA Private Key
+    Enter pass phrase for ca-key.pem:
 
 For client authentication, create a client key and certificate signing
 request:
 
-    $ openssl genrsa -des3 -out client-key.pem
-    $ openssl req -new -key client-key.pem -out client.csr
+    $ openssl genrsa -des3 -out key.pem 2048
+    Generating RSA private key, 2048 bit long modulus
+    ...............................................+++
+    ...............................................................+++
+    e is 65537 (0x10001)
+    Enter pass phrase for key.pem:
+    Verifying - Enter pass phrase for key.pem:
+    $ openssl req -subj '/CN=client' -new -key key.pem -out client.csr
+    Enter pass phrase for key.pem:
 
-To make the key suitable for client authentication, create a extensions
+To make the key suitable for client authentication, create an extensions
 config file:
 
     $ echo extendedKeyUsage = clientAuth > extfile.cnf
@@ -55,34 +98,57 @@ config file:
 Now sign the key:
 
     $ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \
-      -out client-cert.pem -extfile extfile.cnf
+      -out cert.pem -extfile extfile.cnf
+    Signature ok
+    
subject=/CN=client
+    Getting CA Private Key
+    Enter pass phrase for ca-key.pem:
 
-Finally you need to remove the passphrase from the client and server
-key:
+Finally, you need to remove the passphrase from the client and server key:
 
     $ openssl rsa -in server-key.pem -out server-key.pem
-    $ openssl rsa -in client-key.pem -out client-key.pem
+    Enter pass phrase for server-key.pem:
+    writing RSA key
+    $ openssl rsa -in key.pem -out key.pem
+    Enter pass phrase for key.pem:
+    writing RSA key
 
 Now you can make the Docker daemon only accept connections from
 clients providing a certificate trusted by our CA:
 
     $ sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \
-      -H=0.0.0.0:2375
+      -H=0.0.0.0:2376
 
 To be able to connect to Docker and validate its certificate, you now
 need to provide your client keys, certificates and trusted CA:
 
-    $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \
-      -H=dns-name-of-docker-host:2375
+    $ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \
+      -H=dns-name-of-docker-host:2376 version
+
+> **Note**:
+> Docker over TLS should run on TCP port 2376.
 
 > **Warning**:
-> As shown in the example above, you don't have to run the
-> `docker` client with `sudo` or
-> the `docker` group when you use certificate
-> authentication. That means anyone with the keys can give any
-> instructions to your Docker daemon, giving them root access to the
-> machine hosting the daemon. Guard these keys as you would a root
-> password!
+> As shown in the example above, you don't have to run the `docker` client
+> with `sudo` or the `docker` group when you use certificate authentication.
+> That means anyone with the keys can give any instructions to your Docker
+> daemon, giving them root access to the machine hosting the daemon. Guard
+> these keys as you would a root password!
+
+## Secure by default
+
+If you want to secure your Docker client connections by default, you can move
+the files to the `.docker` directory in your home directory - and set the
+`DOCKER_HOST` variable as well.
+
+    $ cp ca.pem ~/.docker/ca.pem
+    $ cp cert.pem ~/.docker/cert.pem
+    $ cp key.pem ~/.docker/key.pem
+    $ export DOCKER_HOST=tcp://your.host.com:2376
+
+Then you can run Docker with the `--tlsverify` option.
+
+    $ docker --tlsverify ps
 
 ## Other modes
@@ -91,17 +157,22 @@ Docker in various other modes by mixing the flags.
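As one concrete illustration (a sketch only, reusing the server certificate
generated above), the second daemon mode listed below would encrypt traffic
without authenticating clients:

    $ sudo docker -d --tls --tlscert=server-cert.pem --tlskey=server-key.pem \
      -H=0.0.0.0:2376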
### Daemon modes - - tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients - - tls, tlscert, tlskey: Do not authenticate clients + - `tlsverify`, `tlscacert`, `tlscert`, `tlskey` set: Authenticate clients + - `tls`, `tlscert`, `tlskey`: Do not authenticate clients ### Client modes - - tls: Authenticate server based on public/default CA pool - - tlsverify, tlscacert: Authenticate server based on given CA - - tls, tlscert, tlskey: Authenticate with client certificate, do not + - `tls`: Authenticate server based on public/default CA pool + - `tlsverify`, `tlscacert`: Authenticate server based on given CA + - `tls`, `tlscert`, `tlskey`: Authenticate with client certificate, do not authenticate server based on given CA - - tlsverify, tlscacert, tlscert, tlskey: Authenticate with client - certificate, authenticate server based on given CA + - `tlsverify`, `tlscacert`, `tlscert`, `tlskey`: Authenticate with client + certificate and authenticate server based on given CA -The client will send its client certificate if found, so you just need -to drop your keys into ~/.docker/.pem +If found, the client will send its client certificate, so you just need +to drop your keys into `~/.docker/.pem`. Alternatively, +if you want to store your keys in another location, you can specify that +location using the environment variable `DOCKER_CERT_PATH`. + + $ export DOCKER_CERT_PATH=${HOME}/.docker/zone1/ + $ docker --tlsverify ps diff --git a/components/engine/docs/sources/articles/networking.md b/components/engine/docs/sources/articles/networking.md index bf46b90ea2..f9aa2d26d3 100644 --- a/components/engine/docs/sources/articles/networking.md +++ b/components/engine/docs/sources/articles/networking.md @@ -170,12 +170,41 @@ above, will make `/etc/resolv.conf` inside of each container look like the `/etc/resolv.conf` of the host machine where the `docker` daemon is running. The options then modify this default configuration. +## Communication between containers and the wider world + + + +Whether a container can talk to the world is governed by one main factor. + +Is the host machine willing to forward IP packets? This is governed +by the `ip_forward` system parameter. Packets can only pass between +containers if this parameter is `1`. Usually you will simply leave +the Docker server at its default setting `--ip-forward=true` and +Docker will go set `ip_forward` to `1` for you when the server +starts up. To check the setting or turn it on manually: + + # Usually not necessary: turning on forwarding, + # on the host where your Docker server is running + + $ cat /proc/sys/net/ipv4/ip_forward + 0 + $ sudo echo 1 > /proc/sys/net/ipv4/ip_forward + $ cat /proc/sys/net/ipv4/ip_forward + 1 + +Many using Docker will want `ip_forward` to be on, to at +least make communication *possible* between containers and +the wider world. + +May also be needed for inter-container communication if you are +in a multiple bridge setup. + ## Communication between containers Whether two containers can communicate is governed, at the operating -system level, by three factors. +system level, by two factors. 1. Does the network topology even connect the containers' network interfaces? By default Docker will attach all containers to a @@ -183,32 +212,14 @@ system level, by three factors. between them. See the later sections of this document for other possible topologies. -2. Is the host machine willing to forward IP packets? This is governed - by the `ip_forward` system parameter. 
Packets can only pass between
-    containers if this parameter is `1`.  Usually you will simply leave
-    the Docker server at its default setting `--ip-forward=true` and
-    Docker will go set `ip_forward` to `1` for you when the server
-    starts up. To check the setting or turn it on manually:
-
-        # Usually not necessary: turning on forwarding,
-        # on the host where your Docker server is running
-
-        $ cat /proc/sys/net/ipv4/ip_forward
-        0
-        $ sudo echo 1 > /proc/sys/net/ipv4/ip_forward
-        $ cat /proc/sys/net/ipv4/ip_forward
-        1
-
-3.  Do your `iptables` allow this particular connection to be made?
+2.  Do your `iptables` allow this particular connection to be made?
     Docker will never make changes to your system `iptables` rules if
     you set `--iptables=false` when the daemon starts.  Otherwise the
     Docker server will add a default rule to the `FORWARD` chain with a
     blanket `ACCEPT` policy if you retain the default `--icc=true`, or
     else will set the policy to `DROP` if `--icc=false`.
 
-Nearly everyone using Docker will want `ip_forward` to be on, to at
-least make communication *possible* between containers.  But it is a
-strategic question whether to leave `--icc=true` or change it to
+It is a strategic question whether to leave `--icc=true` or change it to
 `--icc=false` (on Ubuntu, by editing the `DOCKER_OPTS` variable in
 `/etc/default/docker` and restarting the Docker server) so that
 `iptables` will protect other containers — and the main host — from
@@ -539,7 +550,7 @@ values.
    It also allows the container to access local network services
    like D-bus.  This can lead to processes in the container being
    able to do unexpected things like
-   [restart your computer](https://github.com/dotcloud/docker/issues/6401).
+   [restart your computer](https://github.com/docker/docker/issues/6401).
    You should use this option with caution.
 
 * `--net=container:NAME_or_ID` — Tells Docker to put this container's
@@ -720,3 +731,14 @@ usual containers.  But unless you have very specific networking needs
 that drive you to such a solution, it is probably far preferable to use
 `--icc=false` to lock down inter-container communication, as we
 explored earlier.
+
+## Editing networking config files
+
+Starting with Docker v1.2.0, you can now edit `/etc/hosts`, `/etc/hostname`
+and `/etc/resolv.conf` in a running container. This is useful if you need
+to install bind or other services that might override one of those files.
+
+Note, however, that changes to these files will not be saved by
+`docker commit`, nor will they be saved during `docker run`.
+That means they won't be saved in the image, nor will they persist when a
+container is restarted; they will only "stick" in a running container.
diff --git a/components/engine/docs/sources/articles/runmetrics.md b/components/engine/docs/sources/articles/runmetrics.md
index 9c871a24f6..b78de2403e 100644
--- a/components/engine/docs/sources/articles/runmetrics.md
+++ b/components/engine/docs/sources/articles/runmetrics.md
@@ -363,9 +363,9 @@ container, we need to:
 - Execute `ip netns exec ....`
 
 Please review [*Enumerating Cgroups*](#enumerating-cgroups) to learn how to find
-the cgroup of a pprocess running in the container of which you want to
+the cgroup of a process running in the container of which you want to
 measure network usage. From there, you can examine the pseudo-file named
-`tasks`, which containes the PIDs that are in the
+`tasks`, which contains the PIDs that are in the
 control group (i.e. in the container). Pick any one of them.
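For instance, here is a minimal sketch of grabbing one PID from `tasks` and
entering that container's network namespace - this assumes the cgroup
hierarchy is mounted under `/sys/fs/cgroup`, that Docker's cgroups live in a
`docker/` subdirectory, and that the container's full ID is in `$CID`:

    $ TASKS=/sys/fs/cgroup/devices/docker/$CID/tasks
    $ PID=$(head -n 1 $TASKS)
    $ sudo mkdir -p /var/run/netns
    $ sudo ln -sf /proc/$PID/ns/net /var/run/netns/$CID
    $ sudo ip netns exec $CID netstat -i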
Putting everything together, if the "short ID" of a container is held in
diff --git a/components/engine/docs/sources/articles/security.md b/components/engine/docs/sources/articles/security.md
index dcc61f386c..12f7b350ec 100644
--- a/components/engine/docs/sources/articles/security.md
+++ b/components/engine/docs/sources/articles/security.md
@@ -196,7 +196,7 @@ to the host.
 
 This won't affect regular web apps; but malicious users will find that
 the arsenal at their disposal has shrunk considerably! By default Docker
 drops all capabilities except [those
-needed](https://github.com/dotcloud/docker/blob/master/daemon/execdriver/native/template/default_template.go),
+needed](https://github.com/docker/docker/blob/master/daemon/execdriver/native/template/default_template.go),
 a whitelist instead of a blacklist approach. You can see a full list of
 available capabilities in [Linux
 manpages](http://man7.org/linux/man-pages/man7/capabilities.7.html).
@@ -204,7 +204,7 @@ manpages](http://man7.org/linux/man-pages/man7/capabilities.7.html).
 Of course, you can always enable extra capabilities if you really need
 them (for instance, if you want to use a FUSE-based filesystem), but by
 default, Docker containers use only a
-[whitelist](https://github.com/dotcloud/docker/blob/master/daemon/execdriver/native/template/default_template.go)
+[whitelist](https://github.com/docker/docker/blob/master/daemon/execdriver/native/template/default_template.go)
 of kernel capabilities.
 
 ## Other Kernel Security Features
diff --git a/components/engine/docs/sources/articles/using_supervisord.md b/components/engine/docs/sources/articles/using_supervisord.md
index 91b8976d78..10f32c7d1b 100644
--- a/components/engine/docs/sources/articles/using_supervisord.md
+++ b/components/engine/docs/sources/articles/using_supervisord.md
@@ -28,18 +28,14 @@ new image.
 
     FROM ubuntu:13.04
     MAINTAINER examples@docker.com
-    RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
-    RUN apt-get update
-    RUN apt-get upgrade -y
 
 ## Installing Supervisor
 
 We can now install our SSH and Apache daemons as well as Supervisor in our
 container.
 
-    RUN apt-get install -y openssh-server apache2 supervisor
-    RUN mkdir -p /var/run/sshd
-    RUN mkdir -p /var/log/supervisor
+    RUN apt-get update && apt-get install -y openssh-server apache2 supervisor
+    RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /var/log/supervisor
 
 Here we're installing the `openssh-server`,
 `apache2` and `supervisor`
@@ -52,7 +48,7 @@ Now let's add a configuration file for Supervisor. The default file is called
 `supervisord.conf` and is located in `/etc/supervisor/conf.d/`.
 
-    ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+    COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
 
 Let's see what is inside our `supervisord.conf` file.
diff --git a/components/engine/docs/sources/contributing/contributing.md b/components/engine/docs/sources/contributing/contributing.md
index dd764eb855..7d65a0479c 100644
--- a/components/engine/docs/sources/contributing/contributing.md
+++ b/components/engine/docs/sources/contributing/contributing.md
@@ -7,18 +7,18 @@ page_keywords: contributing, docker, documentation, help, guideline
 
 Want to hack on Docker? Awesome! The repository includes [all the
 instructions you need to get started](
-https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
+https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
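If you want to dive straight in, the first steps usually look something like
this (a sketch only - the authoritative, up-to-date steps live in
`CONTRIBUTING.md`, and the exact `make` target names may differ between
versions):

    $ git clone https://github.com/docker/docker.git
    $ cd docker
    $ sudo make build    # assumed target: builds the development image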
The [developer environment Dockerfile]( -https://github.com/dotcloud/docker/blob/master/Dockerfile) +https://github.com/docker/docker/blob/master/Dockerfile) specifies the tools and versions used to test and build Docker. If you're making changes to the documentation, see the [README.md]( -https://github.com/dotcloud/docker/blob/master/docs/README.md). +https://github.com/docker/docker/blob/master/docs/README.md). The [documentation environment Dockerfile]( -https://github.com/dotcloud/docker/blob/master/docs/Dockerfile) +https://github.com/docker/docker/blob/master/docs/Dockerfile) specifies the tools and versions used to build the Documentation. Further interesting details can be found in the [Packaging hints]( -https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md). +https://github.com/docker/docker/blob/master/hack/PACKAGERS.md). diff --git a/components/engine/docs/sources/contributing/devenvironment.md b/components/engine/docs/sources/contributing/devenvironment.md index 606f9302fc..25a80af4af 100644 --- a/components/engine/docs/sources/contributing/devenvironment.md +++ b/components/engine/docs/sources/contributing/devenvironment.md @@ -32,7 +32,7 @@ Again, you can do it in other ways but you need to do more work. ## Check out the Source - $ git clone https://git@github.com/dotcloud/docker + $ git clone https://git@github.com/docker/docker $ cd docker To checkout a different revision just use `git checkout` @@ -110,7 +110,7 @@ something like this === RUN TestDependencyGraph --- PASS: TestDependencyGraph (0.00 seconds) PASS - ok github.com/dotcloud/docker/utils 0.017s + ok github.com/docker/docker/utils 0.017s If $TESTFLAGS is set in the environment, it is passed as extra arguments to `go test`. You can use this to select certain tests to run, e.g., diff --git a/components/engine/docs/sources/docker-hub/builds.md b/components/engine/docs/sources/docker-hub/builds.md index 1d16353990..e3e2139f0a 100644 --- a/components/engine/docs/sources/docker-hub/builds.md +++ b/components/engine/docs/sources/docker-hub/builds.md @@ -1,65 +1,75 @@ page_title: Automated Builds on Docker Hub page_description: Docker Hub Automated Builds page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation, trusted, builds, trusted builds, automated builds + # Automated Builds on Docker Hub -## Automated Builds +## About Automated Builds -*Automated Builds* is a special feature allowing you to specify a source -repository with a `Dockerfile` to be built by the -[Docker Hub](https://hub.docker.com) build clusters. The system will -clone your repository and build the `Dockerfile` using the repository as -the context. The resulting image will then be uploaded to the registry +*Automated Builds* are a special feature of Docker Hub which allow you to +use [Docker Hub's](https://hub.docker.com) build clusters to automatically +create images from a specified `Dockerfile` and a GitHub or Bitbucket repo +(or "context"). The system will clone your repository and build the image +described by the `Dockerfile` using the repository as the context. The +resulting automated image will then be uploaded to the Docker Hub registry and marked as an *Automated Build*. -Automated Builds have a number of advantages. For example, users of -*your* Automated Build can be certain that the resulting image was built -exactly how it claims to be. +Automated Builds have several advantages: -Furthermore, the `Dockerfile` will be available to anyone browsing your repository -on the registry. 
Another advantage of the Automated Builds feature is the automated
-builds. This makes sure that your repository is always up to date.
+* Users of *your* Automated Build can trust that the resulting
+image was built exactly as specified.
+
+* The `Dockerfile` will be available to anyone with access to
+your repository on the Docker Hub registry.
+
+* Because the process is automated, Automated Builds help to
+make sure that your repository is always up to date.
 
 Automated Builds are supported for both public and private repositories
-on both [GitHub](http://github.com) and
-[BitBucket](https://bitbucket.org/).
+on both [GitHub](http://github.com) and [Bitbucket](https://bitbucket.org/).
 
-### Setting up Automated Builds with GitHub
+To use Automated Builds, you must have an [account on Docker Hub](
+http://docs.docker.com/userguide/dockerhub/#creating-a-docker-hub-account)
+and on GitHub and/or Bitbucket. In either case, the account needs
+to be properly validated and activated before you can link to it.
 
-In order to setup an Automated Build, you need to first link your
-[Docker Hub](https://hub.docker.com) account with a GitHub one. This
-will allow the registry to see your repositories.
+## Setting up Automated Builds with GitHub
+
+In order to set up an Automated Build, you need to first link your
+[Docker Hub](https://hub.docker.com) account with a GitHub account.
+This will allow the registry to see your repositories.
 
 > *Note:*
-> We currently request access for *read* and *write* since
+> Automated Builds currently require *read* and *write* access since
 > [Docker Hub](https://hub.docker.com) needs to setup a GitHub service
-> hook. Although nothing else is done with your account, this is how
-> GitHub manages permissions, sorry!
+> hook. We have no choice here, this is how GitHub manages permissions, sorry!
+> We do guarantee nothing else will be touched in your account.
 
-Click on the [Automated Builds
-tab](https://registry.hub.docker.com/builds/) to get started and then
-select [+ Add New](https://registry.hub.docker.com/builds/add/).
+To get started, log into your Docker Hub account and click the
+"+ Add Repository" button at the upper right of the screen. Then select
+[Automated Build](https://registry.hub.docker.com/builds/add/).
 
 Select the [GitHub service](https://registry.hub.docker.com/associate/github/).
 
-Then follow the instructions to authorize and link your GitHub account
-to Docker Hub.
+Then follow the onscreen instructions to authorize and link your
+GitHub account to Docker Hub. Once it is linked, you'll be able to
+choose a repo from which to create the Automated Build.
 
-#### Creating an Automated Build
+### Creating an Automated Build
 
-You can [create an Automated Build](https://registry.hub.docker.com/builds/github/select/)
-from any of your public or private GitHub repositories with a `Dockerfile`.
+You can [create an Automated Build](
+https://registry.hub.docker.com/builds/github/select/) from any of your
+public or private GitHub repositories with a `Dockerfile`.
 
-#### GitHub organizations
+### GitHub Submodules
 
-GitHub organizations appear once your membership to that organization is
-made public on GitHub. To verify, you can look at the members tab for your
-organization on GitHub.
+If your GitHub repository contains links to private submodules, you'll
+need to add a deploy key from your Docker Hub repository.
-#### GitHub service hooks
-
-You can follow the below steps to configure the GitHub service hooks for your
-Automated Build:
+Your Docker Hub deploy key is located under the "Build Details"
+menu on the Automated Build's main page in the Hub. Add this key
+to your GitHub submodule by visiting the Settings page for the
+repository on GitHub and selecting "Deploy keys".
 
@@ -72,77 +82,153 @@ Automated Build:
 
-| Step | Screenshot | Description |
-| ---- | ---------- | ----------- |
-| 1. | | Login to Github.com, and visit your Repository page. Click on the repository "Settings" link. You will need admin rights to the repository in order to do this. So if you don't have admin rights, you will need to ask someone who does. |
-| 2. | Service Hooks | Click on the "Service Hooks" link. |
-| 3. | Find the service hook labeled Docker | Find the service hook labeled "Docker" and click on it. |
-| 4. | Activate Service Hooks | Click on the "Active" checkbox and then the "Update settings" button, to save changes. |
+| Screenshot | Description |
+| ---------- | ----------- |
+| | Your automated build's deploy key is in the "Build Details" menu under "Deploy keys". |
+| | In your GitHub submodule's repository Settings page, add the deploy key from your Docker Hub Automated Build. |
+
+### GitHub Organizations
+
+GitHub organizations will appear once your membership to that organization is
+made public on GitHub. To verify, you can look at the members tab for your
+organization on GitHub.
+
+### GitHub Service Hooks
+
+Follow the steps below to configure the GitHub service
+hooks for your Automated Build:
+
+| Step | Screenshot | Description |
+| ---- | ---------- | ----------- |
+| 1. | | Log in to Github.com, and go to your Repository page. Click on "Settings" on the right side of the page. You must have admin privileges to the repository in order to do this. |
+| 2. | Webhooks & Services | Click on "Webhooks & Services" on the left side of the page. |
+| 3. | Find the service labeled Docker | Find the service labeled "Docker" and click on it. |
+| 4. | Activate Service Hooks | Make sure the "Active" checkbox is selected and click the "Update service" button to save your changes. |
-### Setting up Automated Builds with BitBucket
+## Setting up Automated Builds with Bitbucket
 
 In order to set up an Automated Build, you need to first link your
-[Docker Hub](https://hub.docker.com) account with a BitBucket one. This
-will allow the registry to see your repositories.
+[Docker Hub](https://hub.docker.com) account with a Bitbucket account.
+This will allow the registry to see your repositories.
 
-Click on the [Automated Builds tab](https://registry.hub.docker.com/builds/) to
-get started and then select [+ Add
-New](https://registry.hub.docker.com/builds/add/).
+To get started, log into your Docker Hub account and click the
+"+ Add Repository" button at the upper right of the screen. Then
+select [Automated Build](https://registry.hub.docker.com/builds/add/).
 
-Select the [BitBucket
-service](https://registry.hub.docker.com/associate/bitbucket/).
+Select the [Bitbucket source](
+https://registry.hub.docker.com/associate/bitbucket/).
 
-Then follow the instructions to authorize and link your BitBucket account
-to Docker Hub.
+Then follow the onscreen instructions to authorize and link your
+Bitbucket account to Docker Hub. Once it is linked, you'll be able
+to choose a repo from which to create the Automated Build.
 
-#### Creating an Automated Build
+### Creating an Automated Build
 
 You can [create an Automated Build](
 https://registry.hub.docker.com/builds/bitbucket/select/) from any of your
-public or private BitBucket repositories with a `Dockerfile`.
+public or private Bitbucket repositories with a `Dockerfile`.
 
-### The Dockerfile and Automated Builds
+### Adding a Hook
 
-During the build process, we copy the contents of your `Dockerfile`. We also
-add it to the [Docker Hub](https://hub.docker.com) for the Docker community
+When you link your Docker Hub account, a `POST` hook should get automatically
+added to your Bitbucket repo. Follow the steps below to confirm or modify the
+Bitbucket hooks for your Automated Build:
+
+| Step | Screenshot | Description |
+| ---- | ---------- | ----------- |
+| 1. | Settings | Log in to Bitbucket.org and go to your Repository page. Click on "Settings" on the far left side of the page, under "Navigation". You must have admin privileges to the repository in order to do this. |
+| 2. | Hooks | Click on "Hooks" on the near left side of the page, under "Settings". |
+| 3. | Docker Post Hook | You should now see a list of hooks associated with the repo, including a POST hook that points at registry.hub.docker.com/hooks/bitbucket. |
+
+## The Dockerfile and Automated Builds
+
+During the build process, Docker will copy the contents of your `Dockerfile`.
+It will also add it to the [Docker Hub](https://hub.docker.com) for the Docker
+community
 (for public repos) or approved team members/orgs (for private repos)
 to see on the repository page.
 
-### README.md
+## README.md
 
-If you have a `README.md` file in your repository, we will use that as the
-repository's full description.
+If you have a `README.md` file in your repository, it will be used as the
+repository's full description. The build process will look for a
+`README.md` in the same directory as your `Dockerfile`.
 
 > **Warning:**
 > If you change the full description after a build, it will be
 > rewritten the next time the Automated Build is built. To make changes,
-> modify the README.md from the Git repository. We will look for a README.md
-> in the same directory as your `Dockerfile`.
+> modify the `README.md` from the Git repository.
 
 ### Build triggers
 
-If you need another way to trigger your Automated Builds outside of GitHub
-or BitBucket, you can setup a build trigger. When you turn on the build
-trigger for an Automated Build, it will give you a URL to which you can
-send POST requests. This will trigger the Automated Build process, which
-is similar to GitHub webhooks.
+If you need a way to trigger Automated Builds outside of GitHub or Bitbucket,
+you can set up a build trigger. When you turn on the build trigger for an
+Automated Build, it will give you a URL to which you can send POST requests.
+This will trigger the Automated Build, much as with a GitHub webhook.
 
-Build Triggers are available under the Settings tab of each Automated Build.
+Build triggers are available under the Settings menu of each Automated Build
+repo on the Docker Hub.
 
 > **Note:**
 > You can only trigger one build at a time and no more than one
-> every five minutes. If you have a build already pending, or if you already
+> every five minutes. If you already have a build pending, or if you
 > recently submitted a build request, those requests *will be ignored*.
-> You can find the logs of last 10 triggers on the settings page to verify
-> if everything is working correctly.
+> To verify everything is working correctly, check the logs of the last
+> ten triggers on the settings page.
 
 ### Webhooks
 
-Also available for Automated Builds are Webhooks. Webhooks can be called
+Automated Builds also include a Webhooks feature. Webhooks can be called
 after a successful repository push is made.
 
 The webhook call will generate an
 HTTP POST with the following JSON
 payload:
 
 ```
@@ -179,24 +265,25 @@ payload:
 }
 ```
 
-Webhooks are available under the Settings tab of each Automated
-Build.
+Webhooks are available under the Settings menu of each Automated
+Build's repo.
 
-> **Note:** If you want to test your webhook out then we recommend using
+> **Note:** If you want to test your webhook out we recommend using
 > a tool like [requestb.in](http://requestb.in/).
 
 ### Repository links
 
-Repository links are a way to associate one Automated Build with another. If one
-gets updated, linking system also triggers a build for the other Automated Build.
-This makes it easy to keep your Automated Builds up to date.
+Repository links are a way to associate one Automated Build with
+another. If one gets updated, the linking system triggers a rebuild
+for the other Automated Build. This makes it easy to keep all your
+Automated Builds up to date.
-To add a link, go to the settings page of an Automated Build and click on
-*Repository Links*. Then enter the name of the repository that you want have
-linked.
+To add a link, go to the repo for the Automated Build you want to
+link to and click on *Repository Links* under the Settings menu at
+right. Then, enter the name of the repository that you want to have linked.
 
 > **Warning:**
 > You can add more than one repository link, however, you should
-> be very careful. Creating a two way relationship between Automated Builds will
-> cause a never ending build loop.
+> do so very carefully. Creating a two-way relationship between Automated Builds will
+> cause an endless build loop.
diff --git a/components/engine/docs/sources/docker-hub/hub-images/bb_hooks.png b/components/engine/docs/sources/docker-hub/hub-images/bb_hooks.png
new file mode 100644
index 0000000000..d51cd03ac4
Binary files /dev/null and b/components/engine/docs/sources/docker-hub/hub-images/bb_hooks.png differ
diff --git a/components/engine/docs/sources/docker-hub/hub-images/bb_menu.png b/components/engine/docs/sources/docker-hub/hub-images/bb_menu.png
new file mode 100644
index 0000000000..6f4a6813ef
Binary files /dev/null and b/components/engine/docs/sources/docker-hub/hub-images/bb_menu.png differ
diff --git a/components/engine/docs/sources/docker-hub/hub-images/bb_post-hook.png b/components/engine/docs/sources/docker-hub/hub-images/bb_post-hook.png
new file mode 100644
index 0000000000..78c4730665
Binary files /dev/null and b/components/engine/docs/sources/docker-hub/hub-images/bb_post-hook.png differ
diff --git a/components/engine/docs/sources/docker-hub/hub-images/deploy_key.png b/components/engine/docs/sources/docker-hub/hub-images/deploy_key.png
new file mode 100644
index 0000000000..c4377bba9b
Binary files /dev/null and b/components/engine/docs/sources/docker-hub/hub-images/deploy_key.png differ
diff --git a/components/engine/docs/sources/docker-hub/hub-images/gh_docker-service.png b/components/engine/docs/sources/docker-hub/hub-images/gh_docker-service.png
new file mode 100644
index 0000000000..0119b9e22a
Binary files /dev/null and b/components/engine/docs/sources/docker-hub/hub-images/gh_docker-service.png differ
diff --git a/components/engine/docs/sources/docker-hub/hub-images/gh_menu.png b/components/engine/docs/sources/docker-hub/hub-images/gh_menu.png
new file mode 100644
index 0000000000..d9c8d11996
Binary files /dev/null and b/components/engine/docs/sources/docker-hub/hub-images/gh_menu.png differ
diff --git a/components/engine/docs/sources/docker-hub/hub-images/gh_service_hook.png b/components/engine/docs/sources/docker-hub/hub-images/gh_service_hook.png
new file mode 100644
index 0000000000..9a00153bb1
Binary files /dev/null and b/components/engine/docs/sources/docker-hub/hub-images/gh_service_hook.png differ
diff --git a/components/engine/docs/sources/docker-hub/hub-images/gh_settings.png b/components/engine/docs/sources/docker-hub/hub-images/gh_settings.png
new file mode 100644
index 0000000000..efb1a3abf5
Binary files /dev/null and b/components/engine/docs/sources/docker-hub/hub-images/gh_settings.png differ
diff --git a/components/engine/docs/sources/docker-hub/hub-images/github_deploy_key.png b/components/engine/docs/sources/docker-hub/hub-images/github_deploy_key.png
new file mode 100644
index 0000000000..bd69054b14
Binary files /dev/null and b/components/engine/docs/sources/docker-hub/hub-images/github_deploy_key.png differ
diff --git a/components/engine/docs/sources/docker-hub/repos.md
b/components/engine/docs/sources/docker-hub/repos.md index c219a1989a..8d76d5a5df 100644 --- a/components/engine/docs/sources/docker-hub/repos.md +++ b/components/engine/docs/sources/docker-hub/repos.md @@ -65,9 +65,9 @@ optimized and up-to-date image to power your applications. > **Note:** > If you would like to contribute an official repository for your > organization, product or team you can see more information -> [here](https://github.com/dotcloud/stackbrew). +> [here](https://github.com/docker/stackbrew). -## Private Docker Repositories +## Private Repositories Private repositories allow you to have repositories that contain images that you want to keep private, either to your own account or within an diff --git a/components/engine/docs/sources/examples/apt-cacher-ng.Dockerfile b/components/engine/docs/sources/examples/apt-cacher-ng.Dockerfile index 3b7862bb58..d1f76572b9 100644 --- a/components/engine/docs/sources/examples/apt-cacher-ng.Dockerfile +++ b/components/engine/docs/sources/examples/apt-cacher-ng.Dockerfile @@ -9,7 +9,7 @@ FROM ubuntu MAINTAINER SvenDowideit@docker.com VOLUME ["/var/cache/apt-cacher-ng"] -RUN apt-get update ; apt-get install -yq apt-cacher-ng +RUN apt-get update && apt-get install -y apt-cacher-ng EXPOSE 3142 -CMD chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/* +CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/* diff --git a/components/engine/docs/sources/examples/apt-cacher-ng.md b/components/engine/docs/sources/examples/apt-cacher-ng.md index 34e4a4bf02..7dafec1593 100644 --- a/components/engine/docs/sources/examples/apt-cacher-ng.md +++ b/components/engine/docs/sources/examples/apt-cacher-ng.md @@ -28,10 +28,10 @@ Use the following Dockerfile: MAINTAINER SvenDowideit@docker.com VOLUME ["/var/cache/apt-cacher-ng"] - RUN apt-get update ; apt-get install -yq apt-cacher-ng + RUN apt-get update && apt-get install -y apt-cacher-ng EXPOSE 3142 - CMD chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/* + CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/* To build the image using: @@ -61,7 +61,7 @@ a local version of a common base: FROM ubuntu RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy - RUN apt-get update ; apt-get install vim git + RUN apt-get update && apt-get install -y vim git # docker build -t my_ubuntu . diff --git a/components/engine/docs/sources/examples/mongodb.md b/components/engine/docs/sources/examples/mongodb.md index 602f55ca88..28f7824594 100644 --- a/components/engine/docs/sources/examples/mongodb.md +++ b/components/engine/docs/sources/examples/mongodb.md @@ -65,13 +65,12 @@ a MongoDB repository file for the package manager. After this initial preparation we can update our packages and install MongoDB. 
# Update apt-get sources AND install MongoDB - RUN apt-get update - RUN apt-get install -y -q mongodb-org + RUN apt-get update && apt-get install -y mongodb-org > **Tip:** You can install a specific version of MongoDB by using a list > of required packages with versions, e.g.: > -> RUN apt-get install -y -q mongodb-org=2.6.1 mongodb-org-server=2.6.1 mongodb-org-shell=2.6.1 mongodb-org-mongos=2.6.1 mongodb-org-tools=2.6.1 +> RUN apt-get update && apt-get install -y mongodb-org=2.6.1 mongodb-org-server=2.6.1 mongodb-org-shell=2.6.1 mongodb-org-mongos=2.6.1 mongodb-org-tools=2.6.1 MongoDB requires a data directory. Let's create it as the final step of our installation instructions. diff --git a/components/engine/docs/sources/examples/mongodb/Dockerfile b/components/engine/docs/sources/examples/mongodb/Dockerfile index e7acc0fd85..9333eb5811 100644 --- a/components/engine/docs/sources/examples/mongodb/Dockerfile +++ b/components/engine/docs/sources/examples/mongodb/Dockerfile @@ -11,8 +11,7 @@ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list # Update apt-get sources AND install MongoDB -RUN apt-get update -RUN apt-get install -y -q mongodb-org +RUN apt-get update && apt-get install -y mongodb-org # Create the MongoDB data directory RUN mkdir -p /data/db @@ -20,5 +19,5 @@ RUN mkdir -p /data/db # Expose port #27017 from the container to the host EXPOSE 27017 -# Set usr/bin/mongod as the dockerized entry-point application -ENTRYPOINT usr/bin/mongod +# Set /usr/bin/mongod as the dockerized entry-point application +ENTRYPOINT ["/usr/bin/mongod"] diff --git a/components/engine/docs/sources/examples/nodejs_web_app.md b/components/engine/docs/sources/examples/nodejs_web_app.md index a7b8eea7e3..5d69fd713b 100644 --- a/components/engine/docs/sources/examples/nodejs_web_app.md +++ b/components/engine/docs/sources/examples/nodejs_web_app.md @@ -66,10 +66,10 @@ requires to build (this example uses Docker 0.3.4): Next, define the parent image you want to use to build your own image on top of. Here, we'll use -[CentOS](https://registry.hub.docker.com/_/centos/) (tag: `6.4`) +[CentOS](https://registry.hub.docker.com/_/centos/) (tag: `centos6`) available on the [Docker Hub](https://hub.docker.com/): - FROM centos:6.4 + FROM centos:centos6 Since we're building a Node.js app, you'll have to install Node.js as well as npm on your CentOS image. Node.js is required to run your app @@ -84,11 +84,11 @@ via-package-manager#rhelcentosscientific-linux-6): # Install Node.js and npm RUN yum install -y npm -To bundle your app's source code inside the Docker image, use the `ADD` +To bundle your app's source code inside the Docker image, use the `COPY` instruction: # Bundle app source - ADD . /src + COPY . /src Install your app dependencies using the `npm` binary: @@ -109,7 +109,7 @@ defines your runtime, i.e. `node`, and the path to our app, i.e. `src/index.js` Your `Dockerfile` should now look like this: # DOCKER-VERSION 0.3.4 - FROM centos:6.4 + FROM centos:centos6 # Enable EPEL for Node.js RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm @@ -117,7 +117,7 @@ Your `Dockerfile` should now look like this: RUN yum install -y npm # Bundle app source - ADD . /src + COPY . 
/src # Install app dependencies RUN cd /src; npm install @@ -127,7 +127,7 @@ Your `Dockerfile` should now look like this: ## Building your image Go to the directory that has your `Dockerfile` and run the following command -to build a Docker image. The `-t` flag let's you tag your image so it's easier +to build a Docker image. The `-t` flag lets you tag your image so it's easier to find later using the `docker images` command: $ sudo docker build -t /centos-node-hello . @@ -137,9 +137,9 @@ Your image will now be listed by Docker: $ sudo docker images # Example - REPOSITORY TAG ID CREATED - centos 6.4 539c0211cd76 8 weeks ago - /centos-node-hello latest d64d3505b0d2 2 hours ago + REPOSITORY TAG ID CREATED + centos centos6 539c0211cd76 8 weeks ago + /centos-node-hello latest d64d3505b0d2 2 hours ago ## Run the image diff --git a/components/engine/docs/sources/examples/postgresql_service.Dockerfile b/components/engine/docs/sources/examples/postgresql_service.Dockerfile index 364a18a81d..d0f37669d1 100644 --- a/components/engine/docs/sources/examples/postgresql_service.Dockerfile +++ b/components/engine/docs/sources/examples/postgresql_service.Dockerfile @@ -13,17 +13,13 @@ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys B97B0AFCAA # of PostgreSQL, ``9.3``. RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list -# Update the Ubuntu and PostgreSQL repository indexes -RUN apt-get update - # Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3 # There are some warnings (in red) that show up during the build. You can hide # them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive -RUN apt-get -y -q install python-software-properties software-properties-common -RUN apt-get -y -q install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 +RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 # Note: The official Debian and Ubuntu images automatically ``apt-get clean`` -# after each ``apt-get`` +# after each ``apt-get`` # Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed`` USER postgres diff --git a/components/engine/docs/sources/examples/postgresql_service.md b/components/engine/docs/sources/examples/postgresql_service.md index 5265935e3d..ffd122ed58 100644 --- a/components/engine/docs/sources/examples/postgresql_service.md +++ b/components/engine/docs/sources/examples/postgresql_service.md @@ -35,17 +35,13 @@ Start by creating a new `Dockerfile`: # of PostgreSQL, ``9.3``. RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list - # Update the Ubuntu and PostgreSQL repository indexes - RUN apt-get update - # Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3 # There are some warnings (in red) that show up during the build. 
You can hide # them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive - RUN apt-get -y -q install python-software-properties software-properties-common - RUN apt-get -y -q install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 + RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 # Note: The official Debian and Ubuntu images automatically ``apt-get clean`` - # after each ``apt-get`` + # after each ``apt-get`` # Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed`` USER postgres @@ -88,7 +84,7 @@ Containers*](/userguide/dockerlinks), or we can access it from our host > **Note**: > The `--rm` removes the container and its image when -> the container exists successfully. +> the container exits successfully. ### Using container linking diff --git a/components/engine/docs/sources/examples/running_redis_service.md b/components/engine/docs/sources/examples/running_redis_service.md index 0eeef0625d..6d052da09e 100644 --- a/components/engine/docs/sources/examples/running_redis_service.md +++ b/components/engine/docs/sources/examples/running_redis_service.md @@ -13,8 +13,7 @@ Firstly, we create a `Dockerfile` for our new Redis image. FROM ubuntu:12.10 - RUN apt-get update - RUN apt-get -y install redis-server + RUN apt-get update && apt-get install -y redis-server EXPOSE 6379 ENTRYPOINT ["/usr/bin/redis-server"] @@ -49,9 +48,9 @@ container to only this container. Once inside our freshly created container we need to install Redis to get the `redis-cli` binary to test our connection. - $ apt-get update - $ apt-get -y install redis-server - $ service redis-server stop + $ sudo apt-get update + $ sudo apt-get install redis-server + $ sudo service redis-server stop As we've used the `--link redis:db` option, Docker has created some environment variables in our web application container. diff --git a/components/engine/docs/sources/examples/running_riak_service.md b/components/engine/docs/sources/examples/running_riak_service.md index 5909b7e2b0..c3d83bf663 100644 --- a/components/engine/docs/sources/examples/running_riak_service.md +++ b/components/engine/docs/sources/examples/running_riak_service.md @@ -14,7 +14,7 @@ Create an empty file called `Dockerfile`: $ touch Dockerfile Next, define the parent image you want to use to build your image on top -of. We'll use [Ubuntu](https://registry.hub.docker.cm/_/ubuntu/) (tag: +of. We'll use [Ubuntu](https://registry.hub.docker.com/_/ubuntu/) (tag: `latest`), which is available on [Docker Hub](https://hub.docker.com): # Riak @@ -25,13 +25,6 @@ of. 
We'll use [Ubuntu](https://registry.hub.docker.cm/_/ubuntu/) (tag: FROM ubuntu:latest MAINTAINER Hector Castro hector@basho.com -Next, we update the APT cache and apply any updates: - - # Update the APT cache - RUN sed -i.bak 's/main$/main universe/' /etc/apt/sources.list - RUN apt-get update - RUN apt-get upgrade -y - After that, we install and setup a few dependencies: - `curl` is used to download Basho's APT @@ -46,38 +39,29 @@ After that, we install and setup a few dependencies: # Install and setup project dependencies - RUN apt-get install -y curl lsb-release supervisor openssh-server + RUN apt-get update && apt-get install -y curl lsb-release supervisor openssh-server RUN mkdir -p /var/run/sshd RUN mkdir -p /var/log/supervisor RUN locale-gen en_US en_US.UTF-8 - ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf + COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf RUN echo 'root:basho' | chpasswd Next, we add Basho's APT repository: - RUN curl -s http://apt.basho.com/gpg/basho.apt.key | apt-key add - + RUN curl -sSL http://apt.basho.com/gpg/basho.apt.key | apt-key add - RUN echo "deb http://apt.basho.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/basho.list - RUN apt-get update After that, we install Riak and alter a few defaults: # Install Riak and prepare it to run - RUN apt-get install -y riak + RUN apt-get update && apt-get install -y riak RUN sed -i.bak 's/127.0.0.1/0.0.0.0/' /etc/riak/app.config RUN echo "ulimit -n 4096" >> /etc/default/riak -Almost there. Next, we add a hack to get us by the lack of -`initctl`: - - # Hack for initctl - # See: https://github.com/dotcloud/docker/issues/1024 - RUN dpkg-divert --local --rename --add /sbin/initctl - RUN ln -s /bin/true /sbin/initctl - Then, we expose the Riak Protocol Buffers and HTTP interfaces, along with SSH: diff --git a/components/engine/docs/sources/examples/running_ssh_service.Dockerfile b/components/engine/docs/sources/examples/running_ssh_service.Dockerfile index 57baf88cef..1b8ed02a8a 100644 --- a/components/engine/docs/sources/examples/running_ssh_service.Dockerfile +++ b/components/engine/docs/sources/examples/running_ssh_service.Dockerfile @@ -5,10 +5,7 @@ FROM ubuntu:12.04 MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com" -# make sure the package repository is up to date -RUN apt-get update - -RUN apt-get install -y openssh-server +RUN apt-get update && apt-get install -y openssh-server RUN mkdir /var/run/sshd RUN echo 'root:screencast' |chpasswd diff --git a/components/engine/docs/sources/examples/running_ssh_service.md b/components/engine/docs/sources/examples/running_ssh_service.md index 579d372ba7..7140678e3b 100644 --- a/components/engine/docs/sources/examples/running_ssh_service.md +++ b/components/engine/docs/sources/examples/running_ssh_service.md @@ -15,10 +15,7 @@ quick access to a test container. FROM ubuntu:12.04 MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com" - # make sure the package repository is up to date - RUN apt-get update - - RUN apt-get install -y openssh-server + RUN apt-get update && apt-get install -y openssh-server RUN mkdir /var/run/sshd RUN echo 'root:screencast' |chpasswd @@ -27,7 +24,7 @@ quick access to a test container. Build the image using: - $ sudo docker build --rm -t eg_sshd . + $ sudo docker build -t eg_sshd . Then run it.
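For example, you might launch the image in the background and let Docker map the exposed SSH port to a random high port on the host (a minimal sketch; the container name `test_sshd` is just an illustrative choice):

    $ sudo docker run -d -P --name test_sshd eg_sshd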
You can then use `docker port` to find out what host port the container's port 22 is mapped to: diff --git a/components/engine/docs/sources/faq.md b/components/engine/docs/sources/faq.md index 667058c86f..531afc3ea7 100644 --- a/components/engine/docs/sources/faq.md +++ b/components/engine/docs/sources/faq.md @@ -14,8 +14,8 @@ paying. ### What open source license are you using? We are using the Apache License Version 2.0, see it here: -[https://github.com/dotcloud/docker/blob/master/LICENSE]( -https://github.com/dotcloud/docker/blob/master/LICENSE) +[https://github.com/docker/docker/blob/master/LICENSE]( +https://github.com/docker/docker/blob/master/LICENSE) ### Does Docker run on Mac OS X or Windows? @@ -225,9 +225,41 @@ Downloading and installing an "all-in-one" .deb or .rpm sounds great at first, except if you have no way to figure out that it contains a copy of the OpenSSL library vulnerable to the [Heartbleed](http://heartbleed.com/) bug. +### Why is `DEBIAN_FRONTEND=noninteractive` discouraged in Dockerfiles? + +When building Docker images on Debian and Ubuntu, you may have seen errors like: + + unable to initialize frontend: Dialog + +These errors don't stop the image from being built, but they do tell you that the +installation process tried to open a dialog box and was unable to. +Generally, these errors are safe to ignore. + +Some people circumvent these errors by changing the `DEBIAN_FRONTEND` +environment variable inside the Dockerfile using: + + ENV DEBIAN_FRONTEND=noninteractive + +This prevents the installer from opening dialog boxes during installation, +which stops the errors. + +While this may sound like a good idea, it *may* have side effects. +The `DEBIAN_FRONTEND` environment variable will be inherited by all +images and containers built from your image, effectively changing +their behavior. People using those images will run into problems when +installing software interactively, because installers will not show +any dialog boxes. + +Because of this, and because setting `DEBIAN_FRONTEND` to `noninteractive` is +mainly a 'cosmetic' change, we *discourage* changing it. + +If you *really* need to change its setting, make sure to change it +back to its [default value](https://www.debian.org/releases/stable/i386/ch05s03.html.en) +afterwards. +
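A note on the `DEBIAN_FRONTEND` answer above: if you only need to quiet a single installation step, one pattern that avoids persisting the variable is to set it for the duration of one `RUN` instruction (a sketch, not part of the FAQ text; the package shown is arbitrary):

    RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y postgresql-9.3

Because the variable here lives only in the shell of that single build step, it is not recorded in the image's configuration the way `ENV` would record it.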
### Can I help by adding some questions and answers? -Definitely! You can fork [the repo](https://github.com/dotcloud/docker) and +Definitely! You can fork [the repo](https://github.com/docker/docker) and edit the documentation sources. ### Where can I find more answers? @@ -237,7 +269,7 @@ You can find more answers on: - [Docker user mailinglist](https://groups.google.com/d/forum/docker-user) - [Docker developer mailinglist](https://groups.google.com/d/forum/docker-dev) - [IRC, docker on freenode](irc://chat.freenode.net#docker) -- [GitHub](https://github.com/dotcloud/docker) +- [GitHub](https://github.com/docker/docker) - [Ask questions on Stackoverflow](http://stackoverflow.com/search?q=docker) - [Join the conversation on Twitter](http://twitter.com/docker) diff --git a/components/engine/docs/sources/index.md b/components/engine/docs/sources/index.md index 75414b4364..5267557f38 100644 --- a/components/engine/docs/sources/index.md +++ b/components/engine/docs/sources/index.md @@ -94,7 +94,7 @@ To learn about Docker in more detail and to answer questions about usage and imp *`.dockerignore` support* You can now add a `.dockerignore` file next to your `Dockerfile` and Docker will ignore files and directories specified in that file when sending the build context to the daemon. -Example: https://github.com/dotcloud/docker/blob/master/.dockerignore +Example: https://github.com/docker/docker/blob/master/.dockerignore *Pause containers during commit* diff --git a/components/engine/docs/sources/installation/MAINTAINERS b/components/engine/docs/sources/installation/MAINTAINERS index 6a2f512d46..aca13975fd 100644 --- a/components/engine/docs/sources/installation/MAINTAINERS +++ b/components/engine/docs/sources/installation/MAINTAINERS @@ -1 +1,2 @@ google.md: Johan Euphrosine (@proppy) +softlayer.md: Phil Jackson (@underscorephil) diff --git a/components/engine/docs/sources/installation/binaries.md b/components/engine/docs/sources/installation/binaries.md index f6eb44fa64..8e35d98030 100644 --- a/components/engine/docs/sources/installation/binaries.md +++ b/components/engine/docs/sources/installation/binaries.md @@ -23,9 +23,9 @@ runtime: - a [properly mounted]( https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount - point [is](https://github.com/dotcloud/docker/issues/2683) - [not](https://github.com/dotcloud/docker/issues/3485) - [sufficient](https://github.com/dotcloud/docker/issues/4568)) + point [is](https://github.com/docker/docker/issues/2683) + [not](https://github.com/docker/docker/issues/3485) + [sufficient](https://github.com/docker/docker/issues/4568)) ## Check kernel dependencies diff --git a/components/engine/docs/sources/installation/centos.md b/components/engine/docs/sources/installation/centos.md index 3966d0f092..b919ca5806 100644 --- a/components/engine/docs/sources/installation/centos.md +++ b/components/engine/docs/sources/installation/centos.md @@ -4,23 +4,31 @@ page_keywords: Docker, Docker documentation, requirements, linux, centos, epel, # CentOS -The Docker package is available via the EPEL repository. These -instructions work for CentOS 6 and later. They will likely work for +While the Docker package is provided by default as part of CentOS-7, +it is provided by a community repository for CentOS-6. Please note that +this changes the installation instructions slightly between versions. + +These instructions work for CentOS 6 and later. They will likely work for other binary compatible EL6 distributions such as Scientific Linux, but they haven't been tested. -Please note that this package is part of [Extra Packages for Enterprise -Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort -to create and maintain additional packages for the RHEL distribution. - -Also note that due to the current Docker limitations, Docker is able to +Please note that due to the current Docker limitations, Docker is able to run only on the **64 bit** architecture. To run Docker, you will need [CentOS6](http://www.centos.org) or higher, with a kernel version 2.6.32-431 or higher as this has specific kernel fixes to allow Docker to run.
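You can verify the kernel before continuing (a quick sketch; the version shown is only an example of a suitable CentOS 6 kernel):

    $ uname -r
    2.6.32-431.el6.x86_64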
-## Installation +## Installing Docker - CentOS-7 +Docker is included by default in the CentOS-Extras repository. To install, +simply run the following command. + + $ sudo yum install docker + +## Installing Docker - CentOS-6 +Please note that for CentOS-6, this package is part of [Extra Packages +for Enterprise Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort +to create and maintain additional packages for the RHEL distribution. Firstly, you need to ensure you have the EPEL repository enabled. Please follow the [EPEL installation instructions]( @@ -39,7 +47,9 @@ will install Docker on our host. $ sudo yum install docker-io -Now that it's installed, let's start the Docker daemon. +## Using Docker + +Once Docker is installed, you will need to start the Docker daemon. $ sudo service docker start @@ -50,7 +60,7 @@ If we want Docker to start at boot, we should also: Now let's verify that Docker is working. First we'll need to get the latest `centos` image. - $ sudo docker pull centos:latest + $ sudo docker pull centos Next we'll make sure that we can see the image by running: @@ -69,6 +79,12 @@ Run a simple bash shell to test the image: If everything is working properly, you'll get a simple bash prompt. Type exit to continue. +## Dockerfiles +The CentOS Project provides a number of sample Dockerfiles which you may use +either as templates or to familiarize yourself with Docker. These templates +are available on GitHub at [https://github.com/CentOS/CentOS-Dockerfiles]( +https://github.com/CentOS/CentOS-Dockerfiles) + **Done!** You can either continue with the [Docker User Guide](/userguide/) or explore and build on the images yourself. diff --git a/components/engine/docs/sources/installation/debian.md b/components/engine/docs/sources/installation/debian.md index 0ad54b4328..0da2f2f5d0 100644 --- a/components/engine/docs/sources/installation/debian.md +++ b/components/engine/docs/sources/installation/debian.md @@ -23,8 +23,6 @@ To install the latest Debian package (may not be the latest Docker release): $ sudo apt-get update $ sudo apt-get install docker.io - $ sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker - $ sudo sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io To verify that everything has worked as expected: diff --git a/components/engine/docs/sources/installation/fedora.md b/components/engine/docs/sources/installation/fedora.md index a230aa6cf5..757b3e9c44 100644 --- a/components/engine/docs/sources/installation/fedora.md +++ b/components/engine/docs/sources/installation/fedora.md @@ -68,7 +68,7 @@ and above. If you are behind a HTTP proxy server, for example in corporate settings, you will need to add this configuration in the Docker *systemd service file*. -Edit file `/lib/systemd/system/docker.service`. Add the following to +Edit file `/usr/lib/systemd/system/docker.service`. Add the following to section `[Service]` : Environment="HTTP_PROXY=http://proxy.example.com:80/"
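After saving the edit, systemd has to re-read the unit file before the proxy setting takes effect (a sketch of the usual sequence, assuming standard systemd tooling):

    $ sudo systemctl daemon-reload
    $ sudo systemctl restart docker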
diff --git a/components/engine/docs/sources/installation/gentoolinux.md b/components/engine/docs/sources/installation/gentoolinux.md index 62fdc9f00e..ac92ad39c8 100644 --- a/components/engine/docs/sources/installation/gentoolinux.md +++ b/components/engine/docs/sources/installation/gentoolinux.md @@ -39,6 +39,9 @@ and especially missing kernel configuration flags and/or dependencies, https://github.com/tianon/docker-overlay/issues) or ping tianon directly in the #docker IRC channel on the freenode network. +Other USE flags are described in detail on [tianon's +blog](https://tianon.github.io/post/2014/05/17/docker-on-gentoo.html). + ## Starting Docker Ensure that you are running a kernel that includes all the necessary diff --git a/components/engine/docs/sources/installation/google.md b/components/engine/docs/sources/installation/google.md index b6c1b3d275..23a9bfbfb8 100644 --- a/components/engine/docs/sources/installation/google.md +++ b/components/engine/docs/sources/installation/google.md @@ -12,7 +12,7 @@ page_keywords: Docker, Docker documentation, installation, google, Google Comput 2. Download and configure the [Google Cloud SDK][3] to use your project with the following commands: - $ curl https://sdk.cloud.google.com | bash + $ curl -sSL https://sdk.cloud.google.com | bash $ gcloud auth login $ gcloud config set project @@ -20,15 +20,18 @@ page_keywords: Docker, Docker documentation, installation, google, Google Comput (select a zone close to you and the desired instance size) $ gcloud compute instances create docker-playground \ - --image https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140522 \ + --image container-vm-v20140730 \ + --image-project google-containers \ --zone us-central1-a \ --machine-type f1-micro 4. Connect to the instance using SSH: $ gcloud compute ssh --zone us-central1-a docker-playground - docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/' - docker on GCE \o/ + docker-playground:~$ sudo docker run hello-world + Hello from Docker. + This message shows that your installation appears to be working correctly. + ... Read more about [deploying Containers on Google Cloud Platform][5]. diff --git a/components/engine/docs/sources/installation/images/osx-Boot2Docker-Start-app.png b/components/engine/docs/sources/installation/images/osx-Boot2Docker-Start-app.png deleted file mode 100644 index 21e2b7717d..0000000000 Binary files a/components/engine/docs/sources/installation/images/osx-Boot2Docker-Start-app.png and /dev/null differ diff --git a/components/engine/docs/sources/installation/images/osx-installer.png b/components/engine/docs/sources/installation/images/osx-installer.png index 635ac354ed..dbb6bcd2d9 100644 Binary files a/components/engine/docs/sources/installation/images/osx-installer.png and b/components/engine/docs/sources/installation/images/osx-installer.png differ diff --git a/components/engine/docs/sources/installation/mac.md b/components/engine/docs/sources/installation/mac.md index 2aff0e5b89..da0e172892 100644 --- a/components/engine/docs/sources/installation/mac.md +++ b/components/engine/docs/sources/installation/mac.md @@ -7,13 +7,13 @@ page_keywords: Docker, Docker documentation, requirements, boot2docker, VirtualB > **Note:** > Docker is supported on Mac OS X 10.6 "Snow Leopard" or newer.
-The Docker Engine uses Linux-specific kernel features, so to run it on OS X -we need to use a lightweight virtual machine (vm). You use the OS X Docker client to +Because the Docker Engine uses Linux-specific kernel features, you'll need to use a +lightweight virtual machine (VM) to run it on OS X. You use the OS X Docker client to control the virtualized Docker Engine to build, run, and manage Docker containers. -To make this process easier, we've designed a helper application called -[Boot2Docker](https://github.com/boot2docker/boot2docker) that installs the -virtual machine and runs the Docker daemon. +To make this process easier, we've built a helper application called +[Boot2Docker](https://github.com/boot2docker/boot2docker) that installs a +virtual machine (using VirtualBox) that's all set up to run the Docker daemon. ## Demonstration @@ -22,50 +22,67 @@ virtual machine and runs the Docker daemon. ## Installation 1. Download the latest release of the [Docker for OS X Installer]( - https://github.com/boot2docker/osx-installer/releases) + https://github.com/boot2docker/osx-installer/releases) (Look for the green + Boot2Docker-x.x.x.pkg button near the bottom of the page.) -2. Run the installer, which will install VirtualBox and the Boot2Docker management - tool. +2. Run the installer by double-clicking the downloaded package, which will install a +VirtualBox VM, Docker itself, and the Boot2Docker management tool. ![](/installation/images/osx-installer.png) -3. Run the `Boot2Docker` app in the `Applications` folder: - ![](/installation/images/osx-Boot2Docker-Start-app.png) - - Or, to initialize Boot2Docker manually, open a terminal and run: +3. Locate the `Boot2Docker` app in your `Applications` folder and run it. + Or, you can initialize Boot2Docker from the command line by running: $ boot2docker init $ boot2docker start $ export DOCKER_HOST=tcp://$(boot2docker ip 2>/dev/null):2375 +A terminal window will open and you'll see the virtual machine starting up. Once you have an initialized virtual machine, you can control it with `boot2docker stop` and `boot2docker start`. +> **Note:** +> If you see a message in the terminal that looks something like this: +> +> `To connect the Docker client to the Docker daemon, please set: export +DOCKER_HOST=tcp://192.168.59.103:2375` +> +you can safely set the environment variable as instructed. + +View the +[Boot2Docker ReadMe](https://github.com/boot2docker/boot2docker/blob/master/README.md) +for more information. + ## Upgrading 1. Download the latest release of the [Docker for OS X Installer]( https://github.com/boot2docker/osx-installer/releases) -2. Run the installer, which will update VirtualBox and the Boot2Docker management - tool. +2. If Boot2Docker is currently running, stop it with `boot2docker stop`. Then, run +the installer package, which will update Docker and the Boot2Docker management tool. -3. To upgrade your existing virtual machine, open a terminal and run: +3. To complete the upgrade, you also need to update your existing virtual machine. Open a +terminal window and run: $ boot2docker stop $ boot2docker download $ boot2docker start +This will download an .iso containing a fresh VM and start it up. + ## Running Docker -From your terminal, you can test that Docker is running with a “hello world” example. -Start the vm and then run: +From your terminal, you can test that Docker is running with our small `hello-world` +example image. +Start the VM (`boot2docker start`) and then run: - $ docker run ubuntu echo hello world + $ docker run hello-world -This should download the `ubuntu` image and print `hello world`. +This should download the `hello-world` image, which then creates a small +container with an executable that prints a brief `Hello from Docker.` message.
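If you are curious about what just ran, the most recently created container can be listed afterwards (a sketch; exact columns and timing will vary):

    $ docker ps -l

The `hello-world` container should show an exited status, since it only prints its message and then stops.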
## Container port redirection -The latest version of `boot2docker` sets up a host only network adaptor which provides +The latest version of `boot2docker` sets up a host-only network adaptor which provides access to the container's ports. If you run a container with an exposed port, @@ -76,14 +93,16 @@ then you should be able to access that Nginx server using the IP address reporte $ boot2docker ip -Typically, it is 192.168.59.103, but it could get changed by Virtualbox's DHCP -implementation. +Typically, it is 192.168.59.103, but VirtualBox's DHCP implementation might change +this address in the future. # Further details -If you are curious, the username for the boot2docker default user is `docker` and the password is `tcuser`. +If you are curious, the username for the boot2docker default user is `docker` and the +password is `tcuser`. -The Boot2Docker management tool provides several commands: +The Boot2Docker management tool provides several additional commands for working with the +VM and Docker: $ ./boot2docker Usage: ./boot2docker [] diff --git a/components/engine/docs/sources/installation/openSUSE.md b/components/engine/docs/sources/installation/openSUSE.md index c03c74a811..951b8770cc 100644 --- a/components/engine/docs/sources/installation/openSUSE.md +++ b/components/engine/docs/sources/installation/openSUSE.md @@ -47,13 +47,27 @@ The docker package creates a new group named docker. Users, other than root user, need to be part of this group in order to interact with the Docker daemon. You can add users with: - $ sudo usermod -a -G docker + $ sudo /usr/sbin/usermod -a -G docker To verify that everything has worked as expected: - $ sudo docker run --rm -i -t ubuntu /bin/bash + $ sudo docker run --rm -i -t opensuse /bin/bash + +This should download and import the `opensuse` image, and then start `bash` in +a container. To exit the container type `exit`. + +If you want your containers to be able to access the external network you must +enable the `net.ipv4.ip_forward` rule. +This can be done using YaST by browsing to the +`Network Devices -> Network Settings -> Routing` menu and ensuring that the +`Enable IPv4 Forwarding` box is checked. + +This option cannot be changed when networking is handled by the Network Manager. +In such cases the `/etc/sysconfig/SuSEfirewall2` file needs to be edited by +hand to ensure the `FW_ROUTE` flag is set to `yes` like so: + + FW_ROUTE="yes" -This should download and import the `ubuntu` image, and then start `bash` in a container. To exit the container type `exit`. **Done!** diff --git a/components/engine/docs/sources/installation/rhel.md b/components/engine/docs/sources/installation/rhel.md index c144573687..a8b785acca 100644 --- a/components/engine/docs/sources/installation/rhel.md +++ b/components/engine/docs/sources/installation/rhel.md @@ -2,15 +2,35 @@ page_title: Installation on Red Hat Enterprise Linux page_description: Installation instructions for Docker on Red Hat Enterprise Linux.
page_keywords: Docker, Docker documentation, requirements, linux, rhel, centos -# Red Hat Enterprise Linux +# Red Hat Enterprise Linux 7 -Docker is available for **RHEL** on EPEL. These instructions should work -for both RHEL and CentOS. They will likely work for other binary -compatible EL6 distributions as well, but they haven't been tested. +**Red Hat Enterprise Linux 7** has [shipped with +Docker](https://access.redhat.com/site/products/red-hat-enterprise-linux/docker-and-containers). +An overview and some guidance can be found in the [Release +Notes](https://access.redhat.com/site/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/7.0_Release_Notes/chap-Red_Hat_Enterprise_Linux-7.0_Release_Notes-Linux_Containers_with_Docker_Format.html). -Please note that this package is part of [Extra Packages for Enterprise -Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort -to create and maintain additional packages for the RHEL distribution. +Docker is located in the *extras* channel. To install Docker: + +1. Enable the *extras* channel: + + $ sudo subscription-manager repos --enable=rhel-7-server-extras-rpms + +2. Install Docker: + + $ sudo yum install docker + +Additional installation, configuration, and usage information, +including a [Get Started with Docker Containers in Red Hat +Enterprise Linux 7](https://access.redhat.com/site/articles/881893) +guide, can be found by Red Hat customers on the [Red Hat Customer +Portal](https://access.redhat.com/). + +# Red Hat Enterprise Linux 6 + +Docker is available for **RHEL** on EPEL. Please note that +this package is part of [Extra Packages for Enterprise Linux +(EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort to +create and maintain additional packages for the RHEL distribution. Also note that due to the current Docker limitations, Docker is able to run only on the **64 bit** architecture. diff --git a/components/engine/docs/sources/installation/softlayer.md b/components/engine/docs/sources/installation/softlayer.md index d01866720c..d594896a92 100644 --- a/components/engine/docs/sources/installation/softlayer.md +++ b/components/engine/docs/sources/installation/softlayer.md @@ -6,22 +6,22 @@ page_keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, inst 1. Create an [IBM SoftLayer account]( https://www.softlayer.com/cloud-servers/). -2. Log in to the [SoftLayer Console]( - https://control.softlayer.com/devices/). -3. Go to [Order Hourly Computing Instance Wizard]( - https://manage.softlayer.com/Sales/orderHourlyComputingInstance) - on your SoftLayer Console. -4. Create a new *CloudLayer Computing Instance* (CCI) using the default +2. Log in to the [SoftLayer Customer Portal]( + https://control.softlayer.com/). +3. From the *Devices* menu select [*Device List*](https://control.softlayer.com/devices) +4. Click *Order Devices* on the top right of the window below the menu bar. +5. Under *Virtual Server* click [*Hourly*](https://manage.softlayer.com/Sales/orderHourlyComputingInstance) +6. Create a new *SoftLayer Virtual Server Instance* (VSI) using the default values for all the fields and choose: - - *First Available* as `Datacenter` and + - The desired location for *Datacenter* - *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)* - as `Operating System`. + for *Operating System*. -5. Click the *Continue Your Order* button at the bottom right and - select *Go to checkout*. -6. Insert the required *User Metadata* and place the order. -7. 
Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) +7. Click the *Continue Your Order* button at the bottom right. +8. Fill out VSI *hostname* and *domain*. +9. Insert the required *User Metadata* and place the order. +10. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) instructions. ## What next? diff --git a/components/engine/docs/sources/installation/ubuntulinux.md b/components/engine/docs/sources/installation/ubuntulinux.md index 5d1b6c3fbf..673ea18b0c 100644 --- a/components/engine/docs/sources/installation/ubuntulinux.md +++ b/components/engine/docs/sources/installation/ubuntulinux.md @@ -63,7 +63,7 @@ continue installation.* > > There is also a simple `curl` script available to help with this process. > -> $ curl -s https://get.docker.io/ubuntu/ | sudo sh +> $ curl -sSL https://get.docker.io/ubuntu/ | sudo sh To verify that everything has worked as expected: @@ -134,7 +134,7 @@ continue installation.* > > There is also a simple `curl` script available to help with this process. > -> $ curl -s https://get.docker.io/ubuntu/ | sudo sh +> $ curl -sSL https://get.docker.io/ubuntu/ | sudo sh Now verify that the installation has worked by downloading the `ubuntu` image and launching a container. @@ -266,11 +266,11 @@ These parameters will help you get rid of the following warnings: ## Troubleshooting -On Linux Mint, the `cgroup-lite` package is not +On Linux Mint, the `cgroup-lite` and `apparmor` packages are not installed by default. Before Docker will work correctly, you will need to install this via: - $ sudo apt-get update && sudo apt-get install cgroup-lite + $ sudo apt-get update && sudo apt-get install cgroup-lite apparmor ## Docker and UFW diff --git a/components/engine/docs/sources/installation/windows.md b/components/engine/docs/sources/installation/windows.md index 9908c053d0..6220cd6b6e 100644 --- a/components/engine/docs/sources/installation/windows.md +++ b/components/engine/docs/sources/installation/windows.md @@ -51,11 +51,11 @@ and the Boot2Docker management tool. Boot2Docker will log you in automatically so you can start using Docker right away. -Let's try the “hello world” example. Run +Let's try the `hello-world` example image. Run - $ docker run busybox echo hello world + $ docker run hello-world -This will download the small busybox image and print "hello world". +This should download the very small `hello-world` image and print a `Hello from Docker.` message. # Further Details diff --git a/components/engine/docs/sources/introduction/understanding-docker.md b/components/engine/docs/sources/introduction/understanding-docker.md index c79573a635..9448f68d8b 100644 --- a/components/engine/docs/sources/introduction/understanding-docker.md +++ b/components/engine/docs/sources/introduction/understanding-docker.md @@ -55,7 +55,7 @@ Docker's portability and lightweight nature also make dynamically managing workloads easy. You can use Docker to quickly scale up or tear down applications and services. Docker's speed means that scaling can be near real time. -*Achieving higher density and running more workloads** +*Achieving higher density and running more workloads* Docker is lightweight and fast. It provides a viable, cost-effective alternative to hypervisor-based virtual machines. This is especially useful in high density @@ -79,7 +79,7 @@ Docker uses a client-server architecture. The Docker *client* talks to the Docker *daemon*, which does the heavy lifting of building, running, and distributing your Docker containers. 
Both the Docker client and the daemon *can* run on the same system, or you can connect a Docker client to a remote Docker -daemon. The Docker client and service communicate via sockets or through a +daemon. The Docker client and daemon communicate via sockets or through a RESTful API. ![Docker Architecture Diagram](/article-img/architecture.svg) @@ -157,7 +157,7 @@ this as the base of all your web application images. > **Note:** Docker usually gets these base images from > [Docker Hub](https://hub.docker.com). -> + Docker images are then built from these base images using a simple, descriptive set of steps we call *instructions*. Each instruction creates a new layer in our image. Instructions include actions like: diff --git a/components/engine/docs/sources/reference/api.md b/components/engine/docs/sources/reference/api.md deleted file mode 100644 index b617211037..0000000000 --- a/components/engine/docs/sources/reference/api.md +++ /dev/null @@ -1,87 +0,0 @@ -# APIs - -Your programs and scripts can access Docker's functionality via these -interfaces: - - - [Registry & Index Spec](registry_index_spec/) - - [1. The 3 roles](registry_index_spec/#the-3-roles) - - [1.1 Index](registry_index_spec/#index) - - [1.2 Registry](registry_index_spec/#registry) - - [1.3 Docker](registry_index_spec/#docker) - - - [2. Workflow](registry_index_spec/#workflow) - - [2.1 Pull](registry_index_spec/#pull) - - [2.2 Push](registry_index_spec/#push) - - [2.3 Delete](registry_index_spec/#delete) - - - [3. How to use the Registry in standalone mode](registry_index_spec/#how-to-use-the-registry-in-standalone-mode) - - [3.1 Without an Index](registry_index_spec/#without-an-index) - - [3.2 With an Index](registry_index_spec/#with-an-index) - - - [4. The API](registry_index_spec/#the-api) - - [4.1 Images](registry_index_spec/#images) - - [4.2 Users](registry_index_spec/#users) - - [4.3 Tags (Registry)](registry_index_spec/#tags-registry) - - [4.4 Images (Index)](registry_index_spec/#images-index) - - [4.5 Repositories](registry_index_spec/#repositories) - - - [5. Chaining Registries](registry_index_spec/#chaining-registries) - - [6. Authentication & Authorization](registry_index_spec/#authentication-authorization) - - [6.1 On the Index](registry_index_spec/#on-the-index) - - [6.2 On the Registry](registry_index_spec/#on-the-registry) - - - [7 Document Version](registry_index_spec/#document-version) - - - [Docker Registry API](registry_api/) - - [1. Brief introduction](registry_api/#brief-introduction) - - [2. Endpoints](registry_api/#endpoints) - - [2.1 Images](registry_api/#images) - - [2.2 Tags](registry_api/#tags) - - [2.3 Repositories](registry_api/#repositories) - - [2.4 Status](registry_api/#status) - - - [3 Authorization](registry_api/#authorization) - - - [Docker Hub API](index_api/) - - [1. Brief introduction](index_api/#brief-introduction) - - [2. Endpoints](index_api/#endpoints) - - [2.1 Repository](index_api/#repository) - - [2.2 Users](index_api/#users) - - [2.3 Search](index_api/#search) - - - [Docker Remote API](docker_remote_api/) - - [1. Brief introduction](docker_remote_api/#brief-introduction) - - [2. 
Versions](docker_remote_api/#versions) - - [v1.12](docker_remote_api/#v1-12) - - [v1.11](docker_remote_api/#v1-11) - - [v1.10](docker_remote_api/#v1-10) - - [v1.9](docker_remote_api/#v1-9) - - [v1.8](docker_remote_api/#v1-8) - - [v1.7](docker_remote_api/#v1-7) - - [v1.6](docker_remote_api/#v1-6) - - [v1.5](docker_remote_api/#v1-5) - - [v1.4](docker_remote_api/#v1-4) - - [v1.3](docker_remote_api/#v1-3) - - [v1.2](docker_remote_api/#v1-2) - - [v1.1](docker_remote_api/#v1-1) - - [v1.0](docker_remote_api/#v1-0) - - - [Docker Remote API Client Libraries](remote_api_client_libraries/) - - [docker.io OAuth API](docker_io_oauth_api/) - - [1. Brief introduction](docker_io_oauth_api/#brief-introduction) - - [2. Register Your Application](docker_io_oauth_api/#register-your-application) - - [3. Endpoints](docker_io_oauth_api/#endpoints) - - [3.1 Get an Authorization Code](docker_io_oauth_api/#get-an-authorization-code) - - [3.2 Get an Access Token](docker_io_oauth_api/#get-an-access-token) - - [3.3 Refresh a Token](docker_io_oauth_api/#refresh-a-token) - - - [4. Use an Access Token with the API](docker_io_oauth_api/#use-an-access-token-with-the-api) - - - [docker.io Accounts API](docker_io_accounts_api/) - - [1. Endpoints](docker_io_accounts_api/#endpoints) - - [1.1 Get a single user](docker_io_accounts_api/#get-a-single-user) - - [1.2 Update a single user](docker_io_accounts_api/#update-a-single-user) - - [1.3 List email addresses for a user](docker_io_accounts_api/#list-email-addresses-for-a-user) - - [1.4 Add email address for a user](docker_io_accounts_api/#add-email-address-for-a-user) - - [1.5 Update an email address for a user](docker_io_accounts_api/#update-an-email-address-for-a-user) - - [1.6 Delete email address for a user](docker_io_accounts_api/#delete-email-address-for-a-user) diff --git a/components/engine/docs/sources/reference/api/docker-io_api.md b/components/engine/docs/sources/reference/api/docker-io_api.md index d5be332d35..e34e43f3bf 100644 --- a/components/engine/docs/sources/reference/api/docker-io_api.md +++ b/components/engine/docs/sources/reference/api/docker-io_api.md @@ -421,7 +421,7 @@ Registering a new account. Accept: application/json Content-Type: application/json - {"email": "sam@dotcloud.com", + {"email": "sam@docker.com", "password": "toto42", "username": "foobar"} @@ -468,7 +468,7 @@ Change a password or email address for given user. If you pass in an Content-Type: application/json Authorization: Basic akmklmasadalkm== - {"email": "sam@dotcloud.com", + {"email": "sam@docker.com", "password": "toto42"} Parameters: diff --git a/components/engine/docs/sources/reference/api/docker_io_oauth_api.md b/components/engine/docs/sources/reference/api/docker_io_oauth_api.md deleted file mode 100644 index c5d07720b8..0000000000 --- a/components/engine/docs/sources/reference/api/docker_io_oauth_api.md +++ /dev/null @@ -1,254 +0,0 @@ -page_title: docker.io OAuth API -page_description: API Documentation for docker.io's OAuth flow. -page_keywords: API, Docker, oauth, REST, documentation - -# docker.io OAuth API - -## 1. Brief introduction - -Some docker.io API requests will require an access token to -authenticate. To get an access token for a user, that user must first -grant your application access to their docker.io account. In order for -them to grant your application access you must first register your -application. - -Before continuing, we encourage you to familiarize yourself with [The -OAuth 2.0 Authorization Framework](http://tools.ietf.org/html/rfc6749). 
- -*Also note that all OAuth interactions must take place over https -connections* - -## 2. Register Your Application - -You will need to register your application with docker.io before users -will be able to grant your application access to their account -information. We are currently only allowing applications selectively. To -request registration of your application send an email to -[support-accounts@docker.com](mailto:support-accounts%40docker.com) with -the following information: - - - The name of your application - - A description of your application and the service it will provide to - docker.io users. - - A callback URI that we will use for redirecting authorization - requests to your application. These are used in the step of getting - an Authorization Code. The domain name of the callback URI will be - visible to the user when they are requested to authorize your - application. - -When your application is approved you will receive a response from the -docker.io team with your `client_id` and -`client_secret` which your application will use in -the steps of getting an Authorization Code and getting an Access Token. - -# 3. Endpoints - -## 3.1 Get an Authorization Code - -Once You have registered you are ready to start integrating docker.io -accounts into your application! The process is usually started by a user -following a link in your application to an OAuth Authorization endpoint. - -`GET /api/v1.1/o/authorize/` - -Request that a docker.io user authorize your application. If the -user is not already logged in, they will be prompted to login. The -user is then presented with a form to authorize your application for -the requested access scope. On submission, the user will be -redirected to the specified `redirect_uri` with -an Authorization Code. - - Query Parameters: - -   - - - **client_id** – The `client_id` given to - your application at registration. - - **response_type** – MUST be set to `code`. - This specifies that you would like an Authorization Code - returned. - - **redirect_uri** – The URI to redirect back to after the user - has authorized your application. If omitted, the first of your - registered `response_uris` is used. If - included, it must be one of the URIs which were submitted when - registering your application. - - **scope** – The extent of access permissions you are requesting. - Currently, the scope options are `profile_read`, `profile_write`, - `email_read`, and `email_write`. Scopes must be separated by a space. If omitted, the - default scopes `profile_read email_read` are - used. - - **state** – (Recommended) Used by your application to maintain - state between the authorization request and callback to protect - against CSRF attacks. - - **Example Request** - - Asking the user for authorization. - - GET /api/v1.1/o/authorize/?client_id=TestClientID&response_type=code&redirect_uri=https%3A//my.app/auth_complete/&scope=profile_read%20email_read&state=abc123 HTTP/1.1 - Host: www.docker.io - - **Authorization Page** - - When the user follows a link, making the above GET request, they - will be asked to login to their docker.io account if they are not - already and then be presented with the following authorization - prompt which asks the user to authorize your application with a - description of the requested scopes. - - ![](/reference/api/_static/io_oauth_authorization_page.png) - - Once the user allows or denies your Authorization Request the user - will be redirected back to your application. 
Included in that - request will be the following query parameters: - - `code` - : The Authorization code generated by the docker.io authorization - server. Present it again to request an Access Token. This code - expires in 60 seconds. - `state` - : If the `state` parameter was present in the - authorization request this will be the exact value received from - that request. - `error` - : An error message in the event of the user denying the - authorization or some other kind of error with the request. - -## 3.2 Get an Access Token - -Once the user has authorized your application, a request will be made to -your application's specified `redirect_uri` which -includes a `code` parameter that you must then use -to get an Access Token. - -`POST /api/v1.1/o/token/` - -Submit your newly granted Authorization Code and your application's -credentials to receive an Access Token and Refresh Token. The code -is valid for 60 seconds and cannot be used more than once. - - Request Headers: - -   - - - **Authorization** – HTTP basic authentication using your - application's `client_id` and - `client_secret` - - Form Parameters: - -   - - - **grant_type** – MUST be set to `authorization_code` - - **code** – The authorization code received from the user's - redirect request. - - **redirect_uri** – The same `redirect_uri` - used in the authentication request. - - **Example Request** - - Using an authorization code to get an access token. - - POST /api/v1.1/o/token/ HTTP/1.1 - Host: www.docker.io - Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ= - Accept: application/json - Content-Type: application/json - - { - "grant_type": "code", - "code": "YXV0aG9yaXphdGlvbl9jb2Rl", - "redirect_uri": "https://my.app/auth_complete/" - } - - **Example Response** - - HTTP/1.1 200 OK - Content-Type: application/json;charset=UTF-8 - - { - "username": "janedoe", - "user_id": 42, - "access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS", - "expires_in": 15552000, - "token_type": "Bearer", - "scope": "profile_read email_read", - "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc" - } - - In the case of an error, there will be a non-200 HTTP Status and and - data detailing the error. - -## 3.3 Refresh a Token - -Once the Access Token expires you can use your `refresh_token` -to have docker.io issue your application a new Access Token, -if the user has not revoked access from your application. - -`POST /api/v1.1/o/token/` - -Submit your `refresh_token` and application's -credentials to receive a new Access Token and Refresh Token. The -`refresh_token` can be used only once. - - Request Headers: - -   - - - **Authorization** – HTTP basic authentication using your - application's `client_id` and - `client_secret` - - Form Parameters: - -   - - - **grant_type** – MUST be set to `refresh_token` - - **refresh_token** – The `refresh_token` - which was issued to your application. - - **scope** – (optional) The scope of the access token to be - returned. Must not include any scope not originally granted by - the user and if omitted is treated as equal to the scope - originally granted. - - **Example Request** - - Refreshing an access token. 
- - POST /api/v1.1/o/token/ HTTP/1.1 - Host: www.docker.io - Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ= - Accept: application/json - Content-Type: application/json - - { - "grant_type": "refresh_token", - "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc", - } - - **Example Response** - - HTTP/1.1 200 OK - Content-Type: application/json;charset=UTF-8 - - { - "username": "janedoe", - "user_id": 42, - "access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS", - "expires_in": 15552000, - "token_type": "Bearer", - "scope": "profile_read email_read", - "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc" - } - - In the case of an error, there will be a non-200 HTTP Status and and - data detailing the error. - -# 4. Use an Access Token with the API - -Many of the docker.io API requests will require a Authorization request -header field. Simply ensure you add this header with "Bearer <`access_token`>": - - GET /api/v1.1/resource HTTP/1.1 - Host: docker.io - Authorization: Bearer 2YotnFZFEjr1zCsicMWpAA diff --git a/components/engine/docs/sources/reference/api/docker_remote_api.md b/components/engine/docs/sources/reference/api/docker_remote_api.md index 36f35383e1..e712f864f2 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api.md @@ -18,13 +18,39 @@ page_keywords: API, Docker, rcli, REST, documentation encoded (JSON) string with credentials: `{'username': string, 'password': string, 'email': string, 'serveraddress' : string}` -The current version of the API is v1.13 +The current version of the API is v1.14 -Calling `/images//insert` is the same as calling -`/v1.13/images//insert`. +Calling `/info` is the same as calling +`/v1.14/info`. You can still call an old version of the API using -`/v1.12/images//insert`. +`/v1.13/info`. + +## v1.14 + +### Full Documentation + +[*Docker Remote API v1.14*](/reference/api/docker_remote_api_v1.14/) + +### What's new + +`DELETE /containers/(id)` + +**New!** +When using `force`, the container will be immediately killed with SIGKILL. + +`POST /containers/(id)/start` + +**New!** +The `hostConfig` option now accepts the field `CapAdd`, which specifies a list of capabilities +to add, and the field `CapDrop`, which specifies a list of capabilities to drop. + +`POST /images/create` + +**New!** +The `fromImage` and `repo` parameters now support the `repo:tag` format. +Consequently, the `tag` parameter is now obsolete. Using the new format and +the `tag` parameter at the same time will return an error.
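As an illustration of the new format, a pull against v1.14 can now name the repository and tag together in one parameter (an illustrative request only; `ubuntu:14.04` is just an example image):

    POST /v1.14/images/create?fromImage=ubuntu:14.04 HTTP/1.1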
## v1.13 @@ -100,7 +126,7 @@ after timestamp. `GET /containers/(id)/logs` -This url is prefered method for getting container logs now. +This URL is now the preferred method for getting container logs. ## v1.10 @@ -284,7 +310,7 @@ output is now generated in the client, using the **New!** You can now split stderr from stdout. This is done by -prefixing a header to each transmition. See +prefixing a header to each transmission. See [`POST /containers/(id)/attach`]( /reference/api/docker_remote_api_v1.9/#post--containers-(id)-attach "POST /containers/(id)/attach"). The WebSocket attach is unchanged. Note that attach calls on the @@ -344,7 +370,7 @@ Image's name added in the events ## v1.3 docker v0.5.0 -[51f6c4a](https://github.com/dotcloud/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909) +[51f6c4a](https://github.com/docker/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909) ### Full Documentation @@ -384,7 +410,7 @@ Start containers (/containers//start): ## v1.2 docker v0.4.2 -[2e7649b](https://github.com/dotcloud/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168) +[2e7649b](https://github.com/docker/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168) ### Full Documentation @@ -416,7 +442,7 @@ deleted/untagged. ## v1.1 docker v0.4.0 -[a8ae398](https://github.com/dotcloud/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f) +[a8ae398](https://github.com/docker/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f) ### Full Documentation @@ -443,7 +469,7 @@ Uses json stream instead of HTML hijack, it looks like this: ## v1.0 docker v0.3.4 -[8d73740](https://github.com/dotcloud/docker/commit/8d73740343778651c09160cde9661f5f387b36f4) +[8d73740](https://github.com/docker/docker/commit/8d73740343778651c09160cde9661f5f387b36f4) ### Full Documentation diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.0.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.0.md index b906298b85..197991de94 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.0.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.0.md @@ -194,7 +194,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {} } @@ -566,6 +566,13 @@ Insert a file from `url` in the image `name` at `path` {{ STREAM }} + Query Parameters: + + + + - **url** – The URL from where the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -670,12 +677,6 @@ Push the image `name` on the registry {{ STREAM }} - Query Parameters: - -   - - - **registry** – the registry you wan to push, optional - Status Codes: - **200** – no error diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.1.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.1.md index 4e449bccec..928e3210f2 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.1.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.1.md @@ -194,7 +194,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {} } @@ -573,6 +573,13 @@ Insert a file from `url` in the image `name` at `path` {"error":"Invalid..."} ... + Query Parameters: + + + + - **url** – The URL from where the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -680,12 +687,6 @@ Push the image `name` on the registry {"error":"Invalid..."} ...
- Query Parameters: -   - - **registry** – the registry you wan to push, optional - Status Codes: - **200** – no error diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.10.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.10.md index 264cdefc20..6ffae3e07e 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -220,7 +220,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { @@ -739,6 +739,13 @@ Insert a file from `url` in the image {"error":"Invalid..."} ... + Query Parameters: + + + + - **url** – The URL from where the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -846,11 +853,20 @@ Push the image `name` on the registry {"error":"Invalid..."} ... + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + + **Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + Query Parameters:   - - **registry** – the registry you wan to push, optional + - **tag** – the tag to associate with the image on the registry, optional Request Headers: diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.11.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.11.md index ae2daae407..a0187dbdf6 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -224,7 +224,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { @@ -861,11 +861,20 @@ Push the image `name` on the registry {"error":"Invalid..."} ... + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + + **Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + Query Parameters:   - - **registry** – the registry you wan to push, optional + - **tag** – the tag to associate with the image on the registry, optional Request Headers: diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.12.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.12.md index 19fb24fe48..9ea83e2853 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.12.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.12.md @@ -90,6 +90,8 @@ List containers non-running ones. - **size** – 1/True/true or 0/False/false, Show the containers sizes + - **filters** – a JSON encoded value of the filters (a map[string][]string) + to process on the containers list.
Status Codes: @@ -224,7 +226,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { @@ -759,7 +761,7 @@ Copy files or folders of container `id`   - **all** – 1/True/true or 0/False/false, default false - - **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. + - **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. @@ -808,30 +810,7 @@ Create an image, either by pull it from the registry or by importing it - **200** – no error - **500** – server error -### Insert a file in an image -`POST /images/(name)/insert` - -Insert a file from `url` in the image `name` at `path` - - **Example request**: - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} - {"error":"Invalid..."} - ... - - Status Codes: - - - **200** – no error - - **500** – server error ### Inspect an image @@ -937,11 +916,20 @@ Push the image `name` on the registry {"error":"Invalid..."} ... + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + + **Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + Query Parameters:   - - **registry** – the registry you wan to push, optional + - **tag** – the tag to associate with the image on the registry, optional Request Headers: diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.13.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.13.md index e0ad957941..d782391476 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.13.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.13.md @@ -1,4 +1,4 @@ -page_title: Remote API v1.12 +page_title: Remote API v1.13 page_description: API Documentation for Docker page_keywords: API, Docker, rcli, REST, documentation @@ -224,7 +224,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { @@ -808,30 +808,7 @@ Create an image, either by pull it from the registry or by importing it - **200** – no error - **500** – server error -### Insert a file in an image -`POST /images/(name)/insert` - -Insert a file from `url` in the image `name` at `path` - - **Example request**: - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} - {"error":"Invalid..."} - ... - - Status Codes: - - - **200** – no error - - **500** – server error ### Inspect an image @@ -937,11 +914,20 @@ Push the image `name` on the registry {"error":"Invalid..."} ... 
+ If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + + **Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + Query Parameters:   - - **registry** – the registry you wan to push, optional + - **tag** – the tag to associate with the image on the registry, optional Request Headers: @@ -1184,7 +1170,6 @@ Display system-wide information "NGoroutines":21, "NEventsListener":0, "InitPath":"/usr/bin/docker", - "Sockets":["unix:///var/run/docker.sock"], "IndexServerAddress":["https://index.docker.io/v1/"], "MemoryLimit":true, "SwapLimit":false, diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.14.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.14.md new file mode 100644 index 0000000000..9a9c36e54b --- /dev/null +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.14.md @@ -0,0 +1,1411 @@ +page_title: Remote API v1.14 +page_description: API Documentation for Docker +page_keywords: API, Docker, rcli, REST, documentation + +# Docker Remote API v1.14 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can + [*Bind Docker to another host/port or a Unix socket*]( + /use/basics/#bind-docker). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + + **Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + } + ] + + Query Parameters: + +   + + - **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default + - **limit** – Show `limit` last created + containers, include non-running ones. + - **since** – Show only containers created since Id, include + non-running ones. + - **before** – Show only containers created before Id, include + non-running ones. 
+ - **size** – 1/True/true or 0/False/false, Show the containers' + sizes + + Status Codes: + + - **200** – no error + - **400** – bad parameter + - **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + + **Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Image":"base", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "DisableNetwork": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + + **Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + + JSON Parameters: + +   + + - **config** – the container's configuration + + Query Parameters: + +   + + - **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + + Status Codes: + + - **201** – no error + - **404** – no such container + - **406** – impossible to attach (container not running) + - **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + + **Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir":"" + + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:00", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] + } + } + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + + **Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles":[ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes":[ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + + Query Parameters: + +   + + - 
**ps_args** – ps arguments to use (e.g., aux) + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get stdout and stderr logs from the container ``id`` + + **Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + + Query Parameters: + +   + + - **follow** – 1/True/true or 0/False/false, return stream. Default false + - **stdout** – 1/True/true or 0/False/false, show stdout log. Default false + - **stderr** – 1/True/true or 0/False/false, show stderr log. Default false + - **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default false + - **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + + **Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path":"/dev", + "Kind":0 + }, + { + "Path":"/dev/kmsg", + "Kind":1 + }, + { + "Path":"/test", + "Kind":1 + } + ] + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + + **Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ STREAM }} + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + + **Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "Links":["redis3:redis"], + "LxcConf":{"lxc.utsname":"docker"}, + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] + } + + **Example response**: + + HTTP/1.1 204 No Content + Content-Type: text/plain + + JSON Parameters: + +   + + - **hostConfig** – the container's host configuration (optional) + + Status Codes: + + - **204** – no error + - **304** – container already started + - **404** – no such container + - **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + + **Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + + **Example response**: + + HTTP/1.1 204 No Content + + Query Parameters: + +   + + - **t** – number of seconds to wait before killing the container + + Status Codes: + + - **204** – no error + - **304** – container already stopped + - **404** – no such container + - **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + + **Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + + **Example response**: + + HTTP/1.1 204 No Content + + Query Parameters: + +   + + - **t** – number of seconds to wait before killing the container + + Status Codes: + + - 
**204** – no error + - **404** – no such container + - **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + + **Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + + **Example response**: + + HTTP/1.1 204 No Content + + Query Parameters: + + - **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call waits for the container to exit. + + Status Codes: + + - **204** – no error + - **404** – no such container + - **500** – server error + +### Pause a container + +`POST /containers/(id)/pause` + +Pause the container `id` + + **Example request**: + + POST /containers/e90e34656806/pause HTTP/1.1 + + **Example response**: + + HTTP/1.1 204 No Content + + Status Codes: + + - **204** – no error + - **404** – no such container + - **500** – server error + +### Unpause a container + +`POST /containers/(id)/unpause` + +Unpause the container `id` + + **Example request**: + + POST /containers/e90e34656806/unpause HTTP/1.1 + + **Example response**: + + HTTP/1.1 204 No Content + + Status Codes: + + - **204** – no error + - **404** – no such container + - **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + + **Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + + Query Parameters: + +   + + - **logs** – 1/True/true or 0/False/false, return logs. Default + false + - **stream** – 1/True/true or 0/False/false, return stream. + Default false + - **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false + - **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false + - **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + + Status Codes: + + - **200** – no error + - **400** – bad parameter + - **404** – no such container + - **500** – server error + + **Stream details**: + + When the TTY setting is enabled in + [`POST /containers/create` + ](../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). + + **HEADER** + + The header contains the information on which stream the frame + belongs to (stdout or stderr). It also contains the size of the + associated frame, encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + + - 0: stdin (will be written on stdout) + - 1: stdout + - 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. Choose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 bytes + 4. Read the extracted size and output it on the correct output + 5. 
Goto 1) + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then return the exit code + + **Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode":0} + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + + **Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + + **Example response**: + + HTTP/1.1 204 No Content + + Query Parameters: + +   + + - **v** – 1/True/true or 0/False/false, Remove the volumes + associated with the container. Default false + - **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default false + + Status Codes: + + - **204** – no error + - **400** – bad parameter + - **404** – no such container + - **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + + **Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource":"test.txt" + } + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ STREAM }} + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + + Query Parameters: + +   + + - **all** – 1/True/true or 0/False/false, default false + - **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. + + + +### Create an image + +`POST /images/create` + +Create an image, either by pulling it from the registry or by importing it + + **Example request**: + + POST /images/create?fromImage=base HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} + {"error":"Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. 
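+
+ For illustration, here is a minimal sketch of how a client might build
+ that header value (Python; the AuthConfig fields mirror the `/auth`
+ example later in this document, and the credentials are placeholders):
+
+     import base64
+     import json
+
+     # AuthConfig object, with the same fields the /auth endpoint accepts
+     auth_config = {
+         "username": "hannibal",
+         "password": "xxxx",
+         "email": "hannibal@a-team.com",
+         "serveraddress": "https://index.docker.io/v1/"
+     }
+
+     # X-Registry-Auth carries this JSON object, base64-encoded
+     x_registry_auth = base64.b64encode(json.dumps(auth_config).encode("utf-8"))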
+ + Query Parameters: + +   + + - **fromImage** – name of the image to pull + - **fromSrc** – source to import, - means stdin + - **repo** – repository + - **tag** – tag + - **registry** – the registry to pull from + + Request Headers: + +   + + - **X-Registry-Auth** – base64-encoded AuthConfig object + + Status Codes: + + - **200** – no error + - **500** – server error + + + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + + **Example request**: + + GET /images/base/json HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created":"2013-03-23T22:24:18.818426-07:00", + "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"base", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent":"27cf784147099545", + "Size": 6824592 + } + + Status Codes: + + - **200** – no error + - **404** – no such image + - **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + + **Example request**: + + GET /images/base/history HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id":"b750fe79269d", + "Created":1364102658, + "CreatedBy":"/bin/bash" + }, + { + "Id":"27cf78414709", + "Created":1364068391, + "CreatedBy":"" + } + ] + + Status Codes: + + - **200** – no error + - **404** – no such image + - **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + + **Example request**: + + POST /images/test/push HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}} + {"error":"Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + + **Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + + Query Parameters: + +   + + - **tag** – the tag to associate with the image on the registry, optional + + Request Headers: + +   + + - **X-Registry-Auth** – include a base64-encoded AuthConfig + object. 
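+
+ As a further example, the `tag` query parameter documented above can be
+ combined with the private registry form of the URL (a sketch; the
+ repository name is taken from the example above and `latest` is
+ illustrative):
+
+     POST /images/registry.acme.com:5000/test/push?tag=latest HTTP/1.1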
+ + Status Codes: + + - **200** – no error + - **404** – no such image + - **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + + **Example request**: + + POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 + + **Example response**: + + HTTP/1.1 201 Created + + Query Parameters: + +   + + - **repo** – The repository to tag in + - **force** – 1/True/true or 0/False/false, default false + + Status Codes: + + - **201** – no error + - **400** – bad parameter + - **404** – no such image + - **409** – conflict + - **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + + **Example request**: + + DELETE /images/test HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged":"3e2f21a89f"}, + {"Deleted":"3e2f21a89f"}, + {"Deleted":"53b4f83ac9"} + ] + + Query Parameters: + +   + + - **force** – 1/True/true or 0/False/false, default false + - **noprune** – 1/True/true or 0/False/false, default false + + Status Codes: + + - **200** – no error + - **404** – no such image + - **409** – conflict + - **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + + **Example request**: + + GET /images/search?term=sshd HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + + Query Parameters: + +   + + - **term** – term to search + + Status Codes: + + - **200** – no error + - **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + + **Example request**: + + POST /build HTTP/1.1 + + {{ STREAM }} + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream":"Step 1..."} + {"stream":"..."} + {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + + Query Parameters: + +   + + - **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success + - **q** – suppress verbose build output + - **nocache** – do not use the cache when building the image + - **rm** - remove intermediate containers after a successful build (default behavior) + - **forcerm** - always remove intermediate containers (includes rm) + + Request Headers: + +   + + - **Content-type** – should be set to + `"application/tar"`. 
+ - **X-Registry-Config** – base64-encoded ConfigFile object + + Status Codes: + + - **200** – no error + - **500** – server error + +### Check auth configuration + +`POST /auth` + +Get the default username and email + + **Example request**: + + POST /auth HTTP/1.1 + Content-Type: application/json + + { + "username":"hannibal", + "password":"xxxx", + "email":"hannibal@a-team.com", + "serveraddress":"https://index.docker.io/v1/" + } + + **Example response**: + + HTTP/1.1 200 OK + + Status Codes: + + - **200** – no error + - **204** – no error + - **500** – server error + +### Display system-wide information + +`GET /info` + +Display system-wide information + + **Example request**: + + GET /info HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers":11, + "Images":16, + "Driver":"btrfs", + "ExecutionDriver":"native-0.1", + "KernelVersion":"3.12.0-1-amd64", + "Debug":false, + "NFd": 11, + "NGoroutines":21, + "NEventsListener":0, + "InitPath":"/usr/bin/docker", + "IndexServerAddress":["https://index.docker.io/v1/"], + "MemoryLimit":true, + "SwapLimit":false, + "IPv4Forwarding":true + } + + Status Codes: + + - **200** – no error + - **500** – server error + +### Show the docker version information + +`GET /version` + +Show the docker version information + + **Example request**: + + GET /version HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ApiVersion":"1.14", + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } + + Status Codes: + + - **200** – no error + - **500** – server error + +### Ping the docker server + +`GET /_ping` + +Ping the docker server + + **Example request**: + + GET /_ping HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + + OK + + Status Codes: + + - **200** - no error + - **500** - server error + +### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + + **Example request**: + + POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "DisableNetwork": false, + "ExposedPorts":{ + "22/tcp": {} + } + } + + **Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id":"596069db4bf5"} + + JSON Parameters: + + + + - **config** - the container's configuration + + Query Parameters: + +   + + - **container** – source container + - **repo** – repository + - **tag** – tag + - **m** – commit message + - **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + + Status Codes: + + - **201** – no error + - **404** – no such container + - **500** – server error + +### Monitor Docker's events + +`GET /events` + +Get events from docker, either in real time via streaming, or +via polling (using since) + + **Example request**: + + GET /events?since=1374067924 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + 
{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} + {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} + + Query Parameters: + +   + + - **since** – timestamp used for polling + - **until** – timestamp used for polling + + Status Codes: + + - **200** – no error + - **500** – server error + +### Get a tarball containing all images and tags in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository +specified by `name`. + + **Example request** + + GET /images/ubuntu/get + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + + Status Codes: + + - **200** – no error + - **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. + + **Example request** + + POST /images/load + + Tarball in body + + **Example response**: + + HTTP/1.1 200 OK + + Status Codes: + + - **200** – no error + - **500** – server error + +# 3. Going further + +## 3.1 Inside `docker run` + +Here are the steps of `docker run`: + +- Create the container + +- If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container + +- Start the container + +- If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 + +- If in detached mode or only stdin is attached: + - Display the container's id + +## 3.2 Hijacking + +In this version of the API, /attach, uses hijacking to transport stdin, +stdout and stderr on the same socket. This might change in the future. + +## 3.3 CORS Requests + +To enable cross origin requests to the remote api add the flag +"–api-enable-cors" when running docker in daemon mode. + + $ docker -d -H="192.168.1.9:2375" --api-enable-cors diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.2.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.2.md index 37a8e1c012..2530fb90ae 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.2.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.2.md @@ -206,7 +206,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {} } @@ -589,6 +589,13 @@ Insert a file from `url` in the image `name` at `path` {"error":"Invalid..."} ... + Query Parameters: + + + + - **url** – The url from where the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -699,12 +706,6 @@ Push the image `name` on the registry {"error":"Invalid..."} ... 
- Query Parameters: - -   - - - **registry** – the registry you wan to push, optional - Status Codes: - **200** – no error diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.3.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.3.md index b510f660fd..ff6fcac944 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.3.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.3.md @@ -208,7 +208,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {} } @@ -639,6 +639,13 @@ Insert a file from `url` in the image `name` at `path` {"error":"Invalid..."} ... + Query Parameters: + + + + - **url** – The URL from which the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -748,12 +755,6 @@ Push the image `name` on the registry {"error":"Invalid..."} ... - Query Parameters: - -   - - - **registry** – the registry you wan to push, optional - Status Codes: - **200** – no error diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.4.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.4.md index 0e49402621..77d8e15504 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.4.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.4.md @@ -213,7 +213,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {} } @@ -685,6 +685,13 @@ Insert a file from `url` in the image `name` at `path` {"error":"Invalid..."} ... + Query Parameters: + + + + - **url** – The URL from which the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -794,12 +801,6 @@ Push the image `name` on the registry {"status":"Pushing..."} {"status":"Pushing", "progress":"1/? (n/a)"} {"error":"Invalid..."} ... - Query Parameters: - -   - - - **registry** – the registry you wan to push, optional - Status Codes: - **200** – no error :statuscode 404: no such image :statuscode diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.5.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.5.md index 33c1aeca1e..abf6e3397a 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.5.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.5.md @@ -211,7 +211,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {} } @@ -686,6 +686,13 @@ Insert a file from `url` in the image `name` at `path` {"error":"Invalid..."} ... 
+ Query Parameters: + + + + - **url** – The URL from which the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -798,12 +805,6 @@ Push the image `name` on the registry The `X-Registry-Auth` header can be used to include a base64-encoded AuthConfig object. - Query Parameters: - -   - - - **registry** – the registry you wan to push, optional - Status Codes: - **200** – no error diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.6.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.6.md index 4500c1554c..11dd45229c 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.6.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.6.md @@ -261,7 +261,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {} } @@ -793,6 +793,13 @@ Insert a file from `url` in the image `name` at `path` {"error":"Invalid..."} ... + Query Parameters: + + + + - **url** – The URL from which the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -903,12 +910,6 @@ Push the image `name` on the registry > The `X-Registry-Auth` header can be used to > include a base64-encoded AuthConfig object. - Query Parameters: - -   - - - **registry** – the registry you wan to push, optional - Status Codes: - **200** – no error :statuscode 404: no such image :statuscode diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.7.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.7.md index 402efa4262..10ff841799 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.7.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.7.md @@ -217,7 +217,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {} } @@ -712,6 +712,13 @@ Insert a file from `url` in the image `name` at `path` {"error":"Invalid..."} ... + Query Parameters: + + + + - **url** – The URL from which the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -821,12 +828,6 @@ Push the image `name` on the registry {"error":"Invalid..."} ... 
- Query Parameters: - -   - - - **registry** – the registry you wan to push, optional - Request Headers:   diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.8.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.8.md index 78fccaf281..b8bc0087c8 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.8.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.8.md @@ -237,7 +237,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { @@ -754,6 +754,13 @@ Insert a file from `url` in the image `name` at `path` {"error":"Invalid..."} ... + Query Parameters: + + + + - **url** – The URL from which the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -863,12 +870,6 @@ Push the image `name` on the registry {"error":"Invalid..."} ... - Query Parameters: - -   - - - **registry** – the registry you wan to push, optional - Request Headers:   diff --git a/components/engine/docs/sources/reference/api/docker_remote_api_v1.9.md b/components/engine/docs/sources/reference/api/docker_remote_api_v1.9.md index 741a9ac955..38f4ca8f54 100644 --- a/components/engine/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/components/engine/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -237,7 +237,7 @@ Return low-level information on the container `id` "Bridge": "", "PortMapping": null }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", "ResolvConfPath": "/etc/resolv.conf", "Volumes": {}, "HostConfig": { @@ -758,6 +758,13 @@ Insert a file from `url` in the image `name` at `path` {"error":"Invalid..."} ... + Query Parameters: + + + + - **url** – The URL from which the file is taken + - **path** – The path where the file is stored + Status Codes: - **200** – no error @@ -867,12 +874,6 @@ Push the image `name` on the registry {"error":"Invalid..."} ... - Query Parameters: - -   - - - **registry** – the registry you wan to push, optional - Request Headers:   diff --git a/components/engine/docs/sources/reference/api/hub_registry_spec.md b/components/engine/docs/sources/reference/api/hub_registry_spec.md index 1a2cf9423d..b2d29ab4af 100644 --- a/components/engine/docs/sources/reference/api/hub_registry_spec.md +++ b/components/engine/docs/sources/reference/api/hub_registry_spec.md @@ -35,7 +35,7 @@ managed by Docker Inc. service using tokens - It supports different storage backends (S3, cloud files, local FS) - It doesn't have a local database - - [Source Code](https://github.com/dotcloud/docker-registry) + - [Source Code](https://github.com/docker/docker-registry) We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries: @@ -479,7 +479,7 @@ file is empty. 
POST /v1/users: **Body**: - {"email": "[sam@dotcloud.com](mailto:sam%40dotcloud.com)", + {"email": "[sam@docker.com](mailto:sam%40docker.com)", "password": "toto42", "username": "foobar"} **Validation**: diff --git a/components/engine/docs/sources/reference/api/registry_api.md b/components/engine/docs/sources/reference/api/registry_api.md index 2840693fa8..49776b9b18 100644 --- a/components/engine/docs/sources/reference/api/registry_api.md +++ b/components/engine/docs/sources/reference/api/registry_api.md @@ -57,7 +57,7 @@ grasp the context, here are some examples of registries: > **Note**: > The latter implies that while HTTP is the protocol of choice for a registry, > multiple schemes are possible (and in some cases, trivial): -> +> > - HTTP with GET (and PUT for read-write registries); > - local mount point; > - remote docker addressed through SSH. @@ -67,6 +67,8 @@ The latter would only require two new commands in docker, e.g., (and optionally doing consistency checks). Authentication and authorization are then delegated to SSH (e.g., with public keys). +The default namespace for a private repository is `library`. + # Endpoints ## Images @@ -305,7 +307,7 @@ Get all of the tags for the given repo. **Example Request**: - GET /v1/repositories/foo/bar/tags HTTP/1.1 + GET /v1/repositories/reynholm/help-system-server/tags HTTP/1.1 Host: registry-1.docker.io Accept: application/json Content-Type: application/json @@ -335,13 +337,13 @@ Get all of the tags for the given repo. - **401** – Requires authorization - **404** – Repository not found -`GET /v1/repositories/(namespace)/(repository)/tags/(tag*): +`GET /v1/repositories/(namespace)/(repository)/tags/(tag*)` Get a tag for the given repo. **Example Request**: - GET /v1/repositories/foo/bar/tags/latest HTTP/1.1 + GET /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1 Host: registry-1.docker.io Accept: application/json Content-Type: application/json @@ -369,13 +371,13 @@ Get a tag for the given repo. - **401** – Requires authorization - **404** – Tag not found -`DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*): +`DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*)` Delete the tag for the repo **Example Request**: - DELETE /v1/repositories/foo/bar/tags/latest HTTP/1.1 + DELETE /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1 Host: registry-1.docker.io Accept: application/json Content-Type: application/json @@ -402,13 +404,13 @@ Delete the tag for the repo - **401** – Requires authorization - **404** – Tag not found -`PUT /v1/repositories/(namespace)/(repository)/tags/(tag*): +`PUT /v1/repositories/(namespace)/(repository)/tags/(tag*)` Put a tag for the given repo. 
**Example Request**: - PUT /v1/repositories/foo/bar/tags/latest HTTP/1.1 + PUT /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1 Host: registry-1.docker.io Accept: application/json Content-Type: application/json @@ -446,7 +448,7 @@ Delete a repository **Example Request**: - DELETE /v1/repositories/foo/bar/ HTTP/1.1 + DELETE /v1/repositories/reynholm/help-system-server/ HTTP/1.1 Host: registry-1.docker.io Accept: application/json Content-Type: application/json diff --git a/components/engine/docs/sources/reference/api/remote_api_client_libraries.md b/components/engine/docs/sources/reference/api/remote_api_client_libraries.md index d1d26a1ddf..8f50804368 100644 --- a/components/engine/docs/sources/reference/api/remote_api_client_libraries.md +++ b/components/engine/docs/sources/reference/api/remote_api_client_libraries.md @@ -140,5 +140,17 @@ will add the libraries here. https://github.com/spotify/docker-client Active + + Groovy + docker-client + https://github.com/gesellix-docker/docker-client + Active + + + Java + jclouds-docker + https://github.com/jclouds/jclouds-labs/tree/master/docker + Active + diff --git a/components/engine/docs/sources/reference/builder.md b/components/engine/docs/sources/reference/builder.md index 91190933c9..796d07d98e 100644 --- a/components/engine/docs/sources/reference/builder.md +++ b/components/engine/docs/sources/reference/builder.md @@ -4,15 +4,17 @@ page_keywords: builder, docker, Dockerfile, automation, image creation # Dockerfile Reference -**Docker can act as a builder** and read instructions from a text *Dockerfile* -to automate the steps you would otherwise take manually to create an image. -Executing `docker build` will run your steps and commit them along the way, -giving you a final image. +**Docker can build images automatically** by reading the instructions +from a `Dockerfile`. A `Dockerfile` is a text document that contains all +the commands you would normally execute manually in order to build a +Docker image. By calling `docker build` from your terminal, you can have +Docker build your image step by step, executing the instructions +successively. ## Usage To [*build*](../commandline/cli/#cli-build) an image from a source repository, -create a description file called Dockerfile at the root of your repository. +create a description file called `Dockerfile` at the root of your repository. This file will describe the steps to assemble the image. Then call `docker build` with the path of your source repository as the argument @@ -25,6 +27,19 @@ the build. The build is run by the Docker daemon, not by the CLI, so the whole context must be transferred to the daemon. The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to the daemon. +> **Warning** +> Avoid using your root directory, `/`, as the root of the source repository. The +> `docker build` command will use whatever directory contains the Dockerfile as the build +> context (including all of its subdirectories). The build context will be sent to the +> Docker daemon before building the image, which means if you use `/` as the source +> repository, the entire contents of your hard drive will get sent to the daemon (and +> thus to the machine running the daemon). You probably don't want that. + +In most cases, it's best to put each Dockerfile in an empty directory, and then add only +the files needed for building that Dockerfile to that directory. 
To further speed up the +build, you can exclude files and directories by adding a `.dockerignore` file to the same +directory. + You can specify a repository and tag at which to save the new image if the build succeeds: @@ -55,13 +70,12 @@ accelerating `docker build` significantly (indicated by `Using cache`): ---> 1a5ffc17324d Successfully built 1a5ffc17324d -When you're done with your build, you're ready to look into -[*Pushing a repository to its registry*]( -/userguide/dockerrepos/#image-push). +When you're done with your build, you're ready to look into [*Pushing a +repository to its registry*]( /userguide/dockerrepos/#image-push). ## Format -Here is the format of the Dockerfile: +Here is the format of the `Dockerfile`: # Comment INSTRUCTION arguments @@ -69,8 +83,8 @@ Here is the format of the Dockerfile: The Instruction is not case-sensitive, however convention is for them to be UPPERCASE in order to distinguish them from arguments more easily. -Docker evaluates the instructions in a Dockerfile in order. **The first -instruction must be \`FROM\`** in order to specify the [*Base +Docker runs the instructions in a `Dockerfile` in order. **The +first instruction must be \`FROM\`** in order to specify the [*Base Image*](/terms/image/#base-image-def) from which you are building. Docker will treat lines that *begin* with `#` as a @@ -80,10 +94,10 @@ be treated as an argument. This allows statements like: # Comment RUN echo 'we are running some # of cool things' -Here is the set of instructions you can use in a Dockerfile -for building images. +Here is the set of instructions you can use in a `Dockerfile` for building +images. -## .dockerignore +## The `.dockerignore` file If a file named `.dockerignore` exists in the source repository, then it is interpreted as a newline-separated list of exclusion patterns. @@ -124,15 +138,15 @@ Or FROM : The `FROM` instruction sets the [*Base Image*](/terms/image/#base-image-def) -for subsequent instructions. As such, a valid Dockerfile must have `FROM` as +for subsequent instructions. As such, a valid `Dockerfile` must have `FROM` as its first instruction. The image can be any valid image – it is especially easy to start by **pulling an image** from the [*Public Repositories*]( /userguide/dockerrepos/#using-public-repositories). -`FROM` must be the first non-comment instruction in the Dockerfile. +`FROM` must be the first non-comment instruction in the `Dockerfile`. -`FROM` can appear multiple times within a single Dockerfile in order to create -multiple images. Simply make a note of the last image id output by the commit +`FROM` can appear multiple times within a single `Dockerfile` in order to create +multiple images. Simply make a note of the last image ID output by the commit before each new `FROM` command. If no `tag` is given to the `FROM` instruction, `latest` is assumed. If the @@ -154,7 +168,7 @@ RUN has 2 forms: The `RUN` instruction will execute any commands in a new layer on top of the current image and commit the results. The resulting committed image will be -used for the next step in the Dockerfile. +used for the next step in the `Dockerfile`. Layering `RUN` instructions and generating commits conforms to the core concepts of Docker where commits are cheap and containers can be created from @@ -163,44 +177,52 @@ any point in an image's history, much like source control. The *exec* form makes it possible to avoid shell string munging, and to `RUN` commands using a base image that does not contain `/bin/sh`. 
-The cache for `RUN` instructions isn't invalidated automatically during the -next build. The cache for an instruction like `RUN apt-get dist-upgrade -y` -will be reused during the next build. -The cache for `RUN` instructions can be invalidated by using the `--no-cache` -flag, for example `docker build --no-cache`. +> **Note**: +> To use a different shell, other than '/bin/sh', use the *exec* form +> passing in the desired shell. For example, +> `RUN ["/bin/bash", "-c", "echo hello"]` -The first encountered `ADD` instruction will invalidate the cache for all -following instructions from the 'Dockerfile' if the contents of the context -have changed. This will also invalidate the cache for `RUN` instructions. +The cache for `RUN` instructions isn't invalidated automatically during +the next build. The cache for an instruction like `RUN apt-get +dist-upgrade -y` will be reused during the next build. The cache for +`RUN` instructions can be invalidated by using the `--no-cache` flag, +for example `docker build --no-cache`. + +The cache for `RUN` instructions can be invalidated by `ADD` instructions. See +[below](#add) for details. ### Known Issues (RUN) - [Issue 783](https://github.com/docker/docker/issues/783) is about file permissions problems that can occur when using the AUFS file system. You might notice it during an attempt to `rm` a file, for example. The issue describes a workaround. -- [Issue 2424](https://github.com/dotcloud/docker/issues/2424) Locale will - not be set automatically. ## CMD -CMD has three forms: +The `CMD` instruction has three forms: - `CMD ["executable","param1","param2"]` (like an *exec*, this is the preferred form) - `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*) - `CMD command param1 param2` (as a *shell*) -There can only be one CMD in a Dockerfile. If you list more than one CMD -then only the last CMD will take effect. +There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD` +then only the last `CMD` will take effect. -**The main purpose of a CMD is to provide defaults for an executing +**The main purpose of a `CMD` is to provide defaults for an executing container.** These defaults can include an executable, or they can omit -the executable, in which case you must specify an ENTRYPOINT as well. +the executable, in which case you must specify an `ENTRYPOINT` +instruction as well. + +> **Note**: +> If `CMD` is used to provide default arguments for the `ENTRYPOINT` +> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified +> with the JSON array format. When used in the shell or exec formats, the `CMD` instruction sets the command to be executed when running the image. -If you use the *shell* form of the CMD, then the `` will execute in +If you use the *shell* form of the `CMD`, then the `<command>` will execute in `/bin/sh -c`: FROM ubuntu @@ -208,7 +230,7 @@ If you want to **run your** `<command>` **without a shell** then you must express the command as a JSON array and give the full path to the executable. -**This array form is the preferred format of CMD.** Any additional parameters +**This array form is the preferred format of `CMD`.** Any additional parameters must be individually expressed as strings in the array: FROM ubuntu @@ -219,7 +241,7 @@ you should consider using `ENTRYPOINT` in combination with `CMD`. See [*ENTRYPOINT*](#entrypoint). 
If the user specifies arguments to `docker run` then they will override the -default specified in CMD. +default specified in `CMD`. > **Note**: > don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits @@ -265,26 +287,30 @@ being built (also called the *context* of the build) or a remote file URL. `<dest>` is the absolute path to which the source will be copied inside the destination container. -All new files and directories are created with a uid and gid of 0. +All new files and directories are created with a UID and GID of 0. -In the case where `<src>` is a remote file URL, the destination will have permissions 600. +In the case where `<src>` is a remote file URL, the destination will +have permissions of 600. > **Note**: -> If you build by passing a Dockerfile through STDIN (`docker build - < somefile`), -> there is no build context, so the Dockerfile can only contain a URL -> based ADD statement. +> If you build by passing a `Dockerfile` through STDIN (`docker +> build - < somefile`), there is no build context, so the `Dockerfile` +> can only contain a URL based `ADD` instruction. You can also pass a +> compressed archive through STDIN: (`docker build - < archive.tar.gz`), +> the `Dockerfile` at the root of the archive and the rest of the +> archive will get used as the context of the build. -> You can also pass a compressed archive through STDIN: -> (`docker build - < archive.tar.gz`), the `Dockerfile` at the root of -> the archive and the rest of the archive will get used at the context -> of the build. -> > **Note**: > If your URL files are protected using authentication, you will need to -> use `RUN wget` , `RUN curl` -> or use another tool from within the container as ADD does not support +> **Note**: +> If your URL files are protected using authentication, you +> will need to use `RUN wget`, `RUN curl` or use another tool from +> within the container as the `ADD` instruction does not support > authentication. +> **Note**: +> The first encountered `ADD` instruction will invalidate the cache for all +> following instructions from the Dockerfile if the contents of `<src>` have +> changed. This includes invalidating the cache for `RUN` instructions. + The copy obeys the following rules: - The `<src>` path must be inside the *context* of the build; @@ -310,9 +336,9 @@ The copy obeys the following rules: from *remote* URLs are **not** decompressed. When a directory is copied or unpacked, it has the same behavior as `tar -x`: the result is the union of: - 1. whatever existed at the destination path and - 2. the contents of the source tree, with conflicts resolved in favor of - "2." on a file-by-file basis. + 1. Whatever existed at the destination path and + 2. The contents of the source tree, with conflicts resolved in favor + of "2." on a file-by-file basis. - If `<src>` is any other kind of file, it is copied individually along with its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it @@ -338,7 +364,7 @@ being built (also called the *context* of the build). `<dest>` is the absolute path to which the source will be copied inside the destination container. -All new files and directories are created with a uid and gid of 0. +All new files and directories are created with a UID and GID of 0. 
> **Note**: > If you build using STDIN (`docker build - < somefile`), there is no > build context, so the `Dockerfile` can only contain a URL based ADD > statement. @@ -370,41 +396,47 @@ The copy obeys the following rules: ENTRYPOINT has two forms: - `ENTRYPOINT ["executable", "param1", "param2"]` - (like an *exec*, preferred form) + (like an *exec*, the preferred form) - `ENTRYPOINT command param1 param2` (as a *shell*) -There can only be one `ENTRYPOINT` in a Dockerfile. If you have more than one -`ENTRYPOINT`, then only the last one in the Dockerfile will have an effect. +There can only be one `ENTRYPOINT` in a `Dockerfile`. If you have more +than one `ENTRYPOINT`, then only the last one in the `Dockerfile` will +have an effect. -An `ENTRYPOINT` helps you to configure a container that you can run as an -executable. That is, when you specify an `ENTRYPOINT`, then the whole container -runs as if it was just that executable. +An `ENTRYPOINT` helps you to configure a container that you can run as +an executable. That is, when you specify an `ENTRYPOINT`, then the whole +container runs as if it was just that executable. -The `ENTRYPOINT` instruction adds an entry command that will **not** be -overwritten when arguments are passed to `docker run`, unlike the behavior -of `CMD`. This allows arguments to be passed to the entrypoint. i.e. -`docker run -d` will pass the "-d" argument to the ENTRYPOINT. +Unlike the behavior of the `CMD` instruction, the `ENTRYPOINT` +instruction adds an entry command that will **not** be overwritten when +arguments are passed to `docker run`. This allows arguments to be passed +to the entry point, i.e. `docker run <image> -d` will pass the `-d` +argument to the entry point. -You can specify parameters either in the ENTRYPOINT JSON array (as in -"like an exec" above), or by using a CMD statement. Parameters in the -ENTRYPOINT will not be overridden by the `docker run` -arguments, but parameters specified via CMD will be overridden -by `docker run` arguments. +You can specify parameters either in the `ENTRYPOINT` JSON array (as in +"like an exec" above), or by using a `CMD` instruction. Parameters in +the `ENTRYPOINT` instruction will not be overridden by the `docker run` +arguments, but parameters specified via a `CMD` instruction will be +overridden by `docker run` arguments. -Like a `CMD`, you can specify a plain string for the `ENTRYPOINT` and it will -execute in `/bin/sh -c`: +Like a `CMD`, you can specify a plain string for the `ENTRYPOINT` and it +will execute in `/bin/sh -c`: FROM ubuntu - ENTRYPOINT wc -l - + ENTRYPOINT ls -l -For example, that Dockerfile's image will *always* take STDIN as input -("-") and print the number of lines ("-l"). If you wanted to make this -optional but default, you could use a CMD: +For example, that `Dockerfile`'s image will *always* take a directory as +an input and return a directory listing. If you wanted to make this +optional but default, you could use a `CMD` instruction: FROM ubuntu - CMD ["-l", "-"] - ENTRYPOINT ["/usr/bin/wc"] + CMD ["-l"] + ENTRYPOINT ["ls"] + +> **Note**: +> It is preferable to use the JSON array format for specifying +> `ENTRYPOINT` instructions. ## VOLUME VOLUME ["/data"] The `VOLUME` instruction will create a mount point with the specified name and mark it as holding externally mounted volumes from native host or other containers. For more information/examples and mounting instructions via the Docker client, refer to [*Share Directories via Volumes*]( USER daemon -The `USER` instruction sets the username or UID to use when running the image +The `USER` instruction sets the user name or UID to use when running the image and for any following `RUN` directives. 
## WORKDIR WORKDIR /path/to/workdir -The `WORKDIR` instruction sets the working directory for the `RUN`, `CMD` and -`ENTRYPOINT` Dockerfile commands that follow it. +The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD` and +`ENTRYPOINT` instructions that follow it in the `Dockerfile`. -It can be used multiple times in the one Dockerfile. If a relative path +It can be used multiple times in the one `Dockerfile`. If a relative path is provided, it will be relative to the path of the previous `WORKDIR` instruction. For example: - WORKDIR /a WORKDIR b WORKDIR c RUN pwd + WORKDIR /a + WORKDIR b + WORKDIR c + RUN pwd -The output of the final `pwd` command in this -Dockerfile would be `/a/b/c`. +The output of the final `pwd` command in this `Dockerfile` would be +`/a/b/c`. ## ONBUILD ONBUILD [INSTRUCTION] -The `ONBUILD` instruction adds to the image a -"trigger" instruction to be executed at a later time, when the image is -used as the base for another build. The trigger will be executed in the -context of the downstream build, as if it had been inserted immediately -after the *FROM* instruction in the downstream Dockerfile. +The `ONBUILD` instruction adds to the image a *trigger* instruction to +be executed at a later time, when the image is used as the base for +another build. The trigger will be executed in the context of the +downstream build, as if it had been inserted immediately after the +`FROM` instruction in the downstream `Dockerfile`. Any build instruction can be registered as a trigger. @@ -456,33 +491,33 @@ This is useful if you are building an image which will be used as a base to build other images, for example an application build environment or a daemon which may be customized with user-specific configuration. -For example, if your image is a reusable python application builder, it +For example, if your image is a reusable Python application builder, it will require application source code to be added in a particular directory, and it might require a build script to be called *after* -that. You can't just call *ADD* and *RUN* now, because you don't yet +that. You can't just call `ADD` and `RUN` now, because you don't yet have access to the application source code, and it will be different for each application build. You could simply provide application developers -with a boilerplate Dockerfile to copy-paste into their application, but +with a boilerplate `Dockerfile` to copy-paste into their application, but that is inefficient, error-prone and difficult to update because it mixes with application-specific code. -The solution is to use *ONBUILD* to register in advance instructions to +The solution is to use `ONBUILD` to register, in advance, instructions to run later, during the next build stage. Here's how it works: 1. When it encounters an `ONBUILD` instruction, the builder adds a trigger to the metadata of the image being built. The instruction does not otherwise affect the current build. 2. At the end of the build, a list of all triggers is stored in the - image manifest, under the key *OnBuild*. They can be inspected with - *docker inspect*. + image manifest, under the key `OnBuild`. They can be inspected with + the `docker inspect` command. 3. Later the image may be used as a base for a new build, using the - *FROM* instruction. As part of processing the *FROM* instruction, - the downstream builder looks for *ONBUILD* triggers, and executes + `FROM` instruction. 
As part of processing the `FROM` instruction, + the downstream builder looks for `ONBUILD` triggers, and executes them in the same order they were registered. If any of the triggers - fail, the *FROM* instruction is aborted which in turn causes the - build to fail. If all triggers succeed, the FROM instruction + fail, the `FROM` instruction is aborted which in turn causes the + build to fail. If all triggers succeed, the `FROM` instruction completes and the build continues as usual. 4. Triggers are cleared from the final image after being executed. In other words, they are not inherited by "grand-children" builds. @@ -494,9 +529,9 @@ For example you might add something like this: ONBUILD RUN /usr/local/bin/python-build --dir /app/src [...] -> **Warning**: Chaining ONBUILD instructions using ONBUILD ONBUILD isn't allowed. +> **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed. -> **Warning**: ONBUILD may not trigger FROM or MAINTAINER instructions. +> **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions. ## Dockerfile Examples @@ -507,23 +542,16 @@ For example you might add something like this: FROM ubuntu MAINTAINER Victor Vieux - # make sure the package repository is up to date - RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list - RUN apt-get update - - RUN apt-get install -y inotify-tools nginx apache2 openssh-server + RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server # Firefox over VNC # # VERSION 0.3 FROM ubuntu - # make sure the package repository is up to date - RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list - RUN apt-get update # Install vnc, xvfb in order to create a 'fake' display and firefox - RUN apt-get install -y x11vnc xvfb firefox + RUN apt-get update && apt-get install -y x11vnc xvfb firefox RUN mkdir /.vnc # Setup a password RUN x11vnc -storepasswd 1234 ~/.vnc/passwd @@ -547,3 +575,4 @@ For example you might add something like this: # You'll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with # /oink. + diff --git a/components/engine/docs/sources/reference/commandline/cli.md b/components/engine/docs/sources/reference/commandline/cli.md index 301593f2f1..7b9e2ab610 100644 --- a/components/engine/docs/sources/reference/commandline/cli.md +++ b/components/engine/docs/sources/reference/commandline/cli.md @@ -65,15 +65,14 @@ expect an integer, and they can only be specified once. -H, --host=[] The socket(s) to bind to in daemon mode specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. --icc=true Enable inter-container communication - --ip="0.0.0.0" Default IP address to use when binding container ports + --ip=0.0.0.0 Default IP address to use when binding container ports --ip-forward=true Enable net.ipv4.ip_forward --iptables=true Enable Docker's addition of iptables rules --mtu=0 Set the containers network MTU if no value is provided: default to the default route MTU or 1500 if no default route is available -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file - -r, --restart=true Restart previously running containers -s, --storage-driver="" Force the Docker runtime to use a specific storage driver - --selinux-enabled=false Enable selinux support + --selinux-enabled=false Enable selinux support.
SELinux does not presently support the BTRFS storage driver --storage-opt=[] Set storage driver options --tls=false Use TLS; implied by tls-verify flags --tlscacert="/home/sven/.docker/ca.pem" Trust only remotes providing a certificate signed by the CA given here @@ -117,15 +116,14 @@ you can also specify individual sockets too `docker -d -H fd://3`. If the specified socket-activated files aren't found, then docker will exit. You can find examples of using systemd socket activation with docker and systemd in the [docker source tree]( -https://github.com/dotcloud/docker/blob/master/contrib/init/systemd/socket-activation/). +https://github.com/docker/docker/blob/master/contrib/init/systemd/socket-activation/). Docker supports softlinks for the Docker data directory -(`/var/lib/docker`) and for `/tmp`. TMPDIR and the data directory can be set -like this: +(`/var/lib/docker`) and for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can be set like this: - TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1 + DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1 # or - export TMPDIR=/mnt/disk2/tmp + export DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1 ## attach @@ -135,7 +133,7 @@ like this: Attach to a running container --no-stdin=false Do not attach STDIN - --sig-proxy=true Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied. + --sig-proxy=true Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The `attach` command will allow you to view or interact with any running container, detached (`-d`) @@ -324,7 +322,7 @@ schema. > **Note:** `docker build` will return a `no such file or directory` error > if the file or directory does not exist in the uploaded context. This may -> happen if there is no context, or if you specify a file that is elsewhere +> happen if there is no context, or if you specify a file that is elsewhere > on the Host system. The context is limited to the current directory (and its > children) for security reasons, and to ensure repeatable builds on remote > Docker hosts. This is also the reason why `ADD ../file` will not work. @@ -396,9 +394,9 @@ For example: A /go A /go/src A /go/src/github.com - A /go/src/github.com/dotcloud - A /go/src/github.com/dotcloud/docker - A /go/src/github.com/dotcloud/docker/.git + A /go/src/github.com/docker + A /go/src/github.com/docker/docker + A /go/src/github.com/docker/docker/.git .... ## events @@ -425,24 +423,24 @@ You'll need two shells for this example. **Shell 1: (Again ..
now showing events):** - [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) start + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop **Show events in the past from a specified time:** $ sudo docker events --since 1378216169 - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop + 2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die + 2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop $ sudo docker events --since '2013-09-03' - [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop + 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) start + 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die + 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop $ sudo docker events --since '2013-09-03 15:49:29 +0200 CEST' - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop + 2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die + 2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop ## export @@ -500,7 +498,7 @@ by default. 77af4d6b9913 19 hours ago 1.089 GB committest latest b6fa739cedf5 19 hours ago 1.089 GB 78a85c484f71 19 hours ago 1.089 GB - $ docker latest 30557a29d5ab 20 hours ago 1.089 GB + docker latest 30557a29d5ab 20 hours ago 1.089 GB 0124422dd9f9 20 hours ago 1.089 GB 18ad6fad3402 22 hours ago 1.082 GB f9f1e26352f0 23 hours ago 1.089 GB @@ -514,7 +512,7 @@ by default. 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB 78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB - $ docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB + docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB 0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB 18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB @@ -605,18 +603,18 @@ tar, then the ownerships might not get preserved. 
For example: $ sudo docker -D info - Containers: 16 - Images: 2138 + Containers: 14 + Images: 52 Storage Driver: btrfs - Execution Driver: native-0.1 - Kernel Version: 3.12.0-1-amd64 + Execution Driver: native-0.2 + Kernel Version: 3.13.0-24-generic + Operating System: Ubuntu 14.04 LTS Debug mode (server): false Debug mode (client): true - Fds: 16 - Goroutines: 104 + Fds: 10 + Goroutines: 9 EventsListeners: 0 Init Path: /usr/bin/docker - Sockets: [unix:///var/run/docker.sock tcp://0.0.0.0:4243] Username: svendowideit Registry: [https://index.docker.io/v1/] @@ -732,6 +730,16 @@ specify this by adding the server name. example: $ docker login localhost:8080 +## logout + + Usage: docker logout [SERVER] + + Log out from a Docker registry. If no server is specified, "https://index.docker.io/v1/" is the default. + +For example: + + $ docker logout localhost:8080 + ## logs Usage: docker logs CONTAINER @@ -750,12 +758,32 @@ the container's `STDOUT` and `STDERR`. Passing a negative number or a non-integer to `--tail` is invalid and the value is set to `all` in that case. This behavior may change in the future. +The `docker logs --timestamps` command will add an RFC3339Nano +timestamp, for example `2014-05-10T17:42:14.999999999Z`, to each +log entry. + ## port Usage: docker port CONTAINER PRIVATE_PORT Lookup the public-facing port that is NAT-ed to PRIVATE_PORT +## pause + + Usage: docker pause CONTAINER + + Pause all processes within a container + +The `docker pause` command uses the cgroups freezer to suspend all processes in +a container. Traditionally, when suspending a process, the `SIGSTOP` signal is +used, which is observable by the process being suspended. With the cgroups freezer, +the process is unaware of, and unable to capture, the fact that it is being +suspended and subsequently resumed. + +See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for +further details. + ## ps Usage: docker ps [OPTIONS] @@ -764,6 +792,8 @@ value is set to `all` in that case. This behavior may change in the future. -a, --all=false Show all containers. Only running containers are shown by default. --before="" Show only container created before Id or Name, include non-running ones. + -f, --filter=[] Provide filter values. Valid filters: + exited=<int> - containers with exit code of <int> -l, --latest=false Show only the latest created container, include non-running ones. -n=-1 Show n last created containers, include non-running ones. --no-trunc=false Don't truncate output @@ -781,6 +811,25 @@ Running `docker ps` showing 2 linked containers. `docker ps` will show only running containers by default. To see all containers: `docker ps -a` +### Filtering + +The filtering flag (-f or --filter) format is a "key=value" pair. If there is more +than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). + +Current filters: + * exited (int - the code of exited containers.
Only useful with '--all') + + +#### Successfully exited containers + + $ sudo docker ps -a --filter 'exited=0' + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey + 106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani + 48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds + +This shows all the containers that have exited with a status of '0'. ## pull Usage: docker pull NAME[:TAG] @@ -834,13 +883,13 @@ registry or to a self-hosted one. Remove one or more containers - -f, --force=false Force removal of running container + -f, --force=false Force the removal of a running container (uses SIGKILL) -l, --link=false Remove the specified link and not the underlying container -v, --volumes=false Remove the volumes associated with the container ### Known Issues (rm) -- [Issue 197](https://github.com/dotcloud/docker/issues/197) indicates +- [Issue 197](https://github.com/docker/docker/issues/197) indicates that `docker kill` may leave directories behind and make it difficult to remove the container. @@ -859,7 +908,12 @@ This will remove the underlying link between `/webapp` and the `/redis` containers removing all network communication. - $ sudo docker rm $(docker ps -a -q) + $ sudo docker rm --force redis + redis + +The main process inside the container referenced under the link `/redis` will receive +SIGKILL, then the container will be removed. + + $ sudo docker rm $(docker ps -a -q) + This command will delete all stopped containers. The command `docker ps -a -q` will return all existing container @@ -909,20 +963,23 @@ removed before the image is removed. Run a command in a new container - -a, --attach=[] Attach to stdin, stdout or stderr. + -a, --attach=[] Attach to STDIN, STDOUT or STDERR. -c, --cpu-shares=0 CPU shares (relative weight) + --cap-add=[] Add Linux capabilities + --cap-drop=[] Drop Linux capabilities --cidfile="" Write the container ID to the file --cpuset="" CPUs in which to allow execution (0-3, 0,1) - -d, --detach=false Detached mode: Run container in the background, print new container id - --dns=[] Set custom dns servers - --dns-search=[] Set custom dns search domains + -d, --detach=false Detached mode: run container in the background and print new container ID + --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + --dns=[] Set custom DNS servers + --dns-search=[] Set custom DNS search domains -e, --env=[] Set environment variables - --entrypoint="" Overwrite the default entrypoint of the image - --env-file=[] Read in a line delimited file of ENV variables + --entrypoint="" Overwrite the default ENTRYPOINT of the image + --env-file=[] Read in a line delimited file of environment variables --expose=[] Expose a port from the container without publishing it to your host -h, --hostname="" Container host name - -i, --interactive=false Keep stdin open even if not attached - --link=[] Add link to another container (name:alias) + -i, --interactive=false Keep STDIN open even if not attached + --link=[] Add link to another container in the form of name:alias --lxc-conf=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -m, --memory="" Memory limit (format: <number><optional unit>, where unit = b, k, m or g) --name="" Assign a name to the container
format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort (use 'docker port' to see the actual mapping) --privileged=false Give extended privileges to this container + --restart="" Restart policy to apply when a container exits (no, on-failure, always) --rm=false Automatically remove the container when it exits (incompatible with -d) - --sig-proxy=true Proxify received signals to the process (even in non-tty mode). SIGCHLD is not proxied. - -t, --tty=false Allocate a pseudo-tty + --sig-proxy=true Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. + -t, --tty=false Allocate a pseudo-TTY -u, --user="" Username or UID - -v, --volume=[] Bind mount a volume (e.g., from the host: -v /host:/container, from docker: -v /container) + -v, --volume=[] Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) --volumes-from=[] Mount volumes from the specified container(s) -w, --workdir="" Working directory inside the container @@ -960,7 +1018,7 @@ and linking containers. ### Known Issues (run --volumes-from) - [Issue 2702](https://github.com/dotcloud/docker/issues/2702): +- [Issue 2702](https://github.com/docker/docker/issues/2702): "lxc-start: Permission denied - failed to mount" could indicate a permissions problem with AppArmor. Please see the issue for a workaround. @@ -1087,14 +1145,14 @@ network and environment of the `redis` container via environment variables. The `--name` flag will assign the name `console` to the newly created container. - $ sudo docker run --volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd + $ sudo docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd The `--volumes-from` flag mounts all the defined volumes from the referenced -containers. Containers can be specified by a comma separated list or by -repetitions of the `--volumes-from` argument. The container ID may be -optionally suffixed with `:ro` or `:rw` to mount the volumes in read-only -or read-write mode, respectively. By default, the volumes are mounted in -the same mode (read write or read only) as the reference container. +containers. Containers can be specified by repetitions of the `--volumes-from` +argument. The container ID may be optionally suffixed with `:ro` or `:rw` to +mount the volumes in read-only or read-write mode, respectively. By default, +the volumes are mounted in the same mode (read write or read only) as +the reference container. The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` or `STDERR`. This makes it possible to manipulate the output and input as needed. @@ -1118,6 +1176,20 @@ logs could be retrieved using `docker logs`. This is useful if you need to pipe a file or something else into a container and retrieve the container's ID once the container has finished running. + $ sudo docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo} + brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc + brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd + crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo + +It is often necessary to directly expose devices to a container. The `--device` +option enables that. For example, a specific block storage device, loop +device, or audio device can be added to an otherwise unprivileged container +(without the `--privileged` flag), letting the application access it directly.
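As a further minimal sketch of the flag (assuming a loop device such as `/dev/loop0` exists on your host), a single device can be passed through without renaming it inside the container:

    $ sudo docker run --device=/dev/loop0 -i -t ubuntu ls -l /dev/loop0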
+ +**Security note:** + +`--device` cannot be safely used with ephemeral devices. Block devices that may be removed should not be added to untrusted containers with `--device`! + **A complete example:** $ sudo docker run -d --name static static-web-files sh @@ -1149,6 +1221,31 @@ application change: `--rm` option means that when the container exits, the container's layer is removed. +#### Restart Policies + +Using the `--restart` flag on `docker run`, you can specify a restart policy for +how a container should or should not be restarted on exit. + +**no** - Do not restart the container when it exits. + +**on-failure** - Restart the container only if it exits with a non-zero exit status. + +**always** - Always restart the container regardless of the exit status. + +You can also specify the maximum number of times Docker will try to restart the +container when using the **on-failure** policy. The default is that Docker will try forever to restart the container. + + $ sudo docker run --restart=always redis + +This will run the `redis` container with a restart policy of **always**, so that if +the container exits, Docker will restart it. + + $ sudo docker run --restart=on-failure:10 redis + +This will run the `redis` container with a restart policy of **on-failure** and a +maximum restart count of 10. If the `redis` container exits with a non-zero exit +status more than 10 times in a row, Docker will abort trying to restart the container. + ## save Usage: docker save IMAGE @@ -1197,7 +1294,7 @@ more details on finding shared images from the command line. -a, --attach=false Attach container's STDOUT and STDERR and forward all signals to the process -i, --interactive=false Attach container's STDIN -When run on a container that has already been started, +When run on a container that has already been started, takes no action and succeeds unconditionally. ## stop @@ -1213,7 +1310,7 @@ grace period, SIGKILL ## tag - Usage: docker tag [OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG] + Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] Tag an image into a repository @@ -1229,6 +1326,19 @@ them to [*Share Images via Repositories*]( Display the running processes of a container +## unpause + + Usage: docker unpause CONTAINER + + Unpause all processes within a container + +The `docker unpause` command uses the cgroups freezer to un-suspend all +processes in a container. + +See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for +further details. + ## version Usage: docker version diff --git a/components/engine/docs/sources/reference/run.md b/components/engine/docs/sources/reference/run.md index a539ab0d18..a933a32bea 100644 --- a/components/engine/docs/sources/reference/run.md +++ b/components/engine/docs/sources/reference/run.md @@ -11,7 +11,7 @@ its own networking, and its own isolated process tree. The defaults related to the binary to run, the networking to expose, and more, but `docker run` gives final control to the operator who starts the container from the image. That's the main reason -[*run*](/reference/commandline/cli/#cli-run) has more options than any +[*run*](/reference/commandline/cli/#run) has more options than any other `docker` command. ## General Form @@ -21,23 +21,21 @@ The basic `docker run` command takes this form: $ docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...]
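For example, here is a concrete instance of this general form (assuming the `ubuntu:14.04` image is available locally or can be pulled) that starts an interactive shell in a container:

    $ sudo docker run -i -t ubuntu:14.04 /bin/bash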
To learn how to interpret the types of `[OPTIONS]`, -see [*Option types*](/reference/commandline/cli/#cli-options). +see [*Option types*](/reference/commandline/cli/#option-types). The list of `[OPTIONS]` breaks down into two groups: 1. Settings exclusive to operators, including: - - - Detached or Foreground running, - - Container Identification, - - Network settings, and - - Runtime Constraints on CPU and Memory - - Privileges and LXC Configuration - -2. Setting shared between operators and developers, where operators can + * Detached or Foreground running, + * Container Identification, + * Network settings, and + * Runtime Constraints on CPU and Memory + * Privileges and LXC Configuration +2. Settings shared between operators and developers, where operators can override defaults developers set in images at build time. -Together, the `docker run [OPTIONS]` give complete control over runtime -behavior to the operator, allowing them to override all defaults set by +Together, the `docker run [OPTIONS]` give the operator complete control over runtime +behavior, allowing them to override all defaults set by the developer during `docker build` and nearly all the defaults set by the Docker runtime itself. @@ -55,7 +53,7 @@ following options. - [Network Settings](#network-settings) - [Clean Up (--rm)](#clean-up-rm) - [Runtime Constraints on CPU and Memory](#runtime-constraints-on-cpu-and-memory) - - [Runtime Privilege and LXC Configuration](#runtime-privilege-and-lxc-configuration) + - [Runtime Privilege, Linux Capabilities, and LXC Configuration](#runtime-privilege-linux-capabilities-and-lxc-configuration) ## Detached vs Foreground @@ -88,7 +86,7 @@ and pass along signals. All of that is configurable: -i=false : Keep STDIN open even if not attached If you do not specify `-a` then Docker will [attach all standard -streams]( https://github.com/dotcloud/docker/blob/ +streams]( https://github.com/docker/docker/blob/ 75a7f4d90cde0295bcfb7213004abce8d4779b75/commands.go#L1797). You can specify to which of the three standard streams (`STDIN`, `STDOUT`, `STDERR`) you'd like to connect instead, as in: @@ -126,6 +124,12 @@ programs might write out their process ID to a file (you've seen them as PID files): --cidfile="": Write the container ID to the file + +### Image[:tag] + +While not strictly a means of identifying a container, you can specify a version of an +image you'd like to run the container with by adding `image[:tag]` to the command. For +example, `docker run ubuntu:14.04`. ## Network Settings @@ -222,8 +226,10 @@ get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via Docker. -## Runtime Privilege and LXC Configuration +## Runtime Privilege, Linux Capabilities, and LXC Configuration + --cap-add: Add Linux capabilities + --cap-drop: Drop Linux capabilities --privileged=false: Give extended privileges to this container --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" @@ -231,7 +237,7 @@ By default, Docker containers are "unprivileged" and cannot, for example, run a Docker daemon inside a Docker container. 
This is because by default a container is not allowed to access any devices, but a "privileged" container is given access to all devices (see [lxc-template.go]( -https://github.com/dotcloud/docker/blob/master/daemon/execdriver/lxc/lxc_template.go) +https://github.com/docker/docker/blob/master/daemon/execdriver/lxc/lxc_template.go) and documentation on [cgroups devices]( https://www.kernel.org/doc/Documentation/cgroups/devices.txt)). @@ -242,11 +248,21 @@ host as processes running outside containers on the host. Additional information about running with `--privileged` is available on the [Docker Blog](http://blog.docker.com/2013/09/docker-can-now-run-within-docker/). +In addition to `--privileged`, the operator can have fine-grained control over the +capabilities using `--cap-add` and `--cap-drop`. Docker keeps a default +list of capabilities. Both flags support the value `all`, so if the +operator wants to have all capabilities but `MKNOD`, they could use: + + $ docker run --cap-add=ALL --cap-drop=MKNOD ... + +For interacting with the network stack, instead of using `--privileged`, they +should use `--cap-add=NET_ADMIN` to modify the network interfaces. + If the Docker daemon was started using the `lxc` exec-driver (`docker -d --exec-driver=lxc`) then the operator can also specify LXC options using one or more `--lxc-conf` parameters. These can be new parameters or override existing parameters from the [lxc-template.go]( -https://github.com/dotcloud/docker/blob/master/daemon/execdriver/lxc/lxc_template.go). +https://github.com/docker/docker/blob/master/daemon/execdriver/lxc/lxc_template.go). Note that in the future, a given host's docker daemon may not use LXC, so this is an implementation-specific configuration meant for operators already familiar with using LXC directly. @@ -385,7 +401,7 @@ container running Redis: $ docker port 4241164edf6f 6379 2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f -Yet we can get information about the Redis container'sexposed ports +Yet we can get information about the Redis container's exposed ports with `--link`. Choose an alias that will form a valid environment variable! @@ -423,8 +439,8 @@ mechanism to communicate with a linked container by its alias: --volumes-from="": Mount all volumes from the given container(s) The volumes commands are complex enough to have their own documentation -in section [*Share Directories via -Volumes*](/userguide/dockervolumes/#volume-def). A developer can define +in section [*Managing data in +containers*](/userguide/dockervolumes/#volume-def). A developer can define one or more `VOLUME`s associated with an image, but only the operator can give access from one container to another (or from a container to a volume mounted on the host). diff --git a/components/engine/docs/sources/userguide/dockerhub.md b/components/engine/docs/sources/userguide/dockerhub.md index 5bb1edec8a..62438b9948 100644 --- a/components/engine/docs/sources/userguide/dockerhub.md +++ b/components/engine/docs/sources/userguide/dockerhub.md @@ -5,8 +5,8 @@ page_keywords: documentation, docs, the docker guide, docker guide, docker, dock # Getting Started with Docker Hub -This section provides a quick introduction to the [Docker Hub](https://hub.docker.com) -and will show you how to create an account. +This section provides a quick introduction to the [Docker Hub](https://hub.docker.com), +including how to create an account.
The [Docker Hub](https://hub.docker.com) is a centralized resource for working with Docker and its components. Docker Hub helps you collaborate with colleagues and get the @@ -23,7 +23,7 @@ worry, creating an account is simple and free. ## Creating a Docker Hub Account -There are two ways for you to register and create a Docker Hub account: +There are two ways for you to register and create an account: 1. Via the web, or 2. Via the command line. @@ -31,9 +31,9 @@ There are two ways for you to register and create a Docker Hub account: ### Register via the web Fill in the [sign-up form](https://hub.docker.com/account/signup/) by -choosing your user name and password and specifying email address. You can also sign up -for the Docker Weekly mailing list, which has lots of information about what's going on -in the world of Docker. +choosing your user name and password and entering a valid email address. You can also +sign up for the Docker Weekly mailing list, which has lots of information about what's +going on in the world of Docker. ![Register using the sign-up page](/userguide/register-web.png) @@ -46,10 +46,9 @@ You can also create a Docker Hub account via the command line with the ### Confirm your email -Once you've filled in the form, check your email for a welcome message and confirmation -to activate your account. +Once you've filled in the form, check your email for a welcome message asking for +confirmation so we can activate your account. -![Confirm your registration](/userguide/register-confirm.png) ### Login @@ -61,7 +60,7 @@ Or via the command line with the `docker login` command: $ sudo docker login -Your Docker Hub account is now active and ready for you to use! +Your Docker Hub account is now active and ready to use. ## Next steps diff --git a/components/engine/docs/sources/userguide/dockerimages.md b/components/engine/docs/sources/userguide/dockerimages.md index c3f5461c2f..e6583a0f82 100644 --- a/components/engine/docs/sources/userguide/dockerimages.md +++ b/components/engine/docs/sources/userguide/dockerimages.md @@ -130,7 +130,7 @@ We can see we've returned a lot of images that use the term `sinatra`. We've returned a list of image names, descriptions, Stars (which measure the social popularity of images - if a user likes an image then they can "star" it), and the Official and Automated build statuses. Official repositories are built and -maintained by the [Stackbrew](https://github.com/dotcloud/stackbrew) project, +maintained by the [Stackbrew](https://github.com/docker/stackbrew) project, and Automated repositories are [Automated Builds]( /userguide/dockerrepos/#automated-builds) that allow you to validate the source and content of an image. @@ -245,8 +245,7 @@ example now for building our own Sinatra image for our development team. # This is a comment FROM ubuntu:14.04 MAINTAINER Kate Smith - RUN apt-get -qq update - RUN apt-get -qqy install ruby ruby-dev + RUN apt-get update && apt-get install -y ruby ruby-dev RUN gem install sinatra Let's look at what our `Dockerfile` does. Each instruction prefixes a statement and is capitalized. @@ -272,38 +271,168 @@ Sinatra gem. Now let's take our `Dockerfile` and use the `docker build` command to build an image. $ sudo docker build -t="ouruser/sinatra:v2" . 
- Uploading context 2.56 kB - Uploading context + Sending build context to Docker daemon 2.048 kB + Sending build context to Docker daemon Step 0 : FROM ubuntu:14.04 - ---> 99ec81b80c55 + ---> e54ca5efa2e9 Step 1 : MAINTAINER Kate Smith - ---> Running in 7c5664a8a0c1 - ---> 2fa8ca4e2a13 - Removing intermediate container 7c5664a8a0c1 - Step 2 : RUN apt-get -qq update - ---> Running in b07cc3fb4256 - ---> 50d21070ec0c - Removing intermediate container b07cc3fb4256 - Step 3 : RUN apt-get -qqy install ruby ruby-dev - ---> Running in a5b038dd127e + ---> Using cache + ---> 851baf55332b + Step 2 : RUN apt-get update && apt-get install -y ruby ruby-dev + ---> Running in 3a2558904e9b Selecting previously unselected package libasan0:amd64. (Reading database ... 11518 files and directories currently installed.) Preparing to unpack .../libasan0_4.8.2-19ubuntu1_amd64.deb ... - . . . + Unpacking libasan0:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libatomic1:amd64. + Preparing to unpack .../libatomic1_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libatomic1:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libgmp10:amd64. + Preparing to unpack .../libgmp10_2%3a5.1.3+dfsg-1ubuntu1_amd64.deb ... + Unpacking libgmp10:amd64 (2:5.1.3+dfsg-1ubuntu1) ... + Selecting previously unselected package libisl10:amd64. + Preparing to unpack .../libisl10_0.12.2-1_amd64.deb ... + Unpacking libisl10:amd64 (0.12.2-1) ... + Selecting previously unselected package libcloog-isl4:amd64. + Preparing to unpack .../libcloog-isl4_0.18.2-1_amd64.deb ... + Unpacking libcloog-isl4:amd64 (0.18.2-1) ... + Selecting previously unselected package libgomp1:amd64. + Preparing to unpack .../libgomp1_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libgomp1:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libitm1:amd64. + Preparing to unpack .../libitm1_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libitm1:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libmpfr4:amd64. + Preparing to unpack .../libmpfr4_3.1.2-1_amd64.deb ... + Unpacking libmpfr4:amd64 (3.1.2-1) ... + Selecting previously unselected package libquadmath0:amd64. + Preparing to unpack .../libquadmath0_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libquadmath0:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libtsan0:amd64. + Preparing to unpack .../libtsan0_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libtsan0:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package libyaml-0-2:amd64. + Preparing to unpack .../libyaml-0-2_0.1.4-3ubuntu3_amd64.deb ... + Unpacking libyaml-0-2:amd64 (0.1.4-3ubuntu3) ... + Selecting previously unselected package libmpc3:amd64. + Preparing to unpack .../libmpc3_1.0.1-1ubuntu1_amd64.deb ... + Unpacking libmpc3:amd64 (1.0.1-1ubuntu1) ... + Selecting previously unselected package openssl. + Preparing to unpack .../openssl_1.0.1f-1ubuntu2.4_amd64.deb ... + Unpacking openssl (1.0.1f-1ubuntu2.4) ... + Selecting previously unselected package ca-certificates. + Preparing to unpack .../ca-certificates_20130906ubuntu2_all.deb ... + Unpacking ca-certificates (20130906ubuntu2) ... + Selecting previously unselected package manpages. + Preparing to unpack .../manpages_3.54-1ubuntu1_all.deb ... + Unpacking manpages (3.54-1ubuntu1) ... + Selecting previously unselected package binutils. + Preparing to unpack .../binutils_2.24-5ubuntu3_amd64.deb ... + Unpacking binutils (2.24-5ubuntu3) ... + Selecting previously unselected package cpp-4.8. 
+ Preparing to unpack .../cpp-4.8_4.8.2-19ubuntu1_amd64.deb ... + Unpacking cpp-4.8 (4.8.2-19ubuntu1) ... + Selecting previously unselected package cpp. + Preparing to unpack .../cpp_4%3a4.8.2-1ubuntu6_amd64.deb ... + Unpacking cpp (4:4.8.2-1ubuntu6) ... + Selecting previously unselected package libgcc-4.8-dev:amd64. + Preparing to unpack .../libgcc-4.8-dev_4.8.2-19ubuntu1_amd64.deb ... + Unpacking libgcc-4.8-dev:amd64 (4.8.2-19ubuntu1) ... + Selecting previously unselected package gcc-4.8. + Preparing to unpack .../gcc-4.8_4.8.2-19ubuntu1_amd64.deb ... + Unpacking gcc-4.8 (4.8.2-19ubuntu1) ... + Selecting previously unselected package gcc. + Preparing to unpack .../gcc_4%3a4.8.2-1ubuntu6_amd64.deb ... + Unpacking gcc (4:4.8.2-1ubuntu6) ... + Selecting previously unselected package libc-dev-bin. + Preparing to unpack .../libc-dev-bin_2.19-0ubuntu6_amd64.deb ... + Unpacking libc-dev-bin (2.19-0ubuntu6) ... + Selecting previously unselected package linux-libc-dev:amd64. + Preparing to unpack .../linux-libc-dev_3.13.0-30.55_amd64.deb ... + Unpacking linux-libc-dev:amd64 (3.13.0-30.55) ... + Selecting previously unselected package libc6-dev:amd64. + Preparing to unpack .../libc6-dev_2.19-0ubuntu6_amd64.deb ... + Unpacking libc6-dev:amd64 (2.19-0ubuntu6) ... + Selecting previously unselected package ruby. + Preparing to unpack .../ruby_1%3a1.9.3.4_all.deb ... + Unpacking ruby (1:1.9.3.4) ... + Selecting previously unselected package ruby1.9.1. + Preparing to unpack .../ruby1.9.1_1.9.3.484-2ubuntu1_amd64.deb ... + Unpacking ruby1.9.1 (1.9.3.484-2ubuntu1) ... + Selecting previously unselected package libruby1.9.1. + Preparing to unpack .../libruby1.9.1_1.9.3.484-2ubuntu1_amd64.deb ... + Unpacking libruby1.9.1 (1.9.3.484-2ubuntu1) ... + Selecting previously unselected package manpages-dev. + Preparing to unpack .../manpages-dev_3.54-1ubuntu1_all.deb ... + Unpacking manpages-dev (3.54-1ubuntu1) ... + Selecting previously unselected package ruby1.9.1-dev. + Preparing to unpack .../ruby1.9.1-dev_1.9.3.484-2ubuntu1_amd64.deb ... + Unpacking ruby1.9.1-dev (1.9.3.484-2ubuntu1) ... + Selecting previously unselected package ruby-dev. + Preparing to unpack .../ruby-dev_1%3a1.9.3.4_all.deb ... + Unpacking ruby-dev (1:1.9.3.4) ... + Setting up libasan0:amd64 (4.8.2-19ubuntu1) ... + Setting up libatomic1:amd64 (4.8.2-19ubuntu1) ... + Setting up libgmp10:amd64 (2:5.1.3+dfsg-1ubuntu1) ... + Setting up libisl10:amd64 (0.12.2-1) ... + Setting up libcloog-isl4:amd64 (0.18.2-1) ... + Setting up libgomp1:amd64 (4.8.2-19ubuntu1) ... + Setting up libitm1:amd64 (4.8.2-19ubuntu1) ... + Setting up libmpfr4:amd64 (3.1.2-1) ... + Setting up libquadmath0:amd64 (4.8.2-19ubuntu1) ... + Setting up libtsan0:amd64 (4.8.2-19ubuntu1) ... + Setting up libyaml-0-2:amd64 (0.1.4-3ubuntu3) ... + Setting up libmpc3:amd64 (1.0.1-1ubuntu1) ... + Setting up openssl (1.0.1f-1ubuntu2.4) ... + Setting up ca-certificates (20130906ubuntu2) ... + debconf: unable to initialize frontend: Dialog + debconf: (TERM is not set, so the dialog frontend is not usable.) + debconf: falling back to frontend: Readline + debconf: unable to initialize frontend: Readline + debconf: (This frontend requires a controlling tty.) + debconf: falling back to frontend: Teletype + Setting up manpages (3.54-1ubuntu1) ... + Setting up binutils (2.24-5ubuntu3) ... + Setting up cpp-4.8 (4.8.2-19ubuntu1) ... + Setting up cpp (4:4.8.2-1ubuntu6) ... + Setting up libgcc-4.8-dev:amd64 (4.8.2-19ubuntu1) ... + Setting up gcc-4.8 (4.8.2-19ubuntu1) ... 
+ Setting up gcc (4:4.8.2-1ubuntu6) ... + Setting up libc-dev-bin (2.19-0ubuntu6) ... + Setting up linux-libc-dev:amd64 (3.13.0-30.55) ... + Setting up libc6-dev:amd64 (2.19-0ubuntu6) ... + Setting up manpages-dev (3.54-1ubuntu1) ... + Setting up libruby1.9.1 (1.9.3.484-2ubuntu1) ... + Setting up ruby1.9.1-dev (1.9.3.484-2ubuntu1) ... + Setting up ruby-dev (1:1.9.3.4) ... Setting up ruby (1:1.9.3.4) ... Setting up ruby1.9.1 (1.9.3.484-2ubuntu1) ... Processing triggers for libc-bin (2.19-0ubuntu6) ... - ---> 2acb20f17878 - Removing intermediate container a5b038dd127e - Step 4 : RUN gem install sinatra - ---> Running in 5e9d0065c1f7 - . . . + Processing triggers for ca-certificates (20130906ubuntu2) ... + Updating certificates in /etc/ssl/certs... 164 added, 0 removed; done. + Running hooks in /etc/ca-certificates/update.d....done. + ---> c55c31703134 + Removing intermediate container 3a2558904e9b + Step 3 : RUN gem install sinatra + ---> Running in 6b81cb6313e5 + unable to convert "\xC3" to UTF-8 in conversion from ASCII-8BIT to UTF-8 to US-ASCII for README.rdoc, skipping + unable to convert "\xC3" to UTF-8 in conversion from ASCII-8BIT to UTF-8 to US-ASCII for README.rdoc, skipping + Successfully installed rack-1.5.2 + Successfully installed tilt-1.4.1 Successfully installed rack-protection-1.5.3 Successfully installed sinatra-1.4.5 4 gems installed - ---> 324104cde6ad - Removing intermediate container 5e9d0065c1f7 - Successfully built 324104cde6ad + Installing ri documentation for rack-1.5.2... + Installing ri documentation for tilt-1.4.1... + Installing ri documentation for rack-protection-1.5.3... + Installing ri documentation for sinatra-1.4.5... + Installing RDoc documentation for rack-1.5.2... + Installing RDoc documentation for tilt-1.4.1... + Installing RDoc documentation for rack-protection-1.5.3... + Installing RDoc documentation for sinatra-1.4.5... + ---> 97feabe5d2ed + Removing intermediate container 6b81cb6313e5 + Successfully built 97feabe5d2ed We've specified our `docker build` command and used the `-t` flag to identify our new image as belonging to the user `ouruser`, the repository name `sinatra` @@ -328,6 +457,11 @@ instructions have executed we're left with the `324104cde6ad` image (also helpfully tagged as `ouruser/sinatra:v2`) and all intermediate containers will get removed to clean things up. +> **Note:** +> An image can't have more than 127 layers regardless of the storage driver. +> This limitation is set globally to encourage optimization of the overall +> size of images. + We can then create a container from our new image. $ sudo docker run -t -i ouruser/sinatra:v2 /bin/bash diff --git a/components/engine/docs/sources/userguide/dockerlinks.md b/components/engine/docs/sources/userguide/dockerlinks.md index 20a5c1a179..3624bf72c3 100644 --- a/components/engine/docs/sources/userguide/dockerlinks.md +++ b/components/engine/docs/sources/userguide/dockerlinks.md @@ -4,48 +4,47 @@ page_keywords: Examples, Usage, user guide, links, linking, docker, documentatio # Linking Containers Together -In [the Using Docker section](/userguide/usingdocker) we touched on -connecting to a service running inside a Docker container via a network -port. This is one of the ways that you can interact with services and -applications running inside Docker containers. In this section we're -going to give you a refresher on connecting to a Docker container via a -network port as well as introduce you to the concepts of container -linking. 
+In [the Using Docker section](/userguide/usingdocker), you saw how you can +connect to a service running inside a Docker container via a network +port. But a port connection is only one way you can interact with services and +applications running inside Docker containers. In this section, we'll briefly revisit +connecting via a network port and then we'll introduce you to another method of access: +container linking. ## Network port mapping refresher -In [the Using Docker section](/userguide/usingdocker) we created a -container that ran a Python Flask application. +In [the Using Docker section](/userguide/usingdocker), you created a +container that ran a Python Flask application: $ sudo docker run -d -P training/webapp python app.py > **Note:** > Containers have an internal network and an IP address -> (remember we used the `docker inspect` command to show the container's +> (as we saw when we used the `docker inspect` command to show the container's > IP address in the [Using Docker](/userguide/usingdocker/) section). > Docker can have a variety of network configurations. You can see more > information on Docker networking [here](/articles/networking/). -When we created that container we used the `-P` flag to automatically map any -network ports inside that container to a random high port from the range 49000 -to 49900 on our Docker host. When we subsequently ran `docker ps` we saw that -port 5000 was bound to port 49155. +When that container was created, the `-P` flag was used to automatically map any +network ports inside it to a random high port from the range 49000 +to 49900 on our Docker host. Next, when `docker ps` was run, you saw that +port 5000 in the container was bound to port 49155 on the host. $ sudo docker ps nostalgic_morse CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES bc533791f3f5 training/webapp:latest python app.py 5 seconds ago Up 2 seconds 0.0.0.0:49155->5000/tcp nostalgic_morse -We also saw how we can bind a container's ports to a specific port using -the `-p` flag. +You also saw how you can bind a container's ports to a specific port using +the `-p` flag: $ sudo docker run -d -p 5000:5000 training/webapp python app.py -And we saw why this isn't such a great idea because it constrains us to +And you saw why this isn't such a great idea because it constrains you to only one container on that specific port. -There are also a few other ways we can configure the `-p` flag. By +There are also a few other ways you can configure the `-p` flag. By default the `-p` flag will bind the specified port to all interfaces on -the host machine. But we can also specify a binding to a specific +the host machine. But you can also specify a binding to a specific interface, for example only to the `localhost`. $ sudo docker run -d -p 127.0.0.1:5000:5000 training/webapp python app.py @@ -53,20 +52,19 @@ interface, for example only to the `localhost`. This would bind port 5000 inside the container to port 5000 on the `localhost` or `127.0.0.1` interface on the host machine. -Or to bind port 5000 of the container to a dynamic port but only on the -`localhost` we could: +Or, to bind port 5000 of the container to a dynamic port but only on the +`localhost`, you could use: $ sudo docker run -d -p 127.0.0.1::5000 training/webapp python app.py -We can also bind UDP ports by adding a trailing `/udp`, for example: +You can also bind UDP ports by adding a trailing `/udp`. 
For example: $ sudo docker run -d -p 127.0.0.1:5000:5000/udp training/webapp python app.py -We also saw the useful `docker port` shortcut which showed us the -current port bindings, this is also useful for showing us specific port -configurations. For example if we've bound the container port to the -`localhost` on the host machine this will be shown in the `docker port` -output. +You also learned about the useful `docker port` shortcut which showed you the +current port bindings. This is also useful for showing you specific port +configurations. For example, if you've bound the container port to the +`localhost` on the host machine, then the `docker port` output will reflect that. $ docker port nostalgic_morse 5000 127.0.0.1:49155 @@ -78,101 +76,110 @@ output. Network port mappings are not the only way Docker containers can connect to one another. Docker also has a linking system that allows you to link -multiple containers together and share connection information between -them. Docker linking will create a parent child relationship where the -parent container can see selected information about its child. +multiple containers together and send connection information from one to another. +When containers are linked, information about a source container can be sent to a +recipient container. This allows the recipient to see selected data describing +aspects of the source container. ## Container naming -To perform this linking Docker relies on the names of your containers. -We've already seen that each container we create has an automatically -created name, indeed we've become familiar with our old friend +To establish links, Docker relies on the names of your containers. +You've already seen that each container you create has an automatically +created name; indeed you've become familiar with our old friend `nostalgic_morse` during this guide. You can also name containers yourself. This naming provides two useful functions: -1. It's useful to name containers that do specific functions in a way +1. It can be useful to name containers that do specific functions in a way that makes it easier for you to remember them, for example naming a - container with a web application in it `web`. + container containing a web application `web`. 2. It provides Docker with a reference point that allows it to refer to other - containers, for example link container `web` to container `db`. + containers; for example, you can link the container `web` to the container `db`. You can name your container by using the `--name` flag, for example: $ sudo docker run -d -P --name web training/webapp python app.py -You can see we've launched a new container and used the `--name` flag to -call the container `web`. We can see the container's name using the +This launches a new container and uses the `--name` flag to +name the container `web`. You can see the container's name using the `docker ps` command. $ sudo docker ps -l CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES aed84ee21bde training/webapp:latest python app.py 12 hours ago Up 2 seconds 0.0.0.0:49154->5000/tcp web -We can also use `docker inspect` to return the container's name. +You can also use `docker inspect` to return the container's name. $ sudo docker inspect -f "{{ .Name }}" aed84ee21bde /web > **Note:** > Container names have to be unique. That means you can only call -> one container `web`.
If you want to re-use a container name you must delete the -> old container with the `docker rm` command before you can create a new +> one container `web`. If you want to re-use a container name you must delete +> the old container (with `docker rm`) before you can create a new > container with the same name. As an alternative you can use the `--rm` > flag with the `docker run` command. This will delete the container -> immediately after it stops. +> immediately after it is stopped. ## Container Linking -Links allow containers to discover and securely communicate with each -other. To create a link you use the `--link` flag. Let's create a new -container, this one a database. +Links allow containers to discover each other and securely transfer information about one +container to another container. When you set up a link, you create a conduit between a +source container and a recipient container. The recipient can then access select data +about the source. To create a link, you use the `--link` flag. First, create a new +container, this time one containing a database. $ sudo docker run -d --name db training/postgres -Here we've created a new container called `db` using the `training/postgres` +This creates a new container called `db` from the `training/postgres` image, which contains a PostgreSQL database. -Now let's create a new `web` container and link it with our `db` container. +Now, you need to delete the `web` container you created previously so you can replace it +with a linked one: + + $ docker rm -f web + +Now, create a new `web` container and link it with your `db` container. $ sudo docker run -d -P --name web --link db:db training/webapp python app.py -This will link the new `web` container with the `db` container we created +This will link the new `web` container with the `db` container you created earlier. The `--link` flag takes the form: --link name:alias Where `name` is the name of the container we're linking to and `alias` is an -alias for the link name. We'll see how that alias gets used shortly. +alias for the link name. You'll see how that alias gets used shortly. -Let's look at our linked containers using `docker ps`. +Next, look at your linked containers using `docker ps`. $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 349169744e49 training/postgres:latest su postgres -c '/usr About a minute ago Up About a minute 5432/tcp db - aed84ee21bde training/webapp:latest python app.py 16 hours ago Up 2 minutes 0.0.0.0:49154->5000/tcp db/web,web + 349169744e49 training/postgres:latest su postgres -c '/usr About a minute ago Up About a minute 5432/tcp db, web/db + aed84ee21bde training/webapp:latest python app.py 16 hours ago Up 2 minutes 0.0.0.0:49154->5000/tcp web -We can see our named containers, `db` and `web`, and we can see that the `web` -containers also shows `db/web` in the `NAMES` column. This tells us that the -`web` container is linked to the `db` container in a parent/child relationship. +You can see your named containers, `db` and `web`, and you can see that the `db` +container also shows `web/db` in the `NAMES` column. This tells you that the +`web` container is linked to the `db` container, which allows it to access information +about the `db` container. -So what does linking the containers do? Well we've discovered the link creates -a parent-child relationship between the two containers. The parent container, -here `db`, can access information on the child container `web`. 
To do this -Docker creates a secure tunnel between the containers without the need to -expose any ports externally on the container. You'll note when we started the -`db` container we did not use either of the `-P` or `-p` flags. As we're -linking the containers we don't need to expose the PostgreSQL database via the -network. +So what does linking the containers actually do? You've learned that a link creates a +source container that can provide information about itself to a recipient container. In +our example, the recipient, `web`, can access information about the source `db`. To do +this, Docker creates a secure tunnel between the containers that doesn't need to +expose any ports externally on the container; you'll note when we started the +`db` container we did not use either the `-P` or `-p` flags. That's a big benefit of +linking: we don't need to expose the source container, here the PostgreSQL database, to +the network. -Docker exposes connectivity information for the parent container inside the -child container in two ways: +Docker exposes connectivity information for the source container to the +recipient container in two ways: * Environment variables, * Updating the `/etc/hosts` file. -Let's look first at the environment variables Docker sets. Let's run the `env` -command to list the container's environment variables. +Docker can set a number of environment variables. You run the `env` +command to list the specified container's environment variables. ``` $ sudo docker run --rm --name web2 --link db:db training/webapp env @@ -191,26 +198,27 @@ command to list the container's environment variables. > container. Similarly, some daemons (such as `sshd`) > will scrub them when spawning shells for connection. -We can see that Docker has created a series of environment variables with -useful information about our `db` container. Each variable is prefixed with -`DB_` which is populated from the `alias` we specified above. If our `alias` -were `db1` the variables would be prefixed with `DB1_`. You can use these +You can see that Docker has created a series of environment variables with +useful information about the source `db` container. Each variable is prefixed with +`DB_`, which is populated from the `alias` you specified above. If the `alias` +were `db1`, the variables would be prefixed with `DB1_`. You can use these environment variables to configure your applications to connect to the database -on the `db` container. The connection will be secure, private and only the +on the `db` container. The connection will be secure and private; only the linked `web` container will be able to talk to the `db` container. -In addition to the environment variables Docker adds a host entry for the -linked parent to the `/etc/hosts` file. Let's look at this file on the `web` -container now. +In addition to the environment variables, Docker adds a host entry for the +source container to the `/etc/hosts` file. Here's an entry for the `web` +container: + $ sudo docker run -t -i --rm --link db:db training/webapp /bin/bash root@aed84ee21bde:/opt/webapp# cat /etc/hosts 172.17.0.7 aed84ee21bde . . . 172.17.0.5 db -We can see two relevant host entries. The first is an entry for the `web` +You can see two relevant host entries. The first is an entry for the `web` container that uses the Container ID as a host name. The second entry uses the -link alias to reference the IP address of the `db` container. Let's try to ping +link alias to reference the IP address of the `db` container. 
-In addition to the environment variables Docker adds a host entry for the
-linked parent to the `/etc/hosts` file. Let's look at this file on the `web`
-container now.
+In addition to the environment variables, Docker adds a host entry for the
+source container to the `/etc/hosts` file. Here's how that file looks inside
+the `web` container:
+
    $ sudo docker run -t -i --rm --link db:db training/webapp /bin/bash
    root@aed84ee21bde:/opt/webapp# cat /etc/hosts
    172.17.0.7 aed84ee21bde
    . . .
    172.17.0.5 db

-We can see two relevant host entries. The first is an entry for the `web`
+You can see two relevant host entries. The first is an entry for the `web`
container that uses the Container ID as a host name. The second entry uses the
-link alias to reference the IP address of the `db` container. Let's try to ping
+link alias to reference the IP address of the `db` container. You can ping
that host now via this host name.

    root@aed84ee21bde:/opt/webapp# apt-get install -yqq inetutils-ping

@@ -221,21 +229,22 @@ that host now via this host name.
    56 bytes from 172.17.0.5: icmp_seq=2 ttl=64 time=0.256 ms

> **Note:**
-> We had to install `ping` because our container didn't have it.
+> In this example you had to install `ping` first because it was not included
+> in the container initially.

-We've used the `ping` command to ping the `db` container using it's host entry
-which resolves to `172.17.0.5`. We can make use of this host entry to configure
-an application to make use of our `db` container.
+Here, you used the `ping` command to ping the `db` container using its host entry,
+which resolves to `172.17.0.5`. You can use this host entry to configure an application
+to make use of your `db` container.

> **Note:**
-> You can link multiple child containers to a single parent. For
-> example, we could have multiple web containers attached to our `db`
-> container.
+> You can link multiple recipient containers to a single source. For
+> example, you could have multiple (differently named) web containers attached
+> to your `db` container.

# Next step

-Now we know how to link Docker containers together the next step is
-learning how to manage data, volumes and mounts inside our containers.
+Now that you know how to link Docker containers together, the next step is
+learning how to manage data, volumes, and mounts inside your containers.

Go to [Managing Data in Containers](/userguide/dockervolumes).

diff --git a/components/engine/docs/sources/userguide/dockervolumes.md b/components/engine/docs/sources/userguide/dockervolumes.md
index 93ac37b1cc..97593a1e04 100644
--- a/components/engine/docs/sources/userguide/dockervolumes.md
+++ b/components/engine/docs/sources/userguide/dockervolumes.md
@@ -59,9 +59,10 @@ absolute path and if the directory doesn't exist Docker will automatically
create it for you.

> **Note:**
> This is not available from a `Dockerfile` due to the portability
> and sharing purpose of it. As the host directory is, by its nature,
-> host-dependent it might not work all hosts.
+> host-dependent, a host directory specified in a `Dockerfile` probably
+> wouldn't work on all hosts.

Docker defaults to a read-write volume but we can also mount a directory
read-only.

@@ -71,6 +72,24 @@ read-only.

Here we've mounted the same `/src/webapp` directory but we've added the `ro`
option to specify that the mount should be read-only.

+### Mount a Host File as a Data Volume
+
+The `-v` flag can also be used to mount a single file, instead of *just*
+directories, from the host machine.
+
+    $ sudo docker run --rm -it -v ~/.bash_history:/.bash_history ubuntu /bin/bash
+
+This will drop you into a bash shell in a new container. You will have your
+bash history from the host, and when you exit the container, the host will
+have the history of the commands you typed while in the container.
+
+> **Note:**
+> Many tools used to edit files, including `vi` and `sed --in-place`, may
+> result in an inode change. Since Docker v1.1.0, this will produce an error
+> such as "*sed: cannot rename ./sedKdJ9Dy: Device or resource busy*". If you
+> want to edit the mounted file, it is often easiest to mount the file's
+> parent directory instead, as shown below.
+
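+For example, here is a minimal sketch of that workaround, using an
+illustrative `~/dotfiles` directory that contains the file you want to edit:
+
+    $ sudo docker run --rm -it -v ~/dotfiles:/dotfiles ubuntu /bin/bash
+
+Because the whole directory is mounted, tools that replace a file by renaming
+a temporary copy (such as `vi` and `sed --in-place`) work normally on anything
+under `/dotfiles`.
+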
## Creating and mounting a Data Volume Container

If you have some persistent data that you want to share between
@@ -80,7 +99,7 @@ it.

Let's create a new named container with a volume to share.

-    $ sudo docker run -d -v /dbdata --name dbdata training/postgres
+    $ sudo docker run -d -v /dbdata --name dbdata training/postgres echo Data-only container for postgres

You can then use the `--volumes-from` flag to mount the `/dbdata` volume in
another container.

@@ -112,14 +131,14 @@ like so:

    $ sudo docker run --volumes-from dbdata -v $(pwd):/backup ubuntu tar cvf /backup/backup.tar /dbdata

-Here's we've launched a new container and mounted the volume from the
+Here we've launched a new container and mounted the volume from the
`dbdata` container. We've then mounted a local host directory as `/backup`.
Finally, we've passed a command that uses `tar` to backup the contents of the
`dbdata` volume to a `backup.tar` file inside our `/backup` directory. When the
command completes and the container stops we'll be left with a backup of our
`dbdata` volume.

-You could then to restore to the same container, or another that you've made
+You could then restore it to the same container, or another that you've made
elsewhere. Create a new container.

    $ sudo docker run -v /dbdata --name dbdata2 ubuntu /bin/bash
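+
+Then, as a sketch of the matching restore step (assuming the `backup.tar`
+created above is still in your current directory), un-tar the backup into the
+new container's volume:
+
+    $ sudo docker run --volumes-from dbdata2 -v $(pwd):/backup ubuntu tar xvf /backup/backup.tar
+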
diff --git a/components/engine/docs/sources/userguide/index.md b/components/engine/docs/sources/userguide/index.md
index eef59c000b..08d6be0731 100644
--- a/components/engine/docs/sources/userguide/index.md
+++ b/components/engine/docs/sources/userguide/index.md
@@ -87,7 +87,7 @@ Go to [Working with Docker Hub](/userguide/dockerrepos).

* [Docker blog](http://blog.docker.com/)
* [Docker documentation](http://docs.docker.com/)
* [Docker Getting Started Guide](http://www.docker.com/gettingstarted/)
-* [Docker code on GitHub](https://github.com/dotcloud/docker)
+* [Docker code on GitHub](https://github.com/docker/docker)
* [Docker mailing list](https://groups.google.com/forum/#!forum/docker-user)
* Docker on IRC: irc.freenode.net and channel #docker

diff --git a/components/engine/docs/sources/userguide/login-web.png b/components/engine/docs/sources/userguide/login-web.png
index 8fe04d829e..e9d26b5e80 100644
Binary files a/components/engine/docs/sources/userguide/login-web.png and b/components/engine/docs/sources/userguide/login-web.png differ
diff --git a/components/engine/docs/sources/userguide/register-confirm.png b/components/engine/docs/sources/userguide/register-confirm.png
deleted file mode 100644
index 4057cbe965..0000000000
Binary files a/components/engine/docs/sources/userguide/register-confirm.png and /dev/null differ
diff --git a/components/engine/docs/sources/userguide/register-web.png b/components/engine/docs/sources/userguide/register-web.png
index 2c950d2e4b..6c549f8fd3 100644
Binary files a/components/engine/docs/sources/userguide/register-web.png and b/components/engine/docs/sources/userguide/register-web.png differ
diff --git a/components/engine/docs/sources/userguide/search.png b/components/engine/docs/sources/userguide/search.png
index 27370741a7..ded0d0d2d3 100644
Binary files a/components/engine/docs/sources/userguide/search.png and b/components/engine/docs/sources/userguide/search.png differ
diff --git a/components/engine/docs/sources/userguide/usingdocker.md b/components/engine/docs/sources/userguide/usingdocker.md
index 857eac5e56..a882a79c7d 100644
--- a/components/engine/docs/sources/userguide/usingdocker.md
+++ b/components/engine/docs/sources/userguide/usingdocker.md
@@ -76,7 +76,7 @@ command:

Or you can also pass the `--help` flag to the `docker` binary.

-    $ sudo docker images --help
+    $ sudo docker attach --help

This will display the help text and all available flags:

diff --git a/components/engine/docs/theme/mkdocs/base.html b/components/engine/docs/theme/mkdocs/base.html
index 8f2bd0603a..2f518b5d59 100644
--- a/components/engine/docs/theme/mkdocs/base.html
+++ b/components/engine/docs/theme/mkdocs/base.html
@@ -4,10 +4,11 @@

-{% set docker_version = "$VERSION" %}{% set docker_commit = "$GITCOMMIT" %}{% set docker_branch = "$GIT_BRANCH" %}{% set aws_bucket = "$AWS_S3_BUCKET" %}
+    {% set docker_version = "$VERSION" %}{% set major_minor = "$MAJOR_MINOR" %}{% set docker_commit = "$GITCOMMIT" %}{% set docker_branch = "$GIT_BRANCH" %}{% set aws_bucket = "$AWS_S3_BUCKET" %}{% set build_date = "$BUILD_DATE" %}
+
{% if meta.page_description %}{% endif %}
{% if meta.page_keywords %}{% endif %}

@@ -27,15 +28,15 @@

{% if config.google_analytics %}
{% endif %}

@@ -48,11 +49,23 @@
    {% if current_page.title != '**HIDDEN**' %}
    {{ current_page.title }}

    {% endif %}

@@ -119,19 +132,18 @@ piCId = '1482'; })();